2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
44 #define LE_FLOWCTL_MAX_CREDITS 65535
48 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
49 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
| L2CAP_FC_CONNLESS
, };
51 static LIST_HEAD(chan_list
);
52 static DEFINE_RWLOCK(chan_list_lock
);
54 static u16 le_max_credits
= L2CAP_LE_MAX_CREDITS
;
55 static u16 le_default_mps
= L2CAP_LE_DEFAULT_MPS
;
57 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
58 u8 code
, u8 ident
, u16 dlen
, void *data
);
59 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
61 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
62 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
64 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
65 struct sk_buff_head
*skbs
, u8 event
);
67 static inline __u8
bdaddr_type(struct hci_conn
*hcon
, __u8 type
)
69 if (hcon
->type
== LE_LINK
) {
70 if (type
== ADDR_LE_DEV_PUBLIC
)
71 return BDADDR_LE_PUBLIC
;
73 return BDADDR_LE_RANDOM
;
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
86 list_for_each_entry(c
, &conn
->chan_l
, list
) {
93 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
98 list_for_each_entry(c
, &conn
->chan_l
, list
) {
105 /* Find channel with given SCID.
106 * Returns locked channel. */
107 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
110 struct l2cap_chan
*c
;
112 mutex_lock(&conn
->chan_lock
);
113 c
= __l2cap_get_chan_by_scid(conn
, cid
);
116 mutex_unlock(&conn
->chan_lock
);
121 /* Find channel with given DCID.
122 * Returns locked channel.
124 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
127 struct l2cap_chan
*c
;
129 mutex_lock(&conn
->chan_lock
);
130 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
133 mutex_unlock(&conn
->chan_lock
);
138 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
141 struct l2cap_chan
*c
;
143 list_for_each_entry(c
, &conn
->chan_l
, list
) {
144 if (c
->ident
== ident
)
150 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
153 struct l2cap_chan
*c
;
155 mutex_lock(&conn
->chan_lock
);
156 c
= __l2cap_get_chan_by_ident(conn
, ident
);
159 mutex_unlock(&conn
->chan_lock
);
164 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
166 struct l2cap_chan
*c
;
168 list_for_each_entry(c
, &chan_list
, global_l
) {
169 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
175 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
179 write_lock(&chan_list_lock
);
181 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
194 for (p
= 0x1001; p
< 0x1100; p
+= 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
196 chan
->psm
= cpu_to_le16(p
);
197 chan
->sport
= cpu_to_le16(p
);
204 write_unlock(&chan_list_lock
);
207 EXPORT_SYMBOL_GPL(l2cap_add_psm
);
209 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
211 write_lock(&chan_list_lock
);
215 write_unlock(&chan_list_lock
);
220 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
224 if (conn
->hcon
->type
== LE_LINK
)
225 dyn_end
= L2CAP_CID_LE_DYN_END
;
227 dyn_end
= L2CAP_CID_DYN_END
;
229 for (cid
= L2CAP_CID_DYN_START
; cid
< dyn_end
; cid
++) {
230 if (!__l2cap_get_chan_by_scid(conn
, cid
))
237 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
239 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
240 state_to_string(state
));
243 chan
->ops
->state_change(chan
, state
, 0);
246 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
250 chan
->ops
->state_change(chan
, chan
->state
, err
);
253 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
255 chan
->ops
->state_change(chan
, chan
->state
, err
);
258 static void __set_retrans_timer(struct l2cap_chan
*chan
)
260 if (!delayed_work_pending(&chan
->monitor_timer
) &&
261 chan
->retrans_timeout
) {
262 l2cap_set_timer(chan
, &chan
->retrans_timer
,
263 msecs_to_jiffies(chan
->retrans_timeout
));
267 static void __set_monitor_timer(struct l2cap_chan
*chan
)
269 __clear_retrans_timer(chan
);
270 if (chan
->monitor_timeout
) {
271 l2cap_set_timer(chan
, &chan
->monitor_timer
,
272 msecs_to_jiffies(chan
->monitor_timeout
));
276 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
281 skb_queue_walk(head
, skb
) {
282 if (bt_cb(skb
)->control
.txseq
== seq
)
289 /* ---- L2CAP sequence number lists ---- */
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
300 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
302 size_t alloc_size
, i
;
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
308 alloc_size
= roundup_pow_of_two(size
);
310 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
314 seq_list
->mask
= alloc_size
- 1;
315 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
316 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
317 for (i
= 0; i
< alloc_size
; i
++)
318 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
323 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
325 kfree(seq_list
->list
);
328 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
331 /* Constant-time check for list membership */
332 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
335 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
337 u16 seq
= seq_list
->head
;
338 u16 mask
= seq_list
->mask
;
340 seq_list
->head
= seq_list
->list
[seq
& mask
];
341 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
343 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
344 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
345 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
351 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
355 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
358 for (i
= 0; i
<= seq_list
->mask
; i
++)
359 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
361 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
362 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
365 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
367 u16 mask
= seq_list
->mask
;
369 /* All appends happen in constant time */
371 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
374 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
375 seq_list
->head
= seq
;
377 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
379 seq_list
->tail
= seq
;
380 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
383 static void l2cap_chan_timeout(struct work_struct
*work
)
385 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
387 struct l2cap_conn
*conn
= chan
->conn
;
390 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
392 mutex_lock(&conn
->chan_lock
);
393 l2cap_chan_lock(chan
);
395 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
396 reason
= ECONNREFUSED
;
397 else if (chan
->state
== BT_CONNECT
&&
398 chan
->sec_level
!= BT_SECURITY_SDP
)
399 reason
= ECONNREFUSED
;
403 l2cap_chan_close(chan
, reason
);
405 l2cap_chan_unlock(chan
);
407 chan
->ops
->close(chan
);
408 mutex_unlock(&conn
->chan_lock
);
410 l2cap_chan_put(chan
);
413 struct l2cap_chan
*l2cap_chan_create(void)
415 struct l2cap_chan
*chan
;
417 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
421 mutex_init(&chan
->lock
);
423 write_lock(&chan_list_lock
);
424 list_add(&chan
->global_l
, &chan_list
);
425 write_unlock(&chan_list_lock
);
427 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
429 chan
->state
= BT_OPEN
;
431 kref_init(&chan
->kref
);
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
436 BT_DBG("chan %p", chan
);
440 EXPORT_SYMBOL_GPL(l2cap_chan_create
);
442 static void l2cap_chan_destroy(struct kref
*kref
)
444 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
446 BT_DBG("chan %p", chan
);
448 write_lock(&chan_list_lock
);
449 list_del(&chan
->global_l
);
450 write_unlock(&chan_list_lock
);
455 void l2cap_chan_hold(struct l2cap_chan
*c
)
457 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
462 void l2cap_chan_put(struct l2cap_chan
*c
)
464 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
466 kref_put(&c
->kref
, l2cap_chan_destroy
);
468 EXPORT_SYMBOL_GPL(l2cap_chan_put
);
470 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
472 chan
->fcs
= L2CAP_FCS_CRC16
;
473 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
474 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
475 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
476 chan
->remote_max_tx
= chan
->max_tx
;
477 chan
->remote_tx_win
= chan
->tx_win
;
478 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
479 chan
->sec_level
= BT_SECURITY_LOW
;
480 chan
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
481 chan
->retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
482 chan
->monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
483 chan
->conf_state
= 0;
485 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
487 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults
);
489 static void l2cap_le_flowctl_init(struct l2cap_chan
*chan
)
492 chan
->sdu_last_frag
= NULL
;
494 chan
->tx_credits
= 0;
495 chan
->rx_credits
= le_max_credits
;
496 chan
->mps
= min_t(u16
, chan
->imtu
, le_default_mps
);
498 skb_queue_head_init(&chan
->tx_q
);
501 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
503 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
504 __le16_to_cpu(chan
->psm
), chan
->dcid
);
506 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
510 switch (chan
->chan_type
) {
511 case L2CAP_CHAN_CONN_ORIENTED
:
512 /* Alloc CID for connection-oriented socket */
513 chan
->scid
= l2cap_alloc_cid(conn
);
514 if (conn
->hcon
->type
== ACL_LINK
)
515 chan
->omtu
= L2CAP_DEFAULT_MTU
;
518 case L2CAP_CHAN_CONN_LESS
:
519 /* Connectionless socket */
520 chan
->scid
= L2CAP_CID_CONN_LESS
;
521 chan
->dcid
= L2CAP_CID_CONN_LESS
;
522 chan
->omtu
= L2CAP_DEFAULT_MTU
;
525 case L2CAP_CHAN_FIXED
:
526 /* Caller will set CID and CID specific MTU values */
530 /* Raw socket can send/recv signalling messages only */
531 chan
->scid
= L2CAP_CID_SIGNALING
;
532 chan
->dcid
= L2CAP_CID_SIGNALING
;
533 chan
->omtu
= L2CAP_DEFAULT_MTU
;
536 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
537 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
538 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
539 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
540 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
541 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
543 l2cap_chan_hold(chan
);
545 hci_conn_hold(conn
->hcon
);
547 list_add(&chan
->list
, &conn
->chan_l
);
550 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
552 mutex_lock(&conn
->chan_lock
);
553 __l2cap_chan_add(conn
, chan
);
554 mutex_unlock(&conn
->chan_lock
);
557 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
559 struct l2cap_conn
*conn
= chan
->conn
;
561 __clear_chan_timer(chan
);
563 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
566 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
567 /* Delete from channel list */
568 list_del(&chan
->list
);
570 l2cap_chan_put(chan
);
574 if (chan
->scid
!= L2CAP_CID_A2MP
)
575 hci_conn_drop(conn
->hcon
);
577 if (mgr
&& mgr
->bredr_chan
== chan
)
578 mgr
->bredr_chan
= NULL
;
581 if (chan
->hs_hchan
) {
582 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
584 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
585 amp_disconnect_logical_link(hs_hchan
);
588 chan
->ops
->teardown(chan
, err
);
590 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
594 case L2CAP_MODE_BASIC
:
597 case L2CAP_MODE_LE_FLOWCTL
:
598 skb_queue_purge(&chan
->tx_q
);
601 case L2CAP_MODE_ERTM
:
602 __clear_retrans_timer(chan
);
603 __clear_monitor_timer(chan
);
604 __clear_ack_timer(chan
);
606 skb_queue_purge(&chan
->srej_q
);
608 l2cap_seq_list_free(&chan
->srej_list
);
609 l2cap_seq_list_free(&chan
->retrans_list
);
613 case L2CAP_MODE_STREAMING
:
614 skb_queue_purge(&chan
->tx_q
);
620 EXPORT_SYMBOL_GPL(l2cap_chan_del
);
622 void l2cap_conn_update_id_addr(struct hci_conn
*hcon
)
624 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
625 struct l2cap_chan
*chan
;
627 mutex_lock(&conn
->chan_lock
);
629 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
630 l2cap_chan_lock(chan
);
631 bacpy(&chan
->dst
, &hcon
->dst
);
632 chan
->dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
633 l2cap_chan_unlock(chan
);
636 mutex_unlock(&conn
->chan_lock
);
639 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
641 struct l2cap_conn
*conn
= chan
->conn
;
642 struct l2cap_le_conn_rsp rsp
;
645 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
646 result
= L2CAP_CR_AUTHORIZATION
;
648 result
= L2CAP_CR_BAD_PSM
;
650 l2cap_state_change(chan
, BT_DISCONN
);
652 rsp
.dcid
= cpu_to_le16(chan
->scid
);
653 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
654 rsp
.mps
= cpu_to_le16(chan
->mps
);
655 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
656 rsp
.result
= cpu_to_le16(result
);
658 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
662 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
664 struct l2cap_conn
*conn
= chan
->conn
;
665 struct l2cap_conn_rsp rsp
;
668 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
669 result
= L2CAP_CR_SEC_BLOCK
;
671 result
= L2CAP_CR_BAD_PSM
;
673 l2cap_state_change(chan
, BT_DISCONN
);
675 rsp
.scid
= cpu_to_le16(chan
->dcid
);
676 rsp
.dcid
= cpu_to_le16(chan
->scid
);
677 rsp
.result
= cpu_to_le16(result
);
678 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
680 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
683 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
685 struct l2cap_conn
*conn
= chan
->conn
;
687 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
689 switch (chan
->state
) {
691 chan
->ops
->teardown(chan
, 0);
696 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
697 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
698 l2cap_send_disconn_req(chan
, reason
);
700 l2cap_chan_del(chan
, reason
);
704 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
705 if (conn
->hcon
->type
== ACL_LINK
)
706 l2cap_chan_connect_reject(chan
);
707 else if (conn
->hcon
->type
== LE_LINK
)
708 l2cap_chan_le_connect_reject(chan
);
711 l2cap_chan_del(chan
, reason
);
716 l2cap_chan_del(chan
, reason
);
720 chan
->ops
->teardown(chan
, 0);
724 EXPORT_SYMBOL(l2cap_chan_close
);
726 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
728 switch (chan
->chan_type
) {
730 switch (chan
->sec_level
) {
731 case BT_SECURITY_HIGH
:
732 case BT_SECURITY_FIPS
:
733 return HCI_AT_DEDICATED_BONDING_MITM
;
734 case BT_SECURITY_MEDIUM
:
735 return HCI_AT_DEDICATED_BONDING
;
737 return HCI_AT_NO_BONDING
;
740 case L2CAP_CHAN_CONN_LESS
:
741 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_3DSP
)) {
742 if (chan
->sec_level
== BT_SECURITY_LOW
)
743 chan
->sec_level
= BT_SECURITY_SDP
;
745 if (chan
->sec_level
== BT_SECURITY_HIGH
||
746 chan
->sec_level
== BT_SECURITY_FIPS
)
747 return HCI_AT_NO_BONDING_MITM
;
749 return HCI_AT_NO_BONDING
;
751 case L2CAP_CHAN_CONN_ORIENTED
:
752 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_SDP
)) {
753 if (chan
->sec_level
== BT_SECURITY_LOW
)
754 chan
->sec_level
= BT_SECURITY_SDP
;
756 if (chan
->sec_level
== BT_SECURITY_HIGH
||
757 chan
->sec_level
== BT_SECURITY_FIPS
)
758 return HCI_AT_NO_BONDING_MITM
;
760 return HCI_AT_NO_BONDING
;
764 switch (chan
->sec_level
) {
765 case BT_SECURITY_HIGH
:
766 case BT_SECURITY_FIPS
:
767 return HCI_AT_GENERAL_BONDING_MITM
;
768 case BT_SECURITY_MEDIUM
:
769 return HCI_AT_GENERAL_BONDING
;
771 return HCI_AT_NO_BONDING
;
777 /* Service level security */
778 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
780 struct l2cap_conn
*conn
= chan
->conn
;
783 if (conn
->hcon
->type
== LE_LINK
)
784 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
786 auth_type
= l2cap_get_auth_type(chan
);
788 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
791 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
795 /* Get next available identificator.
796 * 1 - 128 are used by kernel.
797 * 129 - 199 are reserved.
798 * 200 - 254 are used by utilities like l2ping, etc.
801 spin_lock(&conn
->lock
);
803 if (++conn
->tx_ident
> 128)
808 spin_unlock(&conn
->lock
);
813 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
816 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
819 BT_DBG("code 0x%2.2x", code
);
824 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
825 flags
= ACL_START_NO_FLUSH
;
829 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
830 skb
->priority
= HCI_PRIO_MAX
;
832 hci_send_acl(conn
->hchan
, skb
, flags
);
835 static bool __chan_is_moving(struct l2cap_chan
*chan
)
837 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
838 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
841 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
843 struct hci_conn
*hcon
= chan
->conn
->hcon
;
846 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
849 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
851 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
858 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
859 lmp_no_flush_capable(hcon
->hdev
))
860 flags
= ACL_START_NO_FLUSH
;
864 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
865 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
868 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
870 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
871 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
873 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
876 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
877 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
884 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
885 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
892 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
894 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
895 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
897 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
900 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
901 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
908 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
909 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
916 static inline void __unpack_control(struct l2cap_chan
*chan
,
919 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
920 __unpack_extended_control(get_unaligned_le32(skb
->data
),
921 &bt_cb(skb
)->control
);
922 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
924 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
925 &bt_cb(skb
)->control
);
926 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
930 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
934 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
935 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
937 if (control
->sframe
) {
938 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
939 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
940 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
942 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
943 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
949 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
953 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
954 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
956 if (control
->sframe
) {
957 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
958 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
959 packed
|= L2CAP_CTRL_FRAME_TYPE
;
961 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
962 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
968 static inline void __pack_control(struct l2cap_chan
*chan
,
969 struct l2cap_ctrl
*control
,
972 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
973 put_unaligned_le32(__pack_extended_control(control
),
974 skb
->data
+ L2CAP_HDR_SIZE
);
976 put_unaligned_le16(__pack_enhanced_control(control
),
977 skb
->data
+ L2CAP_HDR_SIZE
);
981 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
983 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
984 return L2CAP_EXT_HDR_SIZE
;
986 return L2CAP_ENH_HDR_SIZE
;
989 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
993 struct l2cap_hdr
*lh
;
994 int hlen
= __ertm_hdr_size(chan
);
996 if (chan
->fcs
== L2CAP_FCS_CRC16
)
997 hlen
+= L2CAP_FCS_SIZE
;
999 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
1002 return ERR_PTR(-ENOMEM
);
1004 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1005 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
1006 lh
->cid
= cpu_to_le16(chan
->dcid
);
1008 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1009 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1011 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1013 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1014 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1015 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1018 skb
->priority
= HCI_PRIO_MAX
;
1022 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1023 struct l2cap_ctrl
*control
)
1025 struct sk_buff
*skb
;
1028 BT_DBG("chan %p, control %p", chan
, control
);
1030 if (!control
->sframe
)
1033 if (__chan_is_moving(chan
))
1036 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1040 if (control
->super
== L2CAP_SUPER_RR
)
1041 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1042 else if (control
->super
== L2CAP_SUPER_RNR
)
1043 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1045 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1046 chan
->last_acked_seq
= control
->reqseq
;
1047 __clear_ack_timer(chan
);
1050 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1051 control
->final
, control
->poll
, control
->super
);
1053 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1054 control_field
= __pack_extended_control(control
);
1056 control_field
= __pack_enhanced_control(control
);
1058 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1060 l2cap_do_send(chan
, skb
);
1063 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1065 struct l2cap_ctrl control
;
1067 BT_DBG("chan %p, poll %d", chan
, poll
);
1069 memset(&control
, 0, sizeof(control
));
1071 control
.poll
= poll
;
1073 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1074 control
.super
= L2CAP_SUPER_RNR
;
1076 control
.super
= L2CAP_SUPER_RR
;
1078 control
.reqseq
= chan
->buffer_seq
;
1079 l2cap_send_sframe(chan
, &control
);
1082 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1084 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1087 static bool __amp_capable(struct l2cap_chan
*chan
)
1089 struct l2cap_conn
*conn
= chan
->conn
;
1090 struct hci_dev
*hdev
;
1091 bool amp_available
= false;
1093 if (!conn
->hs_enabled
)
1096 if (!(conn
->fixed_chan_mask
& L2CAP_FC_A2MP
))
1099 read_lock(&hci_dev_list_lock
);
1100 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1101 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1102 test_bit(HCI_UP
, &hdev
->flags
)) {
1103 amp_available
= true;
1107 read_unlock(&hci_dev_list_lock
);
1109 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1110 return amp_available
;
1115 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1117 /* Check EFS parameters */
1121 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1123 struct l2cap_conn
*conn
= chan
->conn
;
1124 struct l2cap_conn_req req
;
1126 req
.scid
= cpu_to_le16(chan
->scid
);
1127 req
.psm
= chan
->psm
;
1129 chan
->ident
= l2cap_get_ident(conn
);
1131 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1133 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1136 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1138 struct l2cap_create_chan_req req
;
1139 req
.scid
= cpu_to_le16(chan
->scid
);
1140 req
.psm
= chan
->psm
;
1141 req
.amp_id
= amp_id
;
1143 chan
->ident
= l2cap_get_ident(chan
->conn
);
1145 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1149 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1151 struct sk_buff
*skb
;
1153 BT_DBG("chan %p", chan
);
1155 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1158 __clear_retrans_timer(chan
);
1159 __clear_monitor_timer(chan
);
1160 __clear_ack_timer(chan
);
1162 chan
->retry_count
= 0;
1163 skb_queue_walk(&chan
->tx_q
, skb
) {
1164 if (bt_cb(skb
)->control
.retries
)
1165 bt_cb(skb
)->control
.retries
= 1;
1170 chan
->expected_tx_seq
= chan
->buffer_seq
;
1172 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1173 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1174 l2cap_seq_list_clear(&chan
->retrans_list
);
1175 l2cap_seq_list_clear(&chan
->srej_list
);
1176 skb_queue_purge(&chan
->srej_q
);
1178 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1179 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1181 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1184 static void l2cap_move_done(struct l2cap_chan
*chan
)
1186 u8 move_role
= chan
->move_role
;
1187 BT_DBG("chan %p", chan
);
1189 chan
->move_state
= L2CAP_MOVE_STABLE
;
1190 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1192 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1195 switch (move_role
) {
1196 case L2CAP_MOVE_ROLE_INITIATOR
:
1197 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1198 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1200 case L2CAP_MOVE_ROLE_RESPONDER
:
1201 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1206 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1208 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1209 chan
->conf_state
= 0;
1210 __clear_chan_timer(chan
);
1212 if (chan
->mode
== L2CAP_MODE_LE_FLOWCTL
&& !chan
->tx_credits
)
1213 chan
->ops
->suspend(chan
);
1215 chan
->state
= BT_CONNECTED
;
1217 chan
->ops
->ready(chan
);
1220 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1222 struct l2cap_conn
*conn
= chan
->conn
;
1223 struct l2cap_le_conn_req req
;
1225 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
))
1228 req
.psm
= chan
->psm
;
1229 req
.scid
= cpu_to_le16(chan
->scid
);
1230 req
.mtu
= cpu_to_le16(chan
->imtu
);
1231 req
.mps
= cpu_to_le16(chan
->mps
);
1232 req
.credits
= cpu_to_le16(chan
->rx_credits
);
1234 chan
->ident
= l2cap_get_ident(conn
);
1236 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1240 static void l2cap_le_start(struct l2cap_chan
*chan
)
1242 struct l2cap_conn
*conn
= chan
->conn
;
1244 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1248 l2cap_chan_ready(chan
);
1252 if (chan
->state
== BT_CONNECT
)
1253 l2cap_le_connect(chan
);
1256 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1258 if (__amp_capable(chan
)) {
1259 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1260 a2mp_discover_amp(chan
);
1261 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1262 l2cap_le_start(chan
);
1264 l2cap_send_conn_req(chan
);
1268 static void l2cap_do_start(struct l2cap_chan
*chan
)
1270 struct l2cap_conn
*conn
= chan
->conn
;
1272 if (conn
->hcon
->type
== LE_LINK
) {
1273 l2cap_le_start(chan
);
1277 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1278 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1281 if (l2cap_chan_check_security(chan
) &&
1282 __l2cap_no_conn_pending(chan
)) {
1283 l2cap_start_connection(chan
);
1286 struct l2cap_info_req req
;
1287 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1289 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1290 conn
->info_ident
= l2cap_get_ident(conn
);
1292 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1294 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1299 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1301 u32 local_feat_mask
= l2cap_feat_mask
;
1303 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1306 case L2CAP_MODE_ERTM
:
1307 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1308 case L2CAP_MODE_STREAMING
:
1309 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1315 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1317 struct l2cap_conn
*conn
= chan
->conn
;
1318 struct l2cap_disconn_req req
;
1323 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1324 __clear_retrans_timer(chan
);
1325 __clear_monitor_timer(chan
);
1326 __clear_ack_timer(chan
);
1329 if (chan
->scid
== L2CAP_CID_A2MP
) {
1330 l2cap_state_change(chan
, BT_DISCONN
);
1334 req
.dcid
= cpu_to_le16(chan
->dcid
);
1335 req
.scid
= cpu_to_le16(chan
->scid
);
1336 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1339 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1342 /* ---- L2CAP connections ---- */
1343 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1345 struct l2cap_chan
*chan
, *tmp
;
1347 BT_DBG("conn %p", conn
);
1349 mutex_lock(&conn
->chan_lock
);
1351 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1352 l2cap_chan_lock(chan
);
1354 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1355 l2cap_chan_unlock(chan
);
1359 if (chan
->state
== BT_CONNECT
) {
1360 if (!l2cap_chan_check_security(chan
) ||
1361 !__l2cap_no_conn_pending(chan
)) {
1362 l2cap_chan_unlock(chan
);
1366 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1367 && test_bit(CONF_STATE2_DEVICE
,
1368 &chan
->conf_state
)) {
1369 l2cap_chan_close(chan
, ECONNRESET
);
1370 l2cap_chan_unlock(chan
);
1374 l2cap_start_connection(chan
);
1376 } else if (chan
->state
== BT_CONNECT2
) {
1377 struct l2cap_conn_rsp rsp
;
1379 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1380 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1382 if (l2cap_chan_check_security(chan
)) {
1383 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1384 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1385 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1386 chan
->ops
->defer(chan
);
1389 l2cap_state_change(chan
, BT_CONFIG
);
1390 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1391 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1394 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1395 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1398 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1401 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1402 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1403 l2cap_chan_unlock(chan
);
1407 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1408 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1409 l2cap_build_conf_req(chan
, buf
), buf
);
1410 chan
->num_conf_req
++;
1413 l2cap_chan_unlock(chan
);
1416 mutex_unlock(&conn
->chan_lock
);
1419 /* Find socket with cid and source/destination bdaddr.
1420 * Returns closest match, locked.
1422 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1426 struct l2cap_chan
*c
, *c1
= NULL
;
1428 read_lock(&chan_list_lock
);
1430 list_for_each_entry(c
, &chan_list
, global_l
) {
1431 if (state
&& c
->state
!= state
)
1434 if (c
->scid
== cid
) {
1435 int src_match
, dst_match
;
1436 int src_any
, dst_any
;
1439 src_match
= !bacmp(&c
->src
, src
);
1440 dst_match
= !bacmp(&c
->dst
, dst
);
1441 if (src_match
&& dst_match
) {
1442 read_unlock(&chan_list_lock
);
1447 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1448 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1449 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1450 (src_any
&& dst_any
))
1455 read_unlock(&chan_list_lock
);
1460 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1462 struct hci_conn
*hcon
= conn
->hcon
;
1463 struct l2cap_chan
*chan
, *pchan
;
1468 /* Check if we have socket listening on cid */
1469 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_ATT
,
1470 &hcon
->src
, &hcon
->dst
);
1474 /* Client ATT sockets should override the server one */
1475 if (__l2cap_get_chan_by_dcid(conn
, L2CAP_CID_ATT
))
1478 dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
1480 /* If device is blocked, do not create a channel for it */
1481 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
, dst_type
))
1484 /* For LE slave connections, make sure the connection interval
1485 * is in the range of the minium and maximum interval that has
1486 * been configured for this connection. If not, then trigger
1487 * the connection update procedure.
1489 if (!test_bit(HCI_CONN_MASTER
, &hcon
->flags
) &&
1490 (hcon
->le_conn_interval
< hcon
->le_conn_min_interval
||
1491 hcon
->le_conn_interval
> hcon
->le_conn_max_interval
)) {
1492 struct l2cap_conn_param_update_req req
;
1494 req
.min
= cpu_to_le16(hcon
->le_conn_min_interval
);
1495 req
.max
= cpu_to_le16(hcon
->le_conn_max_interval
);
1496 req
.latency
= cpu_to_le16(hcon
->le_conn_latency
);
1497 req
.to_multiplier
= cpu_to_le16(hcon
->le_supv_timeout
);
1499 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1500 L2CAP_CONN_PARAM_UPDATE_REQ
, sizeof(req
), &req
);
1503 l2cap_chan_lock(pchan
);
1505 chan
= pchan
->ops
->new_connection(pchan
);
1509 bacpy(&chan
->src
, &hcon
->src
);
1510 bacpy(&chan
->dst
, &hcon
->dst
);
1511 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1512 chan
->dst_type
= dst_type
;
1514 __l2cap_chan_add(conn
, chan
);
1517 l2cap_chan_unlock(pchan
);
1520 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1522 struct l2cap_chan
*chan
;
1523 struct hci_conn
*hcon
= conn
->hcon
;
1525 BT_DBG("conn %p", conn
);
1527 /* For outgoing pairing which doesn't necessarily have an
1528 * associated socket (e.g. mgmt_pair_device).
1530 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1531 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1533 mutex_lock(&conn
->chan_lock
);
1535 if (hcon
->type
== LE_LINK
)
1536 l2cap_le_conn_ready(conn
);
1538 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1540 l2cap_chan_lock(chan
);
1542 if (chan
->scid
== L2CAP_CID_A2MP
) {
1543 l2cap_chan_unlock(chan
);
1547 if (hcon
->type
== LE_LINK
) {
1548 l2cap_le_start(chan
);
1549 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1550 l2cap_chan_ready(chan
);
1552 } else if (chan
->state
== BT_CONNECT
) {
1553 l2cap_do_start(chan
);
1556 l2cap_chan_unlock(chan
);
1559 mutex_unlock(&conn
->chan_lock
);
1561 queue_work(hcon
->hdev
->workqueue
, &conn
->pending_rx_work
);
1564 /* Notify sockets that we cannot guaranty reliability anymore */
1565 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1567 struct l2cap_chan
*chan
;
1569 BT_DBG("conn %p", conn
);
1571 mutex_lock(&conn
->chan_lock
);
1573 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1574 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1575 l2cap_chan_set_err(chan
, err
);
1578 mutex_unlock(&conn
->chan_lock
);
1581 static void l2cap_info_timeout(struct work_struct
*work
)
1583 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1586 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1587 conn
->info_ident
= 0;
1589 l2cap_conn_start(conn
);
1594 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1595 * callback is called during registration. The ->remove callback is called
1596 * during unregistration.
1597 * An l2cap_user object can either be explicitly unregistered or when the
1598 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1599 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1600 * External modules must own a reference to the l2cap_conn object if they intend
1601 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1602 * any time if they don't.
1605 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1607 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1610 /* We need to check whether l2cap_conn is registered. If it is not, we
1611 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1612 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1613 * relies on the parent hci_conn object to be locked. This itself relies
1614 * on the hci_dev object to be locked. So we must lock the hci device
1619 if (user
->list
.next
|| user
->list
.prev
) {
1624 /* conn->hchan is NULL after l2cap_conn_del() was called */
1630 ret
= user
->probe(conn
, user
);
1634 list_add(&user
->list
, &conn
->users
);
1638 hci_dev_unlock(hdev
);
1641 EXPORT_SYMBOL(l2cap_register_user
);
1643 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1645 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1649 if (!user
->list
.next
|| !user
->list
.prev
)
1652 list_del(&user
->list
);
1653 user
->list
.next
= NULL
;
1654 user
->list
.prev
= NULL
;
1655 user
->remove(conn
, user
);
1658 hci_dev_unlock(hdev
);
1660 EXPORT_SYMBOL(l2cap_unregister_user
);
1662 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1664 struct l2cap_user
*user
;
1666 while (!list_empty(&conn
->users
)) {
1667 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1668 list_del(&user
->list
);
1669 user
->list
.next
= NULL
;
1670 user
->list
.prev
= NULL
;
1671 user
->remove(conn
, user
);
1675 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1677 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1678 struct l2cap_chan
*chan
, *l
;
1683 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1685 kfree_skb(conn
->rx_skb
);
1687 skb_queue_purge(&conn
->pending_rx
);
1689 /* We can not call flush_work(&conn->pending_rx_work) here since we
1690 * might block if we are running on a worker from the same workqueue
1691 * pending_rx_work is waiting on.
1693 if (work_pending(&conn
->pending_rx_work
))
1694 cancel_work_sync(&conn
->pending_rx_work
);
1696 l2cap_unregister_all_users(conn
);
1698 mutex_lock(&conn
->chan_lock
);
1701 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1702 l2cap_chan_hold(chan
);
1703 l2cap_chan_lock(chan
);
1705 l2cap_chan_del(chan
, err
);
1707 l2cap_chan_unlock(chan
);
1709 chan
->ops
->close(chan
);
1710 l2cap_chan_put(chan
);
1713 mutex_unlock(&conn
->chan_lock
);
1715 hci_chan_del(conn
->hchan
);
1717 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1718 cancel_delayed_work_sync(&conn
->info_timer
);
1720 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1721 cancel_delayed_work_sync(&conn
->security_timer
);
1722 smp_chan_destroy(conn
);
1725 hcon
->l2cap_data
= NULL
;
1727 l2cap_conn_put(conn
);
1730 static void security_timeout(struct work_struct
*work
)
1732 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1733 security_timer
.work
);
1735 BT_DBG("conn %p", conn
);
1737 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1738 smp_chan_destroy(conn
);
1739 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1743 static void l2cap_conn_free(struct kref
*ref
)
1745 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1747 hci_conn_put(conn
->hcon
);
1751 void l2cap_conn_get(struct l2cap_conn
*conn
)
1753 kref_get(&conn
->ref
);
1755 EXPORT_SYMBOL(l2cap_conn_get
);
1757 void l2cap_conn_put(struct l2cap_conn
*conn
)
1759 kref_put(&conn
->ref
, l2cap_conn_free
);
1761 EXPORT_SYMBOL(l2cap_conn_put
);
1763 /* ---- Socket interface ---- */
1765 /* Find socket with psm and source / destination bdaddr.
1766 * Returns closest match.
1768 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1773 struct l2cap_chan
*c
, *c1
= NULL
;
1775 read_lock(&chan_list_lock
);
1777 list_for_each_entry(c
, &chan_list
, global_l
) {
1778 if (state
&& c
->state
!= state
)
1781 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1784 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1787 if (c
->psm
== psm
) {
1788 int src_match
, dst_match
;
1789 int src_any
, dst_any
;
1792 src_match
= !bacmp(&c
->src
, src
);
1793 dst_match
= !bacmp(&c
->dst
, dst
);
1794 if (src_match
&& dst_match
) {
1795 read_unlock(&chan_list_lock
);
1800 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1801 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1802 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1803 (src_any
&& dst_any
))
1808 read_unlock(&chan_list_lock
);
1813 static void l2cap_monitor_timeout(struct work_struct
*work
)
1815 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1816 monitor_timer
.work
);
1818 BT_DBG("chan %p", chan
);
1820 l2cap_chan_lock(chan
);
1823 l2cap_chan_unlock(chan
);
1824 l2cap_chan_put(chan
);
1828 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1830 l2cap_chan_unlock(chan
);
1831 l2cap_chan_put(chan
);
1834 static void l2cap_retrans_timeout(struct work_struct
*work
)
1836 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1837 retrans_timer
.work
);
1839 BT_DBG("chan %p", chan
);
1841 l2cap_chan_lock(chan
);
1844 l2cap_chan_unlock(chan
);
1845 l2cap_chan_put(chan
);
1849 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1850 l2cap_chan_unlock(chan
);
1851 l2cap_chan_put(chan
);
1854 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1855 struct sk_buff_head
*skbs
)
1857 struct sk_buff
*skb
;
1858 struct l2cap_ctrl
*control
;
1860 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1862 if (__chan_is_moving(chan
))
1865 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1867 while (!skb_queue_empty(&chan
->tx_q
)) {
1869 skb
= skb_dequeue(&chan
->tx_q
);
1871 bt_cb(skb
)->control
.retries
= 1;
1872 control
= &bt_cb(skb
)->control
;
1874 control
->reqseq
= 0;
1875 control
->txseq
= chan
->next_tx_seq
;
1877 __pack_control(chan
, control
, skb
);
1879 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1880 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1881 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1884 l2cap_do_send(chan
, skb
);
1886 BT_DBG("Sent txseq %u", control
->txseq
);
1888 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1889 chan
->frames_sent
++;
1893 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1895 struct sk_buff
*skb
, *tx_skb
;
1896 struct l2cap_ctrl
*control
;
1899 BT_DBG("chan %p", chan
);
1901 if (chan
->state
!= BT_CONNECTED
)
1904 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1907 if (__chan_is_moving(chan
))
1910 while (chan
->tx_send_head
&&
1911 chan
->unacked_frames
< chan
->remote_tx_win
&&
1912 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1914 skb
= chan
->tx_send_head
;
1916 bt_cb(skb
)->control
.retries
= 1;
1917 control
= &bt_cb(skb
)->control
;
1919 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1922 control
->reqseq
= chan
->buffer_seq
;
1923 chan
->last_acked_seq
= chan
->buffer_seq
;
1924 control
->txseq
= chan
->next_tx_seq
;
1926 __pack_control(chan
, control
, skb
);
1928 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1929 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1930 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1933 /* Clone after data has been modified. Data is assumed to be
1934 read-only (for locking purposes) on cloned sk_buffs.
1936 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1941 __set_retrans_timer(chan
);
1943 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1944 chan
->unacked_frames
++;
1945 chan
->frames_sent
++;
1948 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1949 chan
->tx_send_head
= NULL
;
1951 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1953 l2cap_do_send(chan
, tx_skb
);
1954 BT_DBG("Sent txseq %u", control
->txseq
);
1957 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1958 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1963 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1965 struct l2cap_ctrl control
;
1966 struct sk_buff
*skb
;
1967 struct sk_buff
*tx_skb
;
1970 BT_DBG("chan %p", chan
);
1972 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1975 if (__chan_is_moving(chan
))
1978 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1979 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1981 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1983 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1988 bt_cb(skb
)->control
.retries
++;
1989 control
= bt_cb(skb
)->control
;
1991 if (chan
->max_tx
!= 0 &&
1992 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1993 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1994 l2cap_send_disconn_req(chan
, ECONNRESET
);
1995 l2cap_seq_list_clear(&chan
->retrans_list
);
1999 control
.reqseq
= chan
->buffer_seq
;
2000 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2005 if (skb_cloned(skb
)) {
2006 /* Cloned sk_buffs are read-only, so we need a
2009 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
2011 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2015 l2cap_seq_list_clear(&chan
->retrans_list
);
2019 /* Update skb contents */
2020 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
2021 put_unaligned_le32(__pack_extended_control(&control
),
2022 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2024 put_unaligned_le16(__pack_enhanced_control(&control
),
2025 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2028 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2029 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
2030 put_unaligned_le16(fcs
, skb_put(tx_skb
,
2034 l2cap_do_send(chan
, tx_skb
);
2036 BT_DBG("Resent txseq %d", control
.txseq
);
2038 chan
->last_acked_seq
= chan
->buffer_seq
;
2042 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2043 struct l2cap_ctrl
*control
)
2045 BT_DBG("chan %p, control %p", chan
, control
);
2047 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2048 l2cap_ertm_resend(chan
);
2051 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2052 struct l2cap_ctrl
*control
)
2054 struct sk_buff
*skb
;
2056 BT_DBG("chan %p, control %p", chan
, control
);
2059 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2061 l2cap_seq_list_clear(&chan
->retrans_list
);
2063 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2066 if (chan
->unacked_frames
) {
2067 skb_queue_walk(&chan
->tx_q
, skb
) {
2068 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2069 skb
== chan
->tx_send_head
)
2073 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2074 if (skb
== chan
->tx_send_head
)
2077 l2cap_seq_list_append(&chan
->retrans_list
,
2078 bt_cb(skb
)->control
.txseq
);
2081 l2cap_ertm_resend(chan
);
2085 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2087 struct l2cap_ctrl control
;
2088 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2089 chan
->last_acked_seq
);
2092 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2093 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2095 memset(&control
, 0, sizeof(control
));
2098 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2099 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2100 __clear_ack_timer(chan
);
2101 control
.super
= L2CAP_SUPER_RNR
;
2102 control
.reqseq
= chan
->buffer_seq
;
2103 l2cap_send_sframe(chan
, &control
);
2105 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2106 l2cap_ertm_send(chan
);
2107 /* If any i-frames were sent, they included an ack */
2108 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2112 /* Ack now if the window is 3/4ths full.
2113 * Calculate without mul or div
2115 threshold
= chan
->ack_win
;
2116 threshold
+= threshold
<< 1;
2119 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2122 if (frames_to_ack
>= threshold
) {
2123 __clear_ack_timer(chan
);
2124 control
.super
= L2CAP_SUPER_RR
;
2125 control
.reqseq
= chan
->buffer_seq
;
2126 l2cap_send_sframe(chan
, &control
);
2131 __set_ack_timer(chan
);
2135 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2136 struct msghdr
*msg
, int len
,
2137 int count
, struct sk_buff
*skb
)
2139 struct l2cap_conn
*conn
= chan
->conn
;
2140 struct sk_buff
**frag
;
2143 if (chan
->ops
->memcpy_fromiovec(chan
, skb_put(skb
, count
),
2144 msg
->msg_iov
, count
))
2150 /* Continuation fragments (no L2CAP header) */
2151 frag
= &skb_shinfo(skb
)->frag_list
;
2153 struct sk_buff
*tmp
;
2155 count
= min_t(unsigned int, conn
->mtu
, len
);
2157 tmp
= chan
->ops
->alloc_skb(chan
, 0, count
,
2158 msg
->msg_flags
& MSG_DONTWAIT
);
2160 return PTR_ERR(tmp
);
2164 if (chan
->ops
->memcpy_fromiovec(chan
, skb_put(*frag
, count
),
2165 msg
->msg_iov
, count
))
2171 skb
->len
+= (*frag
)->len
;
2172 skb
->data_len
+= (*frag
)->len
;
2174 frag
= &(*frag
)->next
;
2180 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2181 struct msghdr
*msg
, size_t len
)
2183 struct l2cap_conn
*conn
= chan
->conn
;
2184 struct sk_buff
*skb
;
2185 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2186 struct l2cap_hdr
*lh
;
2188 BT_DBG("chan %p psm 0x%2.2x len %zu", chan
,
2189 __le16_to_cpu(chan
->psm
), len
);
2191 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2193 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2194 msg
->msg_flags
& MSG_DONTWAIT
);
2198 /* Create L2CAP header */
2199 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2200 lh
->cid
= cpu_to_le16(chan
->dcid
);
2201 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2202 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2204 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2205 if (unlikely(err
< 0)) {
2207 return ERR_PTR(err
);
2212 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2213 struct msghdr
*msg
, size_t len
)
2215 struct l2cap_conn
*conn
= chan
->conn
;
2216 struct sk_buff
*skb
;
2218 struct l2cap_hdr
*lh
;
2220 BT_DBG("chan %p len %zu", chan
, len
);
2222 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2224 skb
= chan
->ops
->alloc_skb(chan
, L2CAP_HDR_SIZE
, count
,
2225 msg
->msg_flags
& MSG_DONTWAIT
);
2229 /* Create L2CAP header */
2230 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2231 lh
->cid
= cpu_to_le16(chan
->dcid
);
2232 lh
->len
= cpu_to_le16(len
);
2234 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2235 if (unlikely(err
< 0)) {
2237 return ERR_PTR(err
);
2242 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2243 struct msghdr
*msg
, size_t len
,
2246 struct l2cap_conn
*conn
= chan
->conn
;
2247 struct sk_buff
*skb
;
2248 int err
, count
, hlen
;
2249 struct l2cap_hdr
*lh
;
2251 BT_DBG("chan %p len %zu", chan
, len
);
2254 return ERR_PTR(-ENOTCONN
);
2256 hlen
= __ertm_hdr_size(chan
);
2259 hlen
+= L2CAP_SDULEN_SIZE
;
2261 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2262 hlen
+= L2CAP_FCS_SIZE
;
2264 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2266 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2267 msg
->msg_flags
& MSG_DONTWAIT
);
2271 /* Create L2CAP header */
2272 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2273 lh
->cid
= cpu_to_le16(chan
->dcid
);
2274 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2276 /* Control header is populated later */
2277 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2278 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2280 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2283 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2285 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2286 if (unlikely(err
< 0)) {
2288 return ERR_PTR(err
);
2291 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2292 bt_cb(skb
)->control
.retries
= 0;
2296 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2297 struct sk_buff_head
*seg_queue
,
2298 struct msghdr
*msg
, size_t len
)
2300 struct sk_buff
*skb
;
2305 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2307 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2308 * so fragmented skbs are not used. The HCI layer's handling
2309 * of fragmented skbs is not compatible with ERTM's queueing.
2312 /* PDU size is derived from the HCI MTU */
2313 pdu_len
= chan
->conn
->mtu
;
2315 /* Constrain PDU size for BR/EDR connections */
2317 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2319 /* Adjust for largest possible L2CAP overhead. */
2321 pdu_len
-= L2CAP_FCS_SIZE
;
2323 pdu_len
-= __ertm_hdr_size(chan
);
2325 /* Remote device may have requested smaller PDUs */
2326 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2328 if (len
<= pdu_len
) {
2329 sar
= L2CAP_SAR_UNSEGMENTED
;
2333 sar
= L2CAP_SAR_START
;
2335 pdu_len
-= L2CAP_SDULEN_SIZE
;
2339 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2342 __skb_queue_purge(seg_queue
);
2343 return PTR_ERR(skb
);
2346 bt_cb(skb
)->control
.sar
= sar
;
2347 __skb_queue_tail(seg_queue
, skb
);
2352 pdu_len
+= L2CAP_SDULEN_SIZE
;
2355 if (len
<= pdu_len
) {
2356 sar
= L2CAP_SAR_END
;
2359 sar
= L2CAP_SAR_CONTINUE
;
2366 static struct sk_buff
*l2cap_create_le_flowctl_pdu(struct l2cap_chan
*chan
,
2368 size_t len
, u16 sdulen
)
2370 struct l2cap_conn
*conn
= chan
->conn
;
2371 struct sk_buff
*skb
;
2372 int err
, count
, hlen
;
2373 struct l2cap_hdr
*lh
;
2375 BT_DBG("chan %p len %zu", chan
, len
);
2378 return ERR_PTR(-ENOTCONN
);
2380 hlen
= L2CAP_HDR_SIZE
;
2383 hlen
+= L2CAP_SDULEN_SIZE
;
2385 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2387 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2388 msg
->msg_flags
& MSG_DONTWAIT
);
2392 /* Create L2CAP header */
2393 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2394 lh
->cid
= cpu_to_le16(chan
->dcid
);
2395 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2398 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2400 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2401 if (unlikely(err
< 0)) {
2403 return ERR_PTR(err
);
2409 static int l2cap_segment_le_sdu(struct l2cap_chan
*chan
,
2410 struct sk_buff_head
*seg_queue
,
2411 struct msghdr
*msg
, size_t len
)
2413 struct sk_buff
*skb
;
2417 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2419 pdu_len
= chan
->conn
->mtu
- L2CAP_HDR_SIZE
;
2421 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2424 pdu_len
-= L2CAP_SDULEN_SIZE
;
2430 skb
= l2cap_create_le_flowctl_pdu(chan
, msg
, pdu_len
, sdu_len
);
2432 __skb_queue_purge(seg_queue
);
2433 return PTR_ERR(skb
);
2436 __skb_queue_tail(seg_queue
, skb
);
2442 pdu_len
+= L2CAP_SDULEN_SIZE
;
2449 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
2451 struct sk_buff
*skb
;
2453 struct sk_buff_head seg_queue
;
2458 /* Connectionless channel */
2459 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2460 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
2462 return PTR_ERR(skb
);
2464 /* Channel lock is released before requesting new skb and then
2465 * reacquired thus we need to recheck channel state.
2467 if (chan
->state
!= BT_CONNECTED
) {
2472 l2cap_do_send(chan
, skb
);
2476 switch (chan
->mode
) {
2477 case L2CAP_MODE_LE_FLOWCTL
:
2478 /* Check outgoing MTU */
2479 if (len
> chan
->omtu
)
2482 if (!chan
->tx_credits
)
2485 __skb_queue_head_init(&seg_queue
);
2487 err
= l2cap_segment_le_sdu(chan
, &seg_queue
, msg
, len
);
2489 if (chan
->state
!= BT_CONNECTED
) {
2490 __skb_queue_purge(&seg_queue
);
2497 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2499 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
2500 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
2504 if (!chan
->tx_credits
)
2505 chan
->ops
->suspend(chan
);
2511 case L2CAP_MODE_BASIC
:
2512 /* Check outgoing MTU */
2513 if (len
> chan
->omtu
)
2516 /* Create a basic PDU */
2517 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
2519 return PTR_ERR(skb
);
2521 /* Channel lock is released before requesting new skb and then
2522 * reacquired thus we need to recheck channel state.
2524 if (chan
->state
!= BT_CONNECTED
) {
2529 l2cap_do_send(chan
, skb
);
2533 case L2CAP_MODE_ERTM
:
2534 case L2CAP_MODE_STREAMING
:
2535 /* Check outgoing MTU */
2536 if (len
> chan
->omtu
) {
2541 __skb_queue_head_init(&seg_queue
);
2543 /* Do segmentation before calling in to the state machine,
2544 * since it's possible to block while waiting for memory
2547 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2549 /* The channel could have been closed while segmenting,
2550 * check that it is still connected.
2552 if (chan
->state
!= BT_CONNECTED
) {
2553 __skb_queue_purge(&seg_queue
);
2560 if (chan
->mode
== L2CAP_MODE_ERTM
)
2561 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2563 l2cap_streaming_send(chan
, &seg_queue
);
2567 /* If the skbs were not queued for sending, they'll still be in
2568 * seg_queue and need to be purged.
2570 __skb_queue_purge(&seg_queue
);
2574 BT_DBG("bad state %1.1x", chan
->mode
);
2580 EXPORT_SYMBOL_GPL(l2cap_chan_send
);
2582 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2584 struct l2cap_ctrl control
;
2587 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2589 memset(&control
, 0, sizeof(control
));
2591 control
.super
= L2CAP_SUPER_SREJ
;
2593 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2594 seq
= __next_seq(chan
, seq
)) {
2595 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2596 control
.reqseq
= seq
;
2597 l2cap_send_sframe(chan
, &control
);
2598 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2602 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2605 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2607 struct l2cap_ctrl control
;
2609 BT_DBG("chan %p", chan
);
2611 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2614 memset(&control
, 0, sizeof(control
));
2616 control
.super
= L2CAP_SUPER_SREJ
;
2617 control
.reqseq
= chan
->srej_list
.tail
;
2618 l2cap_send_sframe(chan
, &control
);
2621 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2623 struct l2cap_ctrl control
;
2627 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2629 memset(&control
, 0, sizeof(control
));
2631 control
.super
= L2CAP_SUPER_SREJ
;
2633 /* Capture initial list head to allow only one pass through the list. */
2634 initial_head
= chan
->srej_list
.head
;
2637 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2638 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2641 control
.reqseq
= seq
;
2642 l2cap_send_sframe(chan
, &control
);
2643 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2644 } while (chan
->srej_list
.head
!= initial_head
);
2647 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2649 struct sk_buff
*acked_skb
;
2652 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2654 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2657 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2658 chan
->expected_ack_seq
, chan
->unacked_frames
);
2660 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2661 ackseq
= __next_seq(chan
, ackseq
)) {
2663 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2665 skb_unlink(acked_skb
, &chan
->tx_q
);
2666 kfree_skb(acked_skb
);
2667 chan
->unacked_frames
--;
2671 chan
->expected_ack_seq
= reqseq
;
2673 if (chan
->unacked_frames
== 0)
2674 __clear_retrans_timer(chan
);
2676 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2679 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2681 BT_DBG("chan %p", chan
);
2683 chan
->expected_tx_seq
= chan
->buffer_seq
;
2684 l2cap_seq_list_clear(&chan
->srej_list
);
2685 skb_queue_purge(&chan
->srej_q
);
2686 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2689 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2690 struct l2cap_ctrl
*control
,
2691 struct sk_buff_head
*skbs
, u8 event
)
2693 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2697 case L2CAP_EV_DATA_REQUEST
:
2698 if (chan
->tx_send_head
== NULL
)
2699 chan
->tx_send_head
= skb_peek(skbs
);
2701 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2702 l2cap_ertm_send(chan
);
2704 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2705 BT_DBG("Enter LOCAL_BUSY");
2706 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2708 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2709 /* The SREJ_SENT state must be aborted if we are to
2710 * enter the LOCAL_BUSY state.
2712 l2cap_abort_rx_srej_sent(chan
);
2715 l2cap_send_ack(chan
);
2718 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2719 BT_DBG("Exit LOCAL_BUSY");
2720 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2722 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2723 struct l2cap_ctrl local_control
;
2725 memset(&local_control
, 0, sizeof(local_control
));
2726 local_control
.sframe
= 1;
2727 local_control
.super
= L2CAP_SUPER_RR
;
2728 local_control
.poll
= 1;
2729 local_control
.reqseq
= chan
->buffer_seq
;
2730 l2cap_send_sframe(chan
, &local_control
);
2732 chan
->retry_count
= 1;
2733 __set_monitor_timer(chan
);
2734 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2737 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2738 l2cap_process_reqseq(chan
, control
->reqseq
);
2740 case L2CAP_EV_EXPLICIT_POLL
:
2741 l2cap_send_rr_or_rnr(chan
, 1);
2742 chan
->retry_count
= 1;
2743 __set_monitor_timer(chan
);
2744 __clear_ack_timer(chan
);
2745 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2747 case L2CAP_EV_RETRANS_TO
:
2748 l2cap_send_rr_or_rnr(chan
, 1);
2749 chan
->retry_count
= 1;
2750 __set_monitor_timer(chan
);
2751 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2753 case L2CAP_EV_RECV_FBIT
:
2754 /* Nothing to process */
2761 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2762 struct l2cap_ctrl
*control
,
2763 struct sk_buff_head
*skbs
, u8 event
)
2765 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2769 case L2CAP_EV_DATA_REQUEST
:
2770 if (chan
->tx_send_head
== NULL
)
2771 chan
->tx_send_head
= skb_peek(skbs
);
2772 /* Queue data, but don't send. */
2773 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2775 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2776 BT_DBG("Enter LOCAL_BUSY");
2777 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2779 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2780 /* The SREJ_SENT state must be aborted if we are to
2781 * enter the LOCAL_BUSY state.
2783 l2cap_abort_rx_srej_sent(chan
);
2786 l2cap_send_ack(chan
);
2789 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2790 BT_DBG("Exit LOCAL_BUSY");
2791 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2793 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2794 struct l2cap_ctrl local_control
;
2795 memset(&local_control
, 0, sizeof(local_control
));
2796 local_control
.sframe
= 1;
2797 local_control
.super
= L2CAP_SUPER_RR
;
2798 local_control
.poll
= 1;
2799 local_control
.reqseq
= chan
->buffer_seq
;
2800 l2cap_send_sframe(chan
, &local_control
);
2802 chan
->retry_count
= 1;
2803 __set_monitor_timer(chan
);
2804 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2807 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2808 l2cap_process_reqseq(chan
, control
->reqseq
);
2812 case L2CAP_EV_RECV_FBIT
:
2813 if (control
&& control
->final
) {
2814 __clear_monitor_timer(chan
);
2815 if (chan
->unacked_frames
> 0)
2816 __set_retrans_timer(chan
);
2817 chan
->retry_count
= 0;
2818 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2819 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2822 case L2CAP_EV_EXPLICIT_POLL
:
2825 case L2CAP_EV_MONITOR_TO
:
2826 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2827 l2cap_send_rr_or_rnr(chan
, 1);
2828 __set_monitor_timer(chan
);
2829 chan
->retry_count
++;
2831 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2839 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2840 struct sk_buff_head
*skbs
, u8 event
)
2842 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2843 chan
, control
, skbs
, event
, chan
->tx_state
);
2845 switch (chan
->tx_state
) {
2846 case L2CAP_TX_STATE_XMIT
:
2847 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2849 case L2CAP_TX_STATE_WAIT_F
:
2850 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2858 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2859 struct l2cap_ctrl
*control
)
2861 BT_DBG("chan %p, control %p", chan
, control
);
2862 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2865 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2866 struct l2cap_ctrl
*control
)
2868 BT_DBG("chan %p, control %p", chan
, control
);
2869 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2872 /* Copy frame to all raw sockets on that connection */
2873 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2875 struct sk_buff
*nskb
;
2876 struct l2cap_chan
*chan
;
2878 BT_DBG("conn %p", conn
);
2880 mutex_lock(&conn
->chan_lock
);
2882 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2883 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2886 /* Don't send frame to the channel it came from */
2887 if (bt_cb(skb
)->chan
== chan
)
2890 nskb
= skb_clone(skb
, GFP_KERNEL
);
2893 if (chan
->ops
->recv(chan
, nskb
))
2897 mutex_unlock(&conn
->chan_lock
);
2900 /* ---- L2CAP signalling commands ---- */
2901 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2902 u8 ident
, u16 dlen
, void *data
)
2904 struct sk_buff
*skb
, **frag
;
2905 struct l2cap_cmd_hdr
*cmd
;
2906 struct l2cap_hdr
*lh
;
2909 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2910 conn
, code
, ident
, dlen
);
2912 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2915 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2916 count
= min_t(unsigned int, conn
->mtu
, len
);
2918 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2922 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2923 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2925 if (conn
->hcon
->type
== LE_LINK
)
2926 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2928 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2930 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2933 cmd
->len
= cpu_to_le16(dlen
);
2936 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2937 memcpy(skb_put(skb
, count
), data
, count
);
2943 /* Continuation fragments (no L2CAP header) */
2944 frag
= &skb_shinfo(skb
)->frag_list
;
2946 count
= min_t(unsigned int, conn
->mtu
, len
);
2948 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2952 memcpy(skb_put(*frag
, count
), data
, count
);
2957 frag
= &(*frag
)->next
;
2967 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2970 struct l2cap_conf_opt
*opt
= *ptr
;
2973 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2981 *val
= *((u8
*) opt
->val
);
2985 *val
= get_unaligned_le16(opt
->val
);
2989 *val
= get_unaligned_le32(opt
->val
);
2993 *val
= (unsigned long) opt
->val
;
2997 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
3001 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
3003 struct l2cap_conf_opt
*opt
= *ptr
;
3005 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
3012 *((u8
*) opt
->val
) = val
;
3016 put_unaligned_le16(val
, opt
->val
);
3020 put_unaligned_le32(val
, opt
->val
);
3024 memcpy(opt
->val
, (void *) val
, len
);
3028 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
3031 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
3033 struct l2cap_conf_efs efs
;
3035 switch (chan
->mode
) {
3036 case L2CAP_MODE_ERTM
:
3037 efs
.id
= chan
->local_id
;
3038 efs
.stype
= chan
->local_stype
;
3039 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3040 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3041 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
3042 efs
.flush_to
= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
3045 case L2CAP_MODE_STREAMING
:
3047 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
3048 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3049 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3058 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3059 (unsigned long) &efs
);
3062 static void l2cap_ack_timeout(struct work_struct
*work
)
3064 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3068 BT_DBG("chan %p", chan
);
3070 l2cap_chan_lock(chan
);
3072 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3073 chan
->last_acked_seq
);
3076 l2cap_send_rr_or_rnr(chan
, 0);
3078 l2cap_chan_unlock(chan
);
3079 l2cap_chan_put(chan
);
3082 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3086 chan
->next_tx_seq
= 0;
3087 chan
->expected_tx_seq
= 0;
3088 chan
->expected_ack_seq
= 0;
3089 chan
->unacked_frames
= 0;
3090 chan
->buffer_seq
= 0;
3091 chan
->frames_sent
= 0;
3092 chan
->last_acked_seq
= 0;
3094 chan
->sdu_last_frag
= NULL
;
3097 skb_queue_head_init(&chan
->tx_q
);
3099 chan
->local_amp_id
= AMP_ID_BREDR
;
3100 chan
->move_id
= AMP_ID_BREDR
;
3101 chan
->move_state
= L2CAP_MOVE_STABLE
;
3102 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3104 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3107 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3108 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3110 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3111 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3112 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3114 skb_queue_head_init(&chan
->srej_q
);
3116 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3120 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3122 l2cap_seq_list_free(&chan
->srej_list
);
3127 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3130 case L2CAP_MODE_STREAMING
:
3131 case L2CAP_MODE_ERTM
:
3132 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3136 return L2CAP_MODE_BASIC
;
3140 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3142 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3145 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3147 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
3150 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3151 struct l2cap_conf_rfc
*rfc
)
3153 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3154 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3156 /* Class 1 devices have must have ERTM timeouts
3157 * exceeding the Link Supervision Timeout. The
3158 * default Link Supervision Timeout for AMP
3159 * controllers is 10 seconds.
3161 * Class 1 devices use 0xffffffff for their
3162 * best-effort flush timeout, so the clamping logic
3163 * will result in a timeout that meets the above
3164 * requirement. ERTM timeouts are 16-bit values, so
3165 * the maximum timeout is 65.535 seconds.
3168 /* Convert timeout to milliseconds and round */
3169 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3171 /* This is the recommended formula for class 2 devices
3172 * that start ERTM timers when packets are sent to the
3175 ertm_to
= 3 * ertm_to
+ 500;
3177 if (ertm_to
> 0xffff)
3180 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3181 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3183 rfc
->retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3184 rfc
->monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3188 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3190 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3191 __l2cap_ews_supported(chan
->conn
)) {
3192 /* use extended control field */
3193 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3194 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3196 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3197 L2CAP_DEFAULT_TX_WINDOW
);
3198 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3200 chan
->ack_win
= chan
->tx_win
;
3203 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3205 struct l2cap_conf_req
*req
= data
;
3206 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3207 void *ptr
= req
->data
;
3210 BT_DBG("chan %p", chan
);
3212 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3215 switch (chan
->mode
) {
3216 case L2CAP_MODE_STREAMING
:
3217 case L2CAP_MODE_ERTM
:
3218 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3221 if (__l2cap_efs_supported(chan
->conn
))
3222 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3226 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3231 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3232 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3234 switch (chan
->mode
) {
3235 case L2CAP_MODE_BASIC
:
3236 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3237 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3240 rfc
.mode
= L2CAP_MODE_BASIC
;
3242 rfc
.max_transmit
= 0;
3243 rfc
.retrans_timeout
= 0;
3244 rfc
.monitor_timeout
= 0;
3245 rfc
.max_pdu_size
= 0;
3247 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3248 (unsigned long) &rfc
);
3251 case L2CAP_MODE_ERTM
:
3252 rfc
.mode
= L2CAP_MODE_ERTM
;
3253 rfc
.max_transmit
= chan
->max_tx
;
3255 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3257 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3258 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3260 rfc
.max_pdu_size
= cpu_to_le16(size
);
3262 l2cap_txwin_setup(chan
);
3264 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3265 L2CAP_DEFAULT_TX_WINDOW
);
3267 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3268 (unsigned long) &rfc
);
3270 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3271 l2cap_add_opt_efs(&ptr
, chan
);
3273 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3274 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3277 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3278 if (chan
->fcs
== L2CAP_FCS_NONE
||
3279 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3280 chan
->fcs
= L2CAP_FCS_NONE
;
3281 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3286 case L2CAP_MODE_STREAMING
:
3287 l2cap_txwin_setup(chan
);
3288 rfc
.mode
= L2CAP_MODE_STREAMING
;
3290 rfc
.max_transmit
= 0;
3291 rfc
.retrans_timeout
= 0;
3292 rfc
.monitor_timeout
= 0;
3294 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3295 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3297 rfc
.max_pdu_size
= cpu_to_le16(size
);
3299 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3300 (unsigned long) &rfc
);
3302 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3303 l2cap_add_opt_efs(&ptr
, chan
);
3305 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3306 if (chan
->fcs
== L2CAP_FCS_NONE
||
3307 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3308 chan
->fcs
= L2CAP_FCS_NONE
;
3309 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3315 req
->dcid
= cpu_to_le16(chan
->dcid
);
3316 req
->flags
= cpu_to_le16(0);
3321 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
3323 struct l2cap_conf_rsp
*rsp
= data
;
3324 void *ptr
= rsp
->data
;
3325 void *req
= chan
->conf_req
;
3326 int len
= chan
->conf_len
;
3327 int type
, hint
, olen
;
3329 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3330 struct l2cap_conf_efs efs
;
3332 u16 mtu
= L2CAP_DEFAULT_MTU
;
3333 u16 result
= L2CAP_CONF_SUCCESS
;
3336 BT_DBG("chan %p", chan
);
3338 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3339 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
3341 hint
= type
& L2CAP_CONF_HINT
;
3342 type
&= L2CAP_CONF_MASK
;
3345 case L2CAP_CONF_MTU
:
3349 case L2CAP_CONF_FLUSH_TO
:
3350 chan
->flush_to
= val
;
3353 case L2CAP_CONF_QOS
:
3356 case L2CAP_CONF_RFC
:
3357 if (olen
== sizeof(rfc
))
3358 memcpy(&rfc
, (void *) val
, olen
);
3361 case L2CAP_CONF_FCS
:
3362 if (val
== L2CAP_FCS_NONE
)
3363 set_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
);
3366 case L2CAP_CONF_EFS
:
3368 if (olen
== sizeof(efs
))
3369 memcpy(&efs
, (void *) val
, olen
);
3372 case L2CAP_CONF_EWS
:
3373 if (!chan
->conn
->hs_enabled
)
3374 return -ECONNREFUSED
;
3376 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3377 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3378 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3379 chan
->remote_tx_win
= val
;
3386 result
= L2CAP_CONF_UNKNOWN
;
3387 *((u8
*) ptr
++) = type
;
3392 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3395 switch (chan
->mode
) {
3396 case L2CAP_MODE_STREAMING
:
3397 case L2CAP_MODE_ERTM
:
3398 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3399 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3400 chan
->conn
->feat_mask
);
3405 if (__l2cap_efs_supported(chan
->conn
))
3406 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3408 return -ECONNREFUSED
;
3411 if (chan
->mode
!= rfc
.mode
)
3412 return -ECONNREFUSED
;
3418 if (chan
->mode
!= rfc
.mode
) {
3419 result
= L2CAP_CONF_UNACCEPT
;
3420 rfc
.mode
= chan
->mode
;
3422 if (chan
->num_conf_rsp
== 1)
3423 return -ECONNREFUSED
;
3425 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3426 (unsigned long) &rfc
);
3429 if (result
== L2CAP_CONF_SUCCESS
) {
3430 /* Configure output options and let the other side know
3431 * which ones we don't like. */
3433 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3434 result
= L2CAP_CONF_UNACCEPT
;
3437 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3439 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3442 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3443 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3444 efs
.stype
!= chan
->local_stype
) {
3446 result
= L2CAP_CONF_UNACCEPT
;
3448 if (chan
->num_conf_req
>= 1)
3449 return -ECONNREFUSED
;
3451 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3453 (unsigned long) &efs
);
3455 /* Send PENDING Conf Rsp */
3456 result
= L2CAP_CONF_PENDING
;
3457 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3462 case L2CAP_MODE_BASIC
:
3463 chan
->fcs
= L2CAP_FCS_NONE
;
3464 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3467 case L2CAP_MODE_ERTM
:
3468 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3469 chan
->remote_tx_win
= rfc
.txwin_size
;
3471 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3473 chan
->remote_max_tx
= rfc
.max_transmit
;
3475 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3476 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3477 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3478 rfc
.max_pdu_size
= cpu_to_le16(size
);
3479 chan
->remote_mps
= size
;
3481 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3483 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3485 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3486 sizeof(rfc
), (unsigned long) &rfc
);
3488 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3489 chan
->remote_id
= efs
.id
;
3490 chan
->remote_stype
= efs
.stype
;
3491 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3492 chan
->remote_flush_to
=
3493 le32_to_cpu(efs
.flush_to
);
3494 chan
->remote_acc_lat
=
3495 le32_to_cpu(efs
.acc_lat
);
3496 chan
->remote_sdu_itime
=
3497 le32_to_cpu(efs
.sdu_itime
);
3498 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3500 (unsigned long) &efs
);
3504 case L2CAP_MODE_STREAMING
:
3505 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3506 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3507 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3508 rfc
.max_pdu_size
= cpu_to_le16(size
);
3509 chan
->remote_mps
= size
;
3511 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3513 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3514 (unsigned long) &rfc
);
3519 result
= L2CAP_CONF_UNACCEPT
;
3521 memset(&rfc
, 0, sizeof(rfc
));
3522 rfc
.mode
= chan
->mode
;
3525 if (result
== L2CAP_CONF_SUCCESS
)
3526 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3528 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3529 rsp
->result
= cpu_to_le16(result
);
3530 rsp
->flags
= cpu_to_le16(0);
3535 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3536 void *data
, u16
*result
)
3538 struct l2cap_conf_req
*req
= data
;
3539 void *ptr
= req
->data
;
3542 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3543 struct l2cap_conf_efs efs
;
3545 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3547 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3548 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3551 case L2CAP_CONF_MTU
:
3552 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3553 *result
= L2CAP_CONF_UNACCEPT
;
3554 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3557 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3560 case L2CAP_CONF_FLUSH_TO
:
3561 chan
->flush_to
= val
;
3562 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3566 case L2CAP_CONF_RFC
:
3567 if (olen
== sizeof(rfc
))
3568 memcpy(&rfc
, (void *)val
, olen
);
3570 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3571 rfc
.mode
!= chan
->mode
)
3572 return -ECONNREFUSED
;
3576 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3577 sizeof(rfc
), (unsigned long) &rfc
);
3580 case L2CAP_CONF_EWS
:
3581 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3582 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3586 case L2CAP_CONF_EFS
:
3587 if (olen
== sizeof(efs
))
3588 memcpy(&efs
, (void *)val
, olen
);
3590 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3591 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3592 efs
.stype
!= chan
->local_stype
)
3593 return -ECONNREFUSED
;
3595 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3596 (unsigned long) &efs
);
3599 case L2CAP_CONF_FCS
:
3600 if (*result
== L2CAP_CONF_PENDING
)
3601 if (val
== L2CAP_FCS_NONE
)
3602 set_bit(CONF_RECV_NO_FCS
,
3608 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3609 return -ECONNREFUSED
;
3611 chan
->mode
= rfc
.mode
;
3613 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3615 case L2CAP_MODE_ERTM
:
3616 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3617 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3618 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3619 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3620 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3623 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3624 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3625 chan
->local_sdu_itime
=
3626 le32_to_cpu(efs
.sdu_itime
);
3627 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3628 chan
->local_flush_to
=
3629 le32_to_cpu(efs
.flush_to
);
3633 case L2CAP_MODE_STREAMING
:
3634 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3638 req
->dcid
= cpu_to_le16(chan
->dcid
);
3639 req
->flags
= cpu_to_le16(0);
3644 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3645 u16 result
, u16 flags
)
3647 struct l2cap_conf_rsp
*rsp
= data
;
3648 void *ptr
= rsp
->data
;
3650 BT_DBG("chan %p", chan
);
3652 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3653 rsp
->result
= cpu_to_le16(result
);
3654 rsp
->flags
= cpu_to_le16(flags
);
3659 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3661 struct l2cap_le_conn_rsp rsp
;
3662 struct l2cap_conn
*conn
= chan
->conn
;
3664 BT_DBG("chan %p", chan
);
3666 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3667 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3668 rsp
.mps
= cpu_to_le16(chan
->mps
);
3669 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
3670 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3672 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
3676 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3678 struct l2cap_conn_rsp rsp
;
3679 struct l2cap_conn
*conn
= chan
->conn
;
3683 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3684 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3685 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3686 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3689 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3691 rsp_code
= L2CAP_CONN_RSP
;
3693 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3695 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3697 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3700 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3701 l2cap_build_conf_req(chan
, buf
), buf
);
3702 chan
->num_conf_req
++;
3705 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3709 /* Use sane default values in case a misbehaving remote device
3710 * did not send an RFC or extended window size option.
3712 u16 txwin_ext
= chan
->ack_win
;
3713 struct l2cap_conf_rfc rfc
= {
3715 .retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3716 .monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3717 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3718 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3721 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3723 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3726 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3727 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3730 case L2CAP_CONF_RFC
:
3731 if (olen
== sizeof(rfc
))
3732 memcpy(&rfc
, (void *)val
, olen
);
3734 case L2CAP_CONF_EWS
:
3741 case L2CAP_MODE_ERTM
:
3742 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3743 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3744 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3745 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3746 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3748 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3751 case L2CAP_MODE_STREAMING
:
3752 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3756 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3757 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3760 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3762 if (cmd_len
< sizeof(*rej
))
3765 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3768 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3769 cmd
->ident
== conn
->info_ident
) {
3770 cancel_delayed_work(&conn
->info_timer
);
3772 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3773 conn
->info_ident
= 0;
3775 l2cap_conn_start(conn
);
3781 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3782 struct l2cap_cmd_hdr
*cmd
,
3783 u8
*data
, u8 rsp_code
, u8 amp_id
)
3785 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3786 struct l2cap_conn_rsp rsp
;
3787 struct l2cap_chan
*chan
= NULL
, *pchan
;
3788 int result
, status
= L2CAP_CS_NO_INFO
;
3790 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3791 __le16 psm
= req
->psm
;
3793 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3795 /* Check if we have socket listening on psm */
3796 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3797 &conn
->hcon
->dst
, ACL_LINK
);
3799 result
= L2CAP_CR_BAD_PSM
;
3803 mutex_lock(&conn
->chan_lock
);
3804 l2cap_chan_lock(pchan
);
3806 /* Check if the ACL is secure enough (if not SDP) */
3807 if (psm
!= cpu_to_le16(L2CAP_PSM_SDP
) &&
3808 !hci_conn_check_link_mode(conn
->hcon
)) {
3809 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3810 result
= L2CAP_CR_SEC_BLOCK
;
3814 result
= L2CAP_CR_NO_MEM
;
3816 /* Check if we already have channel with that dcid */
3817 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3820 chan
= pchan
->ops
->new_connection(pchan
);
3824 /* For certain devices (ex: HID mouse), support for authentication,
3825 * pairing and bonding is optional. For such devices, inorder to avoid
3826 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3827 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3829 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3831 bacpy(&chan
->src
, &conn
->hcon
->src
);
3832 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3833 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
3834 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
3837 chan
->local_amp_id
= amp_id
;
3839 __l2cap_chan_add(conn
, chan
);
3843 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3845 chan
->ident
= cmd
->ident
;
3847 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3848 if (l2cap_chan_check_security(chan
)) {
3849 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3850 l2cap_state_change(chan
, BT_CONNECT2
);
3851 result
= L2CAP_CR_PEND
;
3852 status
= L2CAP_CS_AUTHOR_PEND
;
3853 chan
->ops
->defer(chan
);
3855 /* Force pending result for AMP controllers.
3856 * The connection will succeed after the
3857 * physical link is up.
3859 if (amp_id
== AMP_ID_BREDR
) {
3860 l2cap_state_change(chan
, BT_CONFIG
);
3861 result
= L2CAP_CR_SUCCESS
;
3863 l2cap_state_change(chan
, BT_CONNECT2
);
3864 result
= L2CAP_CR_PEND
;
3866 status
= L2CAP_CS_NO_INFO
;
3869 l2cap_state_change(chan
, BT_CONNECT2
);
3870 result
= L2CAP_CR_PEND
;
3871 status
= L2CAP_CS_AUTHEN_PEND
;
3874 l2cap_state_change(chan
, BT_CONNECT2
);
3875 result
= L2CAP_CR_PEND
;
3876 status
= L2CAP_CS_NO_INFO
;
3880 l2cap_chan_unlock(pchan
);
3881 mutex_unlock(&conn
->chan_lock
);
3884 rsp
.scid
= cpu_to_le16(scid
);
3885 rsp
.dcid
= cpu_to_le16(dcid
);
3886 rsp
.result
= cpu_to_le16(result
);
3887 rsp
.status
= cpu_to_le16(status
);
3888 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3890 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3891 struct l2cap_info_req info
;
3892 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3894 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3895 conn
->info_ident
= l2cap_get_ident(conn
);
3897 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3899 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3900 sizeof(info
), &info
);
3903 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3904 result
== L2CAP_CR_SUCCESS
) {
3906 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3907 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3908 l2cap_build_conf_req(chan
, buf
), buf
);
3909 chan
->num_conf_req
++;
3915 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3916 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3918 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3919 struct hci_conn
*hcon
= conn
->hcon
;
3921 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3925 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3926 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3927 mgmt_device_connected(hdev
, &hcon
->dst
, hcon
->type
,
3928 hcon
->dst_type
, 0, NULL
, 0,
3930 hci_dev_unlock(hdev
);
3932 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3936 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3937 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3940 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3941 u16 scid
, dcid
, result
, status
;
3942 struct l2cap_chan
*chan
;
3946 if (cmd_len
< sizeof(*rsp
))
3949 scid
= __le16_to_cpu(rsp
->scid
);
3950 dcid
= __le16_to_cpu(rsp
->dcid
);
3951 result
= __le16_to_cpu(rsp
->result
);
3952 status
= __le16_to_cpu(rsp
->status
);
3954 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3955 dcid
, scid
, result
, status
);
3957 mutex_lock(&conn
->chan_lock
);
3960 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3966 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3975 l2cap_chan_lock(chan
);
3978 case L2CAP_CR_SUCCESS
:
3979 l2cap_state_change(chan
, BT_CONFIG
);
3982 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3984 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3987 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3988 l2cap_build_conf_req(chan
, req
), req
);
3989 chan
->num_conf_req
++;
3993 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3997 l2cap_chan_del(chan
, ECONNREFUSED
);
4001 l2cap_chan_unlock(chan
);
4004 mutex_unlock(&conn
->chan_lock
);
4009 static inline void set_default_fcs(struct l2cap_chan
*chan
)
4011 /* FCS is enabled only in ERTM or streaming mode, if one or both
4014 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
4015 chan
->fcs
= L2CAP_FCS_NONE
;
4016 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
4017 chan
->fcs
= L2CAP_FCS_CRC16
;
4020 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
4021 u8 ident
, u16 flags
)
4023 struct l2cap_conn
*conn
= chan
->conn
;
4025 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
4028 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
4029 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
4031 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
4032 l2cap_build_conf_rsp(chan
, data
,
4033 L2CAP_CONF_SUCCESS
, flags
), data
);
4036 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
4039 struct l2cap_cmd_rej_cid rej
;
4041 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
4042 rej
.scid
= __cpu_to_le16(scid
);
4043 rej
.dcid
= __cpu_to_le16(dcid
);
4045 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4048 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4049 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4052 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4055 struct l2cap_chan
*chan
;
4058 if (cmd_len
< sizeof(*req
))
4061 dcid
= __le16_to_cpu(req
->dcid
);
4062 flags
= __le16_to_cpu(req
->flags
);
4064 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4066 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4068 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4072 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4073 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4078 /* Reject if config buffer is too small. */
4079 len
= cmd_len
- sizeof(*req
);
4080 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4081 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4082 l2cap_build_conf_rsp(chan
, rsp
,
4083 L2CAP_CONF_REJECT
, flags
), rsp
);
4088 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4089 chan
->conf_len
+= len
;
4091 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4092 /* Incomplete config. Send empty response. */
4093 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4094 l2cap_build_conf_rsp(chan
, rsp
,
4095 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4099 /* Complete config. */
4100 len
= l2cap_parse_conf_req(chan
, rsp
);
4102 l2cap_send_disconn_req(chan
, ECONNRESET
);
4106 chan
->ident
= cmd
->ident
;
4107 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4108 chan
->num_conf_rsp
++;
4110 /* Reset config buffer. */
4113 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4116 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4117 set_default_fcs(chan
);
4119 if (chan
->mode
== L2CAP_MODE_ERTM
||
4120 chan
->mode
== L2CAP_MODE_STREAMING
)
4121 err
= l2cap_ertm_init(chan
);
4124 l2cap_send_disconn_req(chan
, -err
);
4126 l2cap_chan_ready(chan
);
4131 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4133 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4134 l2cap_build_conf_req(chan
, buf
), buf
);
4135 chan
->num_conf_req
++;
4138 /* Got Conf Rsp PENDING from remote side and asume we sent
4139 Conf Rsp PENDING in the code above */
4140 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4141 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4143 /* check compatibility */
4145 /* Send rsp for BR/EDR channel */
4147 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4149 chan
->ident
= cmd
->ident
;
4153 l2cap_chan_unlock(chan
);
4157 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4158 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4161 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4162 u16 scid
, flags
, result
;
4163 struct l2cap_chan
*chan
;
4164 int len
= cmd_len
- sizeof(*rsp
);
4167 if (cmd_len
< sizeof(*rsp
))
4170 scid
= __le16_to_cpu(rsp
->scid
);
4171 flags
= __le16_to_cpu(rsp
->flags
);
4172 result
= __le16_to_cpu(rsp
->result
);
4174 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4177 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4182 case L2CAP_CONF_SUCCESS
:
4183 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4184 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4187 case L2CAP_CONF_PENDING
:
4188 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4190 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4193 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4196 l2cap_send_disconn_req(chan
, ECONNRESET
);
4200 if (!chan
->hs_hcon
) {
4201 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4204 if (l2cap_check_efs(chan
)) {
4205 amp_create_logical_link(chan
);
4206 chan
->ident
= cmd
->ident
;
4212 case L2CAP_CONF_UNACCEPT
:
4213 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4216 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4217 l2cap_send_disconn_req(chan
, ECONNRESET
);
4221 /* throw out any old stored conf requests */
4222 result
= L2CAP_CONF_SUCCESS
;
4223 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4226 l2cap_send_disconn_req(chan
, ECONNRESET
);
4230 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4231 L2CAP_CONF_REQ
, len
, req
);
4232 chan
->num_conf_req
++;
4233 if (result
!= L2CAP_CONF_SUCCESS
)
4239 l2cap_chan_set_err(chan
, ECONNRESET
);
4241 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4242 l2cap_send_disconn_req(chan
, ECONNRESET
);
4246 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4249 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4251 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4252 set_default_fcs(chan
);
4254 if (chan
->mode
== L2CAP_MODE_ERTM
||
4255 chan
->mode
== L2CAP_MODE_STREAMING
)
4256 err
= l2cap_ertm_init(chan
);
4259 l2cap_send_disconn_req(chan
, -err
);
4261 l2cap_chan_ready(chan
);
4265 l2cap_chan_unlock(chan
);
4269 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4270 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4273 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4274 struct l2cap_disconn_rsp rsp
;
4276 struct l2cap_chan
*chan
;
4278 if (cmd_len
!= sizeof(*req
))
4281 scid
= __le16_to_cpu(req
->scid
);
4282 dcid
= __le16_to_cpu(req
->dcid
);
4284 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4286 mutex_lock(&conn
->chan_lock
);
4288 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4290 mutex_unlock(&conn
->chan_lock
);
4291 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4295 l2cap_chan_lock(chan
);
4297 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4298 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4299 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4301 chan
->ops
->set_shutdown(chan
);
4303 l2cap_chan_hold(chan
);
4304 l2cap_chan_del(chan
, ECONNRESET
);
4306 l2cap_chan_unlock(chan
);
4308 chan
->ops
->close(chan
);
4309 l2cap_chan_put(chan
);
4311 mutex_unlock(&conn
->chan_lock
);
4316 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4317 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4320 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4322 struct l2cap_chan
*chan
;
4324 if (cmd_len
!= sizeof(*rsp
))
4327 scid
= __le16_to_cpu(rsp
->scid
);
4328 dcid
= __le16_to_cpu(rsp
->dcid
);
4330 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4332 mutex_lock(&conn
->chan_lock
);
4334 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4336 mutex_unlock(&conn
->chan_lock
);
4340 l2cap_chan_lock(chan
);
4342 l2cap_chan_hold(chan
);
4343 l2cap_chan_del(chan
, 0);
4345 l2cap_chan_unlock(chan
);
4347 chan
->ops
->close(chan
);
4348 l2cap_chan_put(chan
);
4350 mutex_unlock(&conn
->chan_lock
);
4355 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4356 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4359 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4362 if (cmd_len
!= sizeof(*req
))
4365 type
= __le16_to_cpu(req
->type
);
4367 BT_DBG("type 0x%4.4x", type
);
4369 if (type
== L2CAP_IT_FEAT_MASK
) {
4371 u32 feat_mask
= l2cap_feat_mask
;
4372 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4373 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4374 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4376 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4378 if (conn
->hs_enabled
)
4379 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4380 | L2CAP_FEAT_EXT_WINDOW
;
4382 put_unaligned_le32(feat_mask
, rsp
->data
);
4383 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4385 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4387 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4389 if (conn
->hs_enabled
)
4390 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4392 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4394 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4395 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4396 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4397 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4400 struct l2cap_info_rsp rsp
;
4401 rsp
.type
= cpu_to_le16(type
);
4402 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
4403 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4410 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4411 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4414 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4417 if (cmd_len
< sizeof(*rsp
))
4420 type
= __le16_to_cpu(rsp
->type
);
4421 result
= __le16_to_cpu(rsp
->result
);
4423 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4425 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4426 if (cmd
->ident
!= conn
->info_ident
||
4427 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4430 cancel_delayed_work(&conn
->info_timer
);
4432 if (result
!= L2CAP_IR_SUCCESS
) {
4433 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4434 conn
->info_ident
= 0;
4436 l2cap_conn_start(conn
);
4442 case L2CAP_IT_FEAT_MASK
:
4443 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4445 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4446 struct l2cap_info_req req
;
4447 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4449 conn
->info_ident
= l2cap_get_ident(conn
);
4451 l2cap_send_cmd(conn
, conn
->info_ident
,
4452 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4454 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4455 conn
->info_ident
= 0;
4457 l2cap_conn_start(conn
);
4461 case L2CAP_IT_FIXED_CHAN
:
4462 conn
->fixed_chan_mask
= rsp
->data
[0];
4463 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4464 conn
->info_ident
= 0;
4466 l2cap_conn_start(conn
);
4473 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4474 struct l2cap_cmd_hdr
*cmd
,
4475 u16 cmd_len
, void *data
)
4477 struct l2cap_create_chan_req
*req
= data
;
4478 struct l2cap_create_chan_rsp rsp
;
4479 struct l2cap_chan
*chan
;
4480 struct hci_dev
*hdev
;
4483 if (cmd_len
!= sizeof(*req
))
4486 if (!conn
->hs_enabled
)
4489 psm
= le16_to_cpu(req
->psm
);
4490 scid
= le16_to_cpu(req
->scid
);
4492 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4494 /* For controller id 0 make BR/EDR connection */
4495 if (req
->amp_id
== AMP_ID_BREDR
) {
4496 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4501 /* Validate AMP controller id */
4502 hdev
= hci_dev_get(req
->amp_id
);
4506 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4511 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4514 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4515 struct hci_conn
*hs_hcon
;
4517 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4521 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4526 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4528 mgr
->bredr_chan
= chan
;
4529 chan
->hs_hcon
= hs_hcon
;
4530 chan
->fcs
= L2CAP_FCS_NONE
;
4531 conn
->mtu
= hdev
->block_mtu
;
4540 rsp
.scid
= cpu_to_le16(scid
);
4541 rsp
.result
= cpu_to_le16(L2CAP_CR_BAD_AMP
);
4542 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4544 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4550 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4552 struct l2cap_move_chan_req req
;
4555 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4557 ident
= l2cap_get_ident(chan
->conn
);
4558 chan
->ident
= ident
;
4560 req
.icid
= cpu_to_le16(chan
->scid
);
4561 req
.dest_amp_id
= dest_amp_id
;
4563 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4566 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4569 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4571 struct l2cap_move_chan_rsp rsp
;
4573 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4575 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4576 rsp
.result
= cpu_to_le16(result
);
4578 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4582 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4584 struct l2cap_move_chan_cfm cfm
;
4586 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4588 chan
->ident
= l2cap_get_ident(chan
->conn
);
4590 cfm
.icid
= cpu_to_le16(chan
->scid
);
4591 cfm
.result
= cpu_to_le16(result
);
4593 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4596 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4599 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4601 struct l2cap_move_chan_cfm cfm
;
4603 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4605 cfm
.icid
= cpu_to_le16(icid
);
4606 cfm
.result
= cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4608 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4612 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4615 struct l2cap_move_chan_cfm_rsp rsp
;
4617 BT_DBG("icid 0x%4.4x", icid
);
4619 rsp
.icid
= cpu_to_le16(icid
);
4620 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4623 static void __release_logical_link(struct l2cap_chan
*chan
)
4625 chan
->hs_hchan
= NULL
;
4626 chan
->hs_hcon
= NULL
;
4628 /* Placeholder - release the logical link */
4631 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4633 /* Logical link setup failed */
4634 if (chan
->state
!= BT_CONNECTED
) {
4635 /* Create channel failure, disconnect */
4636 l2cap_send_disconn_req(chan
, ECONNRESET
);
4640 switch (chan
->move_role
) {
4641 case L2CAP_MOVE_ROLE_RESPONDER
:
4642 l2cap_move_done(chan
);
4643 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4645 case L2CAP_MOVE_ROLE_INITIATOR
:
4646 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4647 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4648 /* Remote has only sent pending or
4649 * success responses, clean up
4651 l2cap_move_done(chan
);
4654 /* Other amp move states imply that the move
4655 * has already aborted
4657 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4662 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4663 struct hci_chan
*hchan
)
4665 struct l2cap_conf_rsp rsp
;
4667 chan
->hs_hchan
= hchan
;
4668 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4670 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4672 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4675 set_default_fcs(chan
);
4677 err
= l2cap_ertm_init(chan
);
4679 l2cap_send_disconn_req(chan
, -err
);
4681 l2cap_chan_ready(chan
);
4685 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4686 struct hci_chan
*hchan
)
4688 chan
->hs_hcon
= hchan
->conn
;
4689 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4691 BT_DBG("move_state %d", chan
->move_state
);
4693 switch (chan
->move_state
) {
4694 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4695 /* Move confirm will be sent after a success
4696 * response is received
4698 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4700 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4701 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4702 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4703 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4704 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4705 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4706 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4707 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4708 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4712 /* Move was not in expected state, free the channel */
4713 __release_logical_link(chan
);
4715 chan
->move_state
= L2CAP_MOVE_STABLE
;
4719 /* Call with chan locked */
4720 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4723 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4726 l2cap_logical_fail(chan
);
4727 __release_logical_link(chan
);
4731 if (chan
->state
!= BT_CONNECTED
) {
4732 /* Ignore logical link if channel is on BR/EDR */
4733 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4734 l2cap_logical_finish_create(chan
, hchan
);
4736 l2cap_logical_finish_move(chan
, hchan
);
4740 void l2cap_move_start(struct l2cap_chan
*chan
)
4742 BT_DBG("chan %p", chan
);
4744 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4745 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4747 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4748 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4749 /* Placeholder - start physical link setup */
4751 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4752 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4754 l2cap_move_setup(chan
);
4755 l2cap_send_move_chan_req(chan
, 0);
4759 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4760 u8 local_amp_id
, u8 remote_amp_id
)
4762 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4763 local_amp_id
, remote_amp_id
);
4765 chan
->fcs
= L2CAP_FCS_NONE
;
4767 /* Outgoing channel on AMP */
4768 if (chan
->state
== BT_CONNECT
) {
4769 if (result
== L2CAP_CR_SUCCESS
) {
4770 chan
->local_amp_id
= local_amp_id
;
4771 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4773 /* Revert to BR/EDR connect */
4774 l2cap_send_conn_req(chan
);
4780 /* Incoming channel on AMP */
4781 if (__l2cap_no_conn_pending(chan
)) {
4782 struct l2cap_conn_rsp rsp
;
4784 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4785 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4787 if (result
== L2CAP_CR_SUCCESS
) {
4788 /* Send successful response */
4789 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4790 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4792 /* Send negative response */
4793 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4794 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4797 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4800 if (result
== L2CAP_CR_SUCCESS
) {
4801 l2cap_state_change(chan
, BT_CONFIG
);
4802 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4803 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4805 l2cap_build_conf_req(chan
, buf
), buf
);
4806 chan
->num_conf_req
++;
4811 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4814 l2cap_move_setup(chan
);
4815 chan
->move_id
= local_amp_id
;
4816 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4818 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4821 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4823 struct hci_chan
*hchan
= NULL
;
4825 /* Placeholder - get hci_chan for logical link */
4828 if (hchan
->state
== BT_CONNECTED
) {
4829 /* Logical link is ready to go */
4830 chan
->hs_hcon
= hchan
->conn
;
4831 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4832 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4833 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4835 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4837 /* Wait for logical link to be ready */
4838 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4841 /* Logical link not available */
4842 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4846 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4848 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4850 if (result
== -EINVAL
)
4851 rsp_result
= L2CAP_MR_BAD_ID
;
4853 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4855 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4858 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4859 chan
->move_state
= L2CAP_MOVE_STABLE
;
4861 /* Restart data transmission */
4862 l2cap_ertm_send(chan
);
4865 /* Invoke with locked chan */
4866 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4868 u8 local_amp_id
= chan
->local_amp_id
;
4869 u8 remote_amp_id
= chan
->remote_amp_id
;
4871 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4872 chan
, result
, local_amp_id
, remote_amp_id
);
4874 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4875 l2cap_chan_unlock(chan
);
4879 if (chan
->state
!= BT_CONNECTED
) {
4880 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4881 } else if (result
!= L2CAP_MR_SUCCESS
) {
4882 l2cap_do_move_cancel(chan
, result
);
4884 switch (chan
->move_role
) {
4885 case L2CAP_MOVE_ROLE_INITIATOR
:
4886 l2cap_do_move_initiate(chan
, local_amp_id
,
4889 case L2CAP_MOVE_ROLE_RESPONDER
:
4890 l2cap_do_move_respond(chan
, result
);
4893 l2cap_do_move_cancel(chan
, result
);
4899 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4900 struct l2cap_cmd_hdr
*cmd
,
4901 u16 cmd_len
, void *data
)
4903 struct l2cap_move_chan_req
*req
= data
;
4904 struct l2cap_move_chan_rsp rsp
;
4905 struct l2cap_chan
*chan
;
4907 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4909 if (cmd_len
!= sizeof(*req
))
4912 icid
= le16_to_cpu(req
->icid
);
4914 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4916 if (!conn
->hs_enabled
)
4919 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4921 rsp
.icid
= cpu_to_le16(icid
);
4922 rsp
.result
= cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4923 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4928 chan
->ident
= cmd
->ident
;
4930 if (chan
->scid
< L2CAP_CID_DYN_START
||
4931 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4932 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4933 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4934 result
= L2CAP_MR_NOT_ALLOWED
;
4935 goto send_move_response
;
4938 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4939 result
= L2CAP_MR_SAME_ID
;
4940 goto send_move_response
;
4943 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4944 struct hci_dev
*hdev
;
4945 hdev
= hci_dev_get(req
->dest_amp_id
);
4946 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4947 !test_bit(HCI_UP
, &hdev
->flags
)) {
4951 result
= L2CAP_MR_BAD_ID
;
4952 goto send_move_response
;
4957 /* Detect a move collision. Only send a collision response
4958 * if this side has "lost", otherwise proceed with the move.
4959 * The winner has the larger bd_addr.
4961 if ((__chan_is_moving(chan
) ||
4962 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4963 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4964 result
= L2CAP_MR_COLLISION
;
4965 goto send_move_response
;
4968 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4969 l2cap_move_setup(chan
);
4970 chan
->move_id
= req
->dest_amp_id
;
4973 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4974 /* Moving to BR/EDR */
4975 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4976 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4977 result
= L2CAP_MR_PEND
;
4979 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4980 result
= L2CAP_MR_SUCCESS
;
4983 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4984 /* Placeholder - uncomment when amp functions are available */
4985 /*amp_accept_physical(chan, req->dest_amp_id);*/
4986 result
= L2CAP_MR_PEND
;
4990 l2cap_send_move_chan_rsp(chan
, result
);
4992 l2cap_chan_unlock(chan
);
4997 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4999 struct l2cap_chan
*chan
;
5000 struct hci_chan
*hchan
= NULL
;
5002 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5004 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5008 __clear_chan_timer(chan
);
5009 if (result
== L2CAP_MR_PEND
)
5010 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
5012 switch (chan
->move_state
) {
5013 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
5014 /* Move confirm will be sent when logical link
5017 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5019 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
5020 if (result
== L2CAP_MR_PEND
) {
5022 } else if (test_bit(CONN_LOCAL_BUSY
,
5023 &chan
->conn_state
)) {
5024 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
5026 /* Logical link is up or moving to BR/EDR,
5029 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
5030 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5033 case L2CAP_MOVE_WAIT_RSP
:
5035 if (result
== L2CAP_MR_SUCCESS
) {
5036 /* Remote is ready, send confirm immediately
5037 * after logical link is ready
5039 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5041 /* Both logical link and move success
5042 * are required to confirm
5044 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5047 /* Placeholder - get hci_chan for logical link */
5049 /* Logical link not available */
5050 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5054 /* If the logical link is not yet connected, do not
5055 * send confirmation.
5057 if (hchan
->state
!= BT_CONNECTED
)
5060 /* Logical link is already ready to go */
5062 chan
->hs_hcon
= hchan
->conn
;
5063 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5065 if (result
== L2CAP_MR_SUCCESS
) {
5066 /* Can confirm now */
5067 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5069 /* Now only need move success
5072 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5075 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5078 /* Any other amp move state means the move failed. */
5079 chan
->move_id
= chan
->local_amp_id
;
5080 l2cap_move_done(chan
);
5081 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5084 l2cap_chan_unlock(chan
);
5087 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5090 struct l2cap_chan
*chan
;
5092 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5094 /* Could not locate channel, icid is best guess */
5095 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5099 __clear_chan_timer(chan
);
5101 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5102 if (result
== L2CAP_MR_COLLISION
) {
5103 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5105 /* Cleanup - cancel move */
5106 chan
->move_id
= chan
->local_amp_id
;
5107 l2cap_move_done(chan
);
5111 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5113 l2cap_chan_unlock(chan
);
5116 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5117 struct l2cap_cmd_hdr
*cmd
,
5118 u16 cmd_len
, void *data
)
5120 struct l2cap_move_chan_rsp
*rsp
= data
;
5123 if (cmd_len
!= sizeof(*rsp
))
5126 icid
= le16_to_cpu(rsp
->icid
);
5127 result
= le16_to_cpu(rsp
->result
);
5129 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5131 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5132 l2cap_move_continue(conn
, icid
, result
);
5134 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5139 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5140 struct l2cap_cmd_hdr
*cmd
,
5141 u16 cmd_len
, void *data
)
5143 struct l2cap_move_chan_cfm
*cfm
= data
;
5144 struct l2cap_chan
*chan
;
5147 if (cmd_len
!= sizeof(*cfm
))
5150 icid
= le16_to_cpu(cfm
->icid
);
5151 result
= le16_to_cpu(cfm
->result
);
5153 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5155 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5157 /* Spec requires a response even if the icid was not found */
5158 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5162 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5163 if (result
== L2CAP_MC_CONFIRMED
) {
5164 chan
->local_amp_id
= chan
->move_id
;
5165 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5166 __release_logical_link(chan
);
5168 chan
->move_id
= chan
->local_amp_id
;
5171 l2cap_move_done(chan
);
5174 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5176 l2cap_chan_unlock(chan
);
5181 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5182 struct l2cap_cmd_hdr
*cmd
,
5183 u16 cmd_len
, void *data
)
5185 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5186 struct l2cap_chan
*chan
;
5189 if (cmd_len
!= sizeof(*rsp
))
5192 icid
= le16_to_cpu(rsp
->icid
);
5194 BT_DBG("icid 0x%4.4x", icid
);
5196 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5200 __clear_chan_timer(chan
);
5202 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5203 chan
->local_amp_id
= chan
->move_id
;
5205 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5206 __release_logical_link(chan
);
5208 l2cap_move_done(chan
);
5211 l2cap_chan_unlock(chan
);
5216 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5217 struct l2cap_cmd_hdr
*cmd
,
5218 u16 cmd_len
, u8
*data
)
5220 struct hci_conn
*hcon
= conn
->hcon
;
5221 struct l2cap_conn_param_update_req
*req
;
5222 struct l2cap_conn_param_update_rsp rsp
;
5223 u16 min
, max
, latency
, to_multiplier
;
5226 if (!test_bit(HCI_CONN_MASTER
, &hcon
->flags
))
5229 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5232 req
= (struct l2cap_conn_param_update_req
*) data
;
5233 min
= __le16_to_cpu(req
->min
);
5234 max
= __le16_to_cpu(req
->max
);
5235 latency
= __le16_to_cpu(req
->latency
);
5236 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5238 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5239 min
, max
, latency
, to_multiplier
);
5241 memset(&rsp
, 0, sizeof(rsp
));
5243 err
= hci_check_conn_params(min
, max
, latency
, to_multiplier
);
5245 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5247 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5249 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5253 mgmt_new_conn_param(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
,
5254 min
, max
, latency
, to_multiplier
);
5256 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
5262 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5263 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5266 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5267 u16 dcid
, mtu
, mps
, credits
, result
;
5268 struct l2cap_chan
*chan
;
5271 if (cmd_len
< sizeof(*rsp
))
5274 dcid
= __le16_to_cpu(rsp
->dcid
);
5275 mtu
= __le16_to_cpu(rsp
->mtu
);
5276 mps
= __le16_to_cpu(rsp
->mps
);
5277 credits
= __le16_to_cpu(rsp
->credits
);
5278 result
= __le16_to_cpu(rsp
->result
);
5280 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23))
5283 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5284 dcid
, mtu
, mps
, credits
, result
);
5286 mutex_lock(&conn
->chan_lock
);
5288 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5296 l2cap_chan_lock(chan
);
5299 case L2CAP_CR_SUCCESS
:
5303 chan
->remote_mps
= mps
;
5304 chan
->tx_credits
= credits
;
5305 l2cap_chan_ready(chan
);
5309 l2cap_chan_del(chan
, ECONNREFUSED
);
5313 l2cap_chan_unlock(chan
);
5316 mutex_unlock(&conn
->chan_lock
);
5321 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5322 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5327 switch (cmd
->code
) {
5328 case L2CAP_COMMAND_REJ
:
5329 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5332 case L2CAP_CONN_REQ
:
5333 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5336 case L2CAP_CONN_RSP
:
5337 case L2CAP_CREATE_CHAN_RSP
:
5338 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5341 case L2CAP_CONF_REQ
:
5342 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5345 case L2CAP_CONF_RSP
:
5346 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5349 case L2CAP_DISCONN_REQ
:
5350 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5353 case L2CAP_DISCONN_RSP
:
5354 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5357 case L2CAP_ECHO_REQ
:
5358 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5361 case L2CAP_ECHO_RSP
:
5364 case L2CAP_INFO_REQ
:
5365 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5368 case L2CAP_INFO_RSP
:
5369 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5372 case L2CAP_CREATE_CHAN_REQ
:
5373 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5376 case L2CAP_MOVE_CHAN_REQ
:
5377 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5380 case L2CAP_MOVE_CHAN_RSP
:
5381 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5384 case L2CAP_MOVE_CHAN_CFM
:
5385 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5388 case L2CAP_MOVE_CHAN_CFM_RSP
:
5389 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5393 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5401 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5402 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5405 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5406 struct l2cap_le_conn_rsp rsp
;
5407 struct l2cap_chan
*chan
, *pchan
;
5408 u16 dcid
, scid
, credits
, mtu
, mps
;
5412 if (cmd_len
!= sizeof(*req
))
5415 scid
= __le16_to_cpu(req
->scid
);
5416 mtu
= __le16_to_cpu(req
->mtu
);
5417 mps
= __le16_to_cpu(req
->mps
);
5422 if (mtu
< 23 || mps
< 23)
5425 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5428 /* Check if we have socket listening on psm */
5429 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5430 &conn
->hcon
->dst
, LE_LINK
);
5432 result
= L2CAP_CR_BAD_PSM
;
5437 mutex_lock(&conn
->chan_lock
);
5438 l2cap_chan_lock(pchan
);
5440 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
)) {
5441 result
= L2CAP_CR_AUTHENTICATION
;
5443 goto response_unlock
;
5446 /* Check if we already have channel with that dcid */
5447 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5448 result
= L2CAP_CR_NO_MEM
;
5450 goto response_unlock
;
5453 chan
= pchan
->ops
->new_connection(pchan
);
5455 result
= L2CAP_CR_NO_MEM
;
5456 goto response_unlock
;
5459 l2cap_le_flowctl_init(chan
);
5461 bacpy(&chan
->src
, &conn
->hcon
->src
);
5462 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5463 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
5464 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
5468 chan
->remote_mps
= mps
;
5469 chan
->tx_credits
= __le16_to_cpu(req
->credits
);
5471 __l2cap_chan_add(conn
, chan
);
5473 credits
= chan
->rx_credits
;
5475 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5477 chan
->ident
= cmd
->ident
;
5479 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5480 l2cap_state_change(chan
, BT_CONNECT2
);
5481 result
= L2CAP_CR_PEND
;
5482 chan
->ops
->defer(chan
);
5484 l2cap_chan_ready(chan
);
5485 result
= L2CAP_CR_SUCCESS
;
5489 l2cap_chan_unlock(pchan
);
5490 mutex_unlock(&conn
->chan_lock
);
5492 if (result
== L2CAP_CR_PEND
)
5497 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5498 rsp
.mps
= cpu_to_le16(chan
->mps
);
5504 rsp
.dcid
= cpu_to_le16(dcid
);
5505 rsp
.credits
= cpu_to_le16(credits
);
5506 rsp
.result
= cpu_to_le16(result
);
5508 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
5513 static inline int l2cap_le_credits(struct l2cap_conn
*conn
,
5514 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5517 struct l2cap_le_credits
*pkt
;
5518 struct l2cap_chan
*chan
;
5519 u16 cid
, credits
, max_credits
;
5521 if (cmd_len
!= sizeof(*pkt
))
5524 pkt
= (struct l2cap_le_credits
*) data
;
5525 cid
= __le16_to_cpu(pkt
->cid
);
5526 credits
= __le16_to_cpu(pkt
->credits
);
5528 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid
, credits
);
5530 chan
= l2cap_get_chan_by_dcid(conn
, cid
);
5534 max_credits
= LE_FLOWCTL_MAX_CREDITS
- chan
->tx_credits
;
5535 if (credits
> max_credits
) {
5536 BT_ERR("LE credits overflow");
5537 l2cap_send_disconn_req(chan
, ECONNRESET
);
5539 /* Return 0 so that we don't trigger an unnecessary
5540 * command reject packet.
5545 chan
->tx_credits
+= credits
;
5547 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
5548 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
5552 if (chan
->tx_credits
)
5553 chan
->ops
->resume(chan
);
5555 l2cap_chan_unlock(chan
);
5560 static inline int l2cap_le_command_rej(struct l2cap_conn
*conn
,
5561 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5564 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
5565 struct l2cap_chan
*chan
;
5567 if (cmd_len
< sizeof(*rej
))
5570 mutex_lock(&conn
->chan_lock
);
5572 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5576 l2cap_chan_lock(chan
);
5577 l2cap_chan_del(chan
, ECONNREFUSED
);
5578 l2cap_chan_unlock(chan
);
5581 mutex_unlock(&conn
->chan_lock
);
5585 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5586 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5591 switch (cmd
->code
) {
5592 case L2CAP_COMMAND_REJ
:
5593 l2cap_le_command_rej(conn
, cmd
, cmd_len
, data
);
5596 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5597 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5600 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5603 case L2CAP_LE_CONN_RSP
:
5604 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5607 case L2CAP_LE_CONN_REQ
:
5608 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5611 case L2CAP_LE_CREDITS
:
5612 err
= l2cap_le_credits(conn
, cmd
, cmd_len
, data
);
5615 case L2CAP_DISCONN_REQ
:
5616 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5619 case L2CAP_DISCONN_RSP
:
5620 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5624 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5632 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5633 struct sk_buff
*skb
)
5635 struct hci_conn
*hcon
= conn
->hcon
;
5636 struct l2cap_cmd_hdr
*cmd
;
5640 if (hcon
->type
!= LE_LINK
)
5643 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5646 cmd
= (void *) skb
->data
;
5647 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5649 len
= le16_to_cpu(cmd
->len
);
5651 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5653 if (len
!= skb
->len
|| !cmd
->ident
) {
5654 BT_DBG("corrupted command");
5658 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5660 struct l2cap_cmd_rej_unk rej
;
5662 BT_ERR("Wrong link type (%d)", err
);
5664 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5665 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5673 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5674 struct sk_buff
*skb
)
5676 struct hci_conn
*hcon
= conn
->hcon
;
5677 u8
*data
= skb
->data
;
5679 struct l2cap_cmd_hdr cmd
;
5682 l2cap_raw_recv(conn
, skb
);
5684 if (hcon
->type
!= ACL_LINK
)
5687 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5689 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5690 data
+= L2CAP_CMD_HDR_SIZE
;
5691 len
-= L2CAP_CMD_HDR_SIZE
;
5693 cmd_len
= le16_to_cpu(cmd
.len
);
5695 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5698 if (cmd_len
> len
|| !cmd
.ident
) {
5699 BT_DBG("corrupted command");
5703 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5705 struct l2cap_cmd_rej_unk rej
;
5707 BT_ERR("Wrong link type (%d)", err
);
5709 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5710 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5722 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5724 u16 our_fcs
, rcv_fcs
;
5727 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5728 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5730 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5732 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5733 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5734 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5735 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5737 if (our_fcs
!= rcv_fcs
)
5743 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5745 struct l2cap_ctrl control
;
5747 BT_DBG("chan %p", chan
);
5749 memset(&control
, 0, sizeof(control
));
5752 control
.reqseq
= chan
->buffer_seq
;
5753 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5755 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5756 control
.super
= L2CAP_SUPER_RNR
;
5757 l2cap_send_sframe(chan
, &control
);
5760 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5761 chan
->unacked_frames
> 0)
5762 __set_retrans_timer(chan
);
5764 /* Send pending iframes */
5765 l2cap_ertm_send(chan
);
5767 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5768 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5769 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5772 control
.super
= L2CAP_SUPER_RR
;
5773 l2cap_send_sframe(chan
, &control
);
5777 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5778 struct sk_buff
**last_frag
)
5780 /* skb->len reflects data in skb as well as all fragments
5781 * skb->data_len reflects only data in fragments
5783 if (!skb_has_frag_list(skb
))
5784 skb_shinfo(skb
)->frag_list
= new_frag
;
5786 new_frag
->next
= NULL
;
5788 (*last_frag
)->next
= new_frag
;
5789 *last_frag
= new_frag
;
5791 skb
->len
+= new_frag
->len
;
5792 skb
->data_len
+= new_frag
->len
;
5793 skb
->truesize
+= new_frag
->truesize
;
5796 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5797 struct l2cap_ctrl
*control
)
5801 switch (control
->sar
) {
5802 case L2CAP_SAR_UNSEGMENTED
:
5806 err
= chan
->ops
->recv(chan
, skb
);
5809 case L2CAP_SAR_START
:
5813 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5814 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5816 if (chan
->sdu_len
> chan
->imtu
) {
5821 if (skb
->len
>= chan
->sdu_len
)
5825 chan
->sdu_last_frag
= skb
;
5831 case L2CAP_SAR_CONTINUE
:
5835 append_skb_frag(chan
->sdu
, skb
,
5836 &chan
->sdu_last_frag
);
5839 if (chan
->sdu
->len
>= chan
->sdu_len
)
5849 append_skb_frag(chan
->sdu
, skb
,
5850 &chan
->sdu_last_frag
);
5853 if (chan
->sdu
->len
!= chan
->sdu_len
)
5856 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5859 /* Reassembly complete */
5861 chan
->sdu_last_frag
= NULL
;
5869 kfree_skb(chan
->sdu
);
5871 chan
->sdu_last_frag
= NULL
;
5878 static int l2cap_resegment(struct l2cap_chan
*chan
)
5884 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5888 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5891 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5892 l2cap_tx(chan
, NULL
, NULL
, event
);
5895 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5898 /* Pass sequential frames to l2cap_reassemble_sdu()
5899 * until a gap is encountered.
5902 BT_DBG("chan %p", chan
);
5904 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5905 struct sk_buff
*skb
;
5906 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5907 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5909 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5914 skb_unlink(skb
, &chan
->srej_q
);
5915 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5916 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
5921 if (skb_queue_empty(&chan
->srej_q
)) {
5922 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5923 l2cap_send_ack(chan
);
5929 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5930 struct l2cap_ctrl
*control
)
5932 struct sk_buff
*skb
;
5934 BT_DBG("chan %p, control %p", chan
, control
);
5936 if (control
->reqseq
== chan
->next_tx_seq
) {
5937 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5938 l2cap_send_disconn_req(chan
, ECONNRESET
);
5942 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5945 BT_DBG("Seq %d not available for retransmission",
5950 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5951 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5952 l2cap_send_disconn_req(chan
, ECONNRESET
);
5956 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5958 if (control
->poll
) {
5959 l2cap_pass_to_tx(chan
, control
);
5961 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5962 l2cap_retransmit(chan
, control
);
5963 l2cap_ertm_send(chan
);
5965 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5966 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5967 chan
->srej_save_reqseq
= control
->reqseq
;
5970 l2cap_pass_to_tx_fbit(chan
, control
);
5972 if (control
->final
) {
5973 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5974 !test_and_clear_bit(CONN_SREJ_ACT
,
5976 l2cap_retransmit(chan
, control
);
5978 l2cap_retransmit(chan
, control
);
5979 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5980 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5981 chan
->srej_save_reqseq
= control
->reqseq
;
5987 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5988 struct l2cap_ctrl
*control
)
5990 struct sk_buff
*skb
;
5992 BT_DBG("chan %p, control %p", chan
, control
);
5994 if (control
->reqseq
== chan
->next_tx_seq
) {
5995 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5996 l2cap_send_disconn_req(chan
, ECONNRESET
);
6000 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
6002 if (chan
->max_tx
&& skb
&&
6003 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
6004 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
6005 l2cap_send_disconn_req(chan
, ECONNRESET
);
6009 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6011 l2cap_pass_to_tx(chan
, control
);
6013 if (control
->final
) {
6014 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
6015 l2cap_retransmit_all(chan
, control
);
6017 l2cap_retransmit_all(chan
, control
);
6018 l2cap_ertm_send(chan
);
6019 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
6020 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
6024 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
6026 BT_DBG("chan %p, txseq %d", chan
, txseq
);
6028 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
6029 chan
->expected_tx_seq
);
6031 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
6032 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6034 /* See notes below regarding "double poll" and
6037 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6038 BT_DBG("Invalid/Ignore - after SREJ");
6039 return L2CAP_TXSEQ_INVALID_IGNORE
;
6041 BT_DBG("Invalid - in window after SREJ sent");
6042 return L2CAP_TXSEQ_INVALID
;
6046 if (chan
->srej_list
.head
== txseq
) {
6047 BT_DBG("Expected SREJ");
6048 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6051 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6052 BT_DBG("Duplicate SREJ - txseq already stored");
6053 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6056 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6057 BT_DBG("Unexpected SREJ - not requested");
6058 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6062 if (chan
->expected_tx_seq
== txseq
) {
6063 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6065 BT_DBG("Invalid - txseq outside tx window");
6066 return L2CAP_TXSEQ_INVALID
;
6069 return L2CAP_TXSEQ_EXPECTED
;
6073 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6074 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6075 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6076 return L2CAP_TXSEQ_DUPLICATE
;
6079 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6080 /* A source of invalid packets is a "double poll" condition,
6081 * where delays cause us to send multiple poll packets. If
6082 * the remote stack receives and processes both polls,
6083 * sequence numbers can wrap around in such a way that a
6084 * resent frame has a sequence number that looks like new data
6085 * with a sequence gap. This would trigger an erroneous SREJ
6088 * Fortunately, this is impossible with a tx window that's
6089 * less than half of the maximum sequence number, which allows
6090 * invalid frames to be safely ignored.
6092 * With tx window sizes greater than half of the tx window
6093 * maximum, the frame is invalid and cannot be ignored. This
6094 * causes a disconnect.
6097 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6098 BT_DBG("Invalid/Ignore - txseq outside tx window");
6099 return L2CAP_TXSEQ_INVALID_IGNORE
;
6101 BT_DBG("Invalid - txseq outside tx window");
6102 return L2CAP_TXSEQ_INVALID
;
6105 BT_DBG("Unexpected - txseq indicates missing frames");
6106 return L2CAP_TXSEQ_UNEXPECTED
;
6110 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6111 struct l2cap_ctrl
*control
,
6112 struct sk_buff
*skb
, u8 event
)
6115 bool skb_in_use
= false;
6117 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6121 case L2CAP_EV_RECV_IFRAME
:
6122 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6123 case L2CAP_TXSEQ_EXPECTED
:
6124 l2cap_pass_to_tx(chan
, control
);
6126 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6127 BT_DBG("Busy, discarding expected seq %d",
6132 chan
->expected_tx_seq
= __next_seq(chan
,
6135 chan
->buffer_seq
= chan
->expected_tx_seq
;
6138 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6142 if (control
->final
) {
6143 if (!test_and_clear_bit(CONN_REJ_ACT
,
6144 &chan
->conn_state
)) {
6146 l2cap_retransmit_all(chan
, control
);
6147 l2cap_ertm_send(chan
);
6151 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6152 l2cap_send_ack(chan
);
6154 case L2CAP_TXSEQ_UNEXPECTED
:
6155 l2cap_pass_to_tx(chan
, control
);
6157 /* Can't issue SREJ frames in the local busy state.
6158 * Drop this frame, it will be seen as missing
6159 * when local busy is exited.
6161 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6162 BT_DBG("Busy, discarding unexpected seq %d",
6167 /* There was a gap in the sequence, so an SREJ
6168 * must be sent for each missing frame. The
6169 * current frame is stored for later use.
6171 skb_queue_tail(&chan
->srej_q
, skb
);
6173 BT_DBG("Queued %p (queue len %d)", skb
,
6174 skb_queue_len(&chan
->srej_q
));
6176 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6177 l2cap_seq_list_clear(&chan
->srej_list
);
6178 l2cap_send_srej(chan
, control
->txseq
);
6180 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6182 case L2CAP_TXSEQ_DUPLICATE
:
6183 l2cap_pass_to_tx(chan
, control
);
6185 case L2CAP_TXSEQ_INVALID_IGNORE
:
6187 case L2CAP_TXSEQ_INVALID
:
6189 l2cap_send_disconn_req(chan
, ECONNRESET
);
6193 case L2CAP_EV_RECV_RR
:
6194 l2cap_pass_to_tx(chan
, control
);
6195 if (control
->final
) {
6196 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6198 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6199 !__chan_is_moving(chan
)) {
6201 l2cap_retransmit_all(chan
, control
);
6204 l2cap_ertm_send(chan
);
6205 } else if (control
->poll
) {
6206 l2cap_send_i_or_rr_or_rnr(chan
);
6208 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6209 &chan
->conn_state
) &&
6210 chan
->unacked_frames
)
6211 __set_retrans_timer(chan
);
6213 l2cap_ertm_send(chan
);
6216 case L2CAP_EV_RECV_RNR
:
6217 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6218 l2cap_pass_to_tx(chan
, control
);
6219 if (control
&& control
->poll
) {
6220 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6221 l2cap_send_rr_or_rnr(chan
, 0);
6223 __clear_retrans_timer(chan
);
6224 l2cap_seq_list_clear(&chan
->retrans_list
);
6226 case L2CAP_EV_RECV_REJ
:
6227 l2cap_handle_rej(chan
, control
);
6229 case L2CAP_EV_RECV_SREJ
:
6230 l2cap_handle_srej(chan
, control
);
6236 if (skb
&& !skb_in_use
) {
6237 BT_DBG("Freeing %p", skb
);
6244 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6245 struct l2cap_ctrl
*control
,
6246 struct sk_buff
*skb
, u8 event
)
6249 u16 txseq
= control
->txseq
;
6250 bool skb_in_use
= false;
6252 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6256 case L2CAP_EV_RECV_IFRAME
:
6257 switch (l2cap_classify_txseq(chan
, txseq
)) {
6258 case L2CAP_TXSEQ_EXPECTED
:
6259 /* Keep frame for reassembly later */
6260 l2cap_pass_to_tx(chan
, control
);
6261 skb_queue_tail(&chan
->srej_q
, skb
);
6263 BT_DBG("Queued %p (queue len %d)", skb
,
6264 skb_queue_len(&chan
->srej_q
));
6266 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6268 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6269 l2cap_seq_list_pop(&chan
->srej_list
);
6271 l2cap_pass_to_tx(chan
, control
);
6272 skb_queue_tail(&chan
->srej_q
, skb
);
6274 BT_DBG("Queued %p (queue len %d)", skb
,
6275 skb_queue_len(&chan
->srej_q
));
6277 err
= l2cap_rx_queued_iframes(chan
);
6282 case L2CAP_TXSEQ_UNEXPECTED
:
6283 /* Got a frame that can't be reassembled yet.
6284 * Save it for later, and send SREJs to cover
6285 * the missing frames.
6287 skb_queue_tail(&chan
->srej_q
, skb
);
6289 BT_DBG("Queued %p (queue len %d)", skb
,
6290 skb_queue_len(&chan
->srej_q
));
6292 l2cap_pass_to_tx(chan
, control
);
6293 l2cap_send_srej(chan
, control
->txseq
);
6295 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6296 /* This frame was requested with an SREJ, but
6297 * some expected retransmitted frames are
6298 * missing. Request retransmission of missing
6301 skb_queue_tail(&chan
->srej_q
, skb
);
6303 BT_DBG("Queued %p (queue len %d)", skb
,
6304 skb_queue_len(&chan
->srej_q
));
6306 l2cap_pass_to_tx(chan
, control
);
6307 l2cap_send_srej_list(chan
, control
->txseq
);
6309 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6310 /* We've already queued this frame. Drop this copy. */
6311 l2cap_pass_to_tx(chan
, control
);
6313 case L2CAP_TXSEQ_DUPLICATE
:
6314 /* Expecting a later sequence number, so this frame
6315 * was already received. Ignore it completely.
6318 case L2CAP_TXSEQ_INVALID_IGNORE
:
6320 case L2CAP_TXSEQ_INVALID
:
6322 l2cap_send_disconn_req(chan
, ECONNRESET
);
6326 case L2CAP_EV_RECV_RR
:
6327 l2cap_pass_to_tx(chan
, control
);
6328 if (control
->final
) {
6329 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6331 if (!test_and_clear_bit(CONN_REJ_ACT
,
6332 &chan
->conn_state
)) {
6334 l2cap_retransmit_all(chan
, control
);
6337 l2cap_ertm_send(chan
);
6338 } else if (control
->poll
) {
6339 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6340 &chan
->conn_state
) &&
6341 chan
->unacked_frames
) {
6342 __set_retrans_timer(chan
);
6345 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6346 l2cap_send_srej_tail(chan
);
6348 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6349 &chan
->conn_state
) &&
6350 chan
->unacked_frames
)
6351 __set_retrans_timer(chan
);
6353 l2cap_send_ack(chan
);
6356 case L2CAP_EV_RECV_RNR
:
6357 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6358 l2cap_pass_to_tx(chan
, control
);
6359 if (control
->poll
) {
6360 l2cap_send_srej_tail(chan
);
6362 struct l2cap_ctrl rr_control
;
6363 memset(&rr_control
, 0, sizeof(rr_control
));
6364 rr_control
.sframe
= 1;
6365 rr_control
.super
= L2CAP_SUPER_RR
;
6366 rr_control
.reqseq
= chan
->buffer_seq
;
6367 l2cap_send_sframe(chan
, &rr_control
);
6371 case L2CAP_EV_RECV_REJ
:
6372 l2cap_handle_rej(chan
, control
);
6374 case L2CAP_EV_RECV_SREJ
:
6375 l2cap_handle_srej(chan
, control
);
6379 if (skb
&& !skb_in_use
) {
6380 BT_DBG("Freeing %p", skb
);
6387 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6389 BT_DBG("chan %p", chan
);
6391 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6394 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6396 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6398 return l2cap_resegment(chan
);
6401 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6402 struct l2cap_ctrl
*control
,
6403 struct sk_buff
*skb
, u8 event
)
6407 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6413 l2cap_process_reqseq(chan
, control
->reqseq
);
6415 if (!skb_queue_empty(&chan
->tx_q
))
6416 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6418 chan
->tx_send_head
= NULL
;
6420 /* Rewind next_tx_seq to the point expected
6423 chan
->next_tx_seq
= control
->reqseq
;
6424 chan
->unacked_frames
= 0;
6426 err
= l2cap_finish_move(chan
);
6430 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6431 l2cap_send_i_or_rr_or_rnr(chan
);
6433 if (event
== L2CAP_EV_RECV_IFRAME
)
6436 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6439 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6440 struct l2cap_ctrl
*control
,
6441 struct sk_buff
*skb
, u8 event
)
6445 if (!control
->final
)
6448 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6450 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6451 l2cap_process_reqseq(chan
, control
->reqseq
);
6453 if (!skb_queue_empty(&chan
->tx_q
))
6454 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6456 chan
->tx_send_head
= NULL
;
6458 /* Rewind next_tx_seq to the point expected
6461 chan
->next_tx_seq
= control
->reqseq
;
6462 chan
->unacked_frames
= 0;
6465 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6467 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6469 err
= l2cap_resegment(chan
);
6472 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6477 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6479 /* Make sure reqseq is for a packet that has been sent but not acked */
6482 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6483 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6486 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6487 struct sk_buff
*skb
, u8 event
)
6491 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6492 control
, skb
, event
, chan
->rx_state
);
6494 if (__valid_reqseq(chan
, control
->reqseq
)) {
6495 switch (chan
->rx_state
) {
6496 case L2CAP_RX_STATE_RECV
:
6497 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6499 case L2CAP_RX_STATE_SREJ_SENT
:
6500 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6503 case L2CAP_RX_STATE_WAIT_P
:
6504 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6506 case L2CAP_RX_STATE_WAIT_F
:
6507 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6514 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6515 control
->reqseq
, chan
->next_tx_seq
,
6516 chan
->expected_ack_seq
);
6517 l2cap_send_disconn_req(chan
, ECONNRESET
);
6523 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6524 struct sk_buff
*skb
)
6528 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6531 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6532 L2CAP_TXSEQ_EXPECTED
) {
6533 l2cap_pass_to_tx(chan
, control
);
6535 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6536 __next_seq(chan
, chan
->buffer_seq
));
6538 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6540 l2cap_reassemble_sdu(chan
, skb
, control
);
6543 kfree_skb(chan
->sdu
);
6546 chan
->sdu_last_frag
= NULL
;
6550 BT_DBG("Freeing %p", skb
);
6555 chan
->last_acked_seq
= control
->txseq
;
6556 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6561 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6563 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6567 __unpack_control(chan
, skb
);
6572 * We can just drop the corrupted I-frame here.
6573 * Receiver will miss it and start proper recovery
6574 * procedures and ask for retransmission.
6576 if (l2cap_check_fcs(chan
, skb
))
6579 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6580 len
-= L2CAP_SDULEN_SIZE
;
6582 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6583 len
-= L2CAP_FCS_SIZE
;
6585 if (len
> chan
->mps
) {
6586 l2cap_send_disconn_req(chan
, ECONNRESET
);
6590 if (!control
->sframe
) {
6593 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6594 control
->sar
, control
->reqseq
, control
->final
,
6597 /* Validate F-bit - F=0 always valid, F=1 only
6598 * valid in TX WAIT_F
6600 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6603 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6604 event
= L2CAP_EV_RECV_IFRAME
;
6605 err
= l2cap_rx(chan
, control
, skb
, event
);
6607 err
= l2cap_stream_rx(chan
, control
, skb
);
6611 l2cap_send_disconn_req(chan
, ECONNRESET
);
6613 const u8 rx_func_to_event
[4] = {
6614 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6615 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6618 /* Only I-frames are expected in streaming mode */
6619 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6622 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6623 control
->reqseq
, control
->final
, control
->poll
,
6627 BT_ERR("Trailing bytes: %d in sframe", len
);
6628 l2cap_send_disconn_req(chan
, ECONNRESET
);
6632 /* Validate F and P bits */
6633 if (control
->final
&& (control
->poll
||
6634 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6637 event
= rx_func_to_event
[control
->super
];
6638 if (l2cap_rx(chan
, control
, skb
, event
))
6639 l2cap_send_disconn_req(chan
, ECONNRESET
);
6649 static void l2cap_chan_le_send_credits(struct l2cap_chan
*chan
)
6651 struct l2cap_conn
*conn
= chan
->conn
;
6652 struct l2cap_le_credits pkt
;
6655 /* We return more credits to the sender only after the amount of
6656 * credits falls below half of the initial amount.
6658 if (chan
->rx_credits
>= (le_max_credits
+ 1) / 2)
6661 return_credits
= le_max_credits
- chan
->rx_credits
;
6663 BT_DBG("chan %p returning %u credits to sender", chan
, return_credits
);
6665 chan
->rx_credits
+= return_credits
;
6667 pkt
.cid
= cpu_to_le16(chan
->scid
);
6668 pkt
.credits
= cpu_to_le16(return_credits
);
6670 chan
->ident
= l2cap_get_ident(conn
);
6672 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CREDITS
, sizeof(pkt
), &pkt
);
6675 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6679 if (!chan
->rx_credits
) {
6680 BT_ERR("No credits to receive LE L2CAP data");
6681 l2cap_send_disconn_req(chan
, ECONNRESET
);
6685 if (chan
->imtu
< skb
->len
) {
6686 BT_ERR("Too big LE L2CAP PDU");
6691 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6693 l2cap_chan_le_send_credits(chan
);
6700 sdu_len
= get_unaligned_le16(skb
->data
);
6701 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6703 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6704 sdu_len
, skb
->len
, chan
->imtu
);
6706 if (sdu_len
> chan
->imtu
) {
6707 BT_ERR("Too big LE L2CAP SDU length received");
6712 if (skb
->len
> sdu_len
) {
6713 BT_ERR("Too much LE L2CAP data received");
6718 if (skb
->len
== sdu_len
)
6719 return chan
->ops
->recv(chan
, skb
);
6722 chan
->sdu_len
= sdu_len
;
6723 chan
->sdu_last_frag
= skb
;
6728 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6729 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6731 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6732 BT_ERR("Too much LE L2CAP data received");
6737 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6740 if (chan
->sdu
->len
== chan
->sdu_len
) {
6741 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6744 chan
->sdu_last_frag
= NULL
;
6752 kfree_skb(chan
->sdu
);
6754 chan
->sdu_last_frag
= NULL
;
6758 /* We can't return an error here since we took care of the skb
6759 * freeing internally. An error return would cause the caller to
6760 * do a double-free of the skb.
6765 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6766 struct sk_buff
*skb
)
6768 struct l2cap_chan
*chan
;
6770 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6772 if (cid
== L2CAP_CID_A2MP
) {
6773 chan
= a2mp_channel_create(conn
, skb
);
6779 l2cap_chan_lock(chan
);
6781 BT_DBG("unknown cid 0x%4.4x", cid
);
6782 /* Drop packet and return */
6788 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6790 if (chan
->state
!= BT_CONNECTED
)
6793 switch (chan
->mode
) {
6794 case L2CAP_MODE_LE_FLOWCTL
:
6795 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6800 case L2CAP_MODE_BASIC
:
6801 /* If socket recv buffers overflows we drop data here
6802 * which is *bad* because L2CAP has to be reliable.
6803 * But we don't have any other choice. L2CAP doesn't
6804 * provide flow control mechanism. */
6806 if (chan
->imtu
< skb
->len
) {
6807 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6811 if (!chan
->ops
->recv(chan
, skb
))
6815 case L2CAP_MODE_ERTM
:
6816 case L2CAP_MODE_STREAMING
:
6817 l2cap_data_rcv(chan
, skb
);
6821 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6829 l2cap_chan_unlock(chan
);
6832 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6833 struct sk_buff
*skb
)
6835 struct hci_conn
*hcon
= conn
->hcon
;
6836 struct l2cap_chan
*chan
;
6838 if (hcon
->type
!= ACL_LINK
)
6841 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
6846 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6848 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6851 if (chan
->imtu
< skb
->len
)
6854 /* Store remote BD_ADDR and PSM for msg_name */
6855 bacpy(&bt_cb(skb
)->bdaddr
, &hcon
->dst
);
6856 bt_cb(skb
)->psm
= psm
;
6858 if (!chan
->ops
->recv(chan
, skb
))
6865 static void l2cap_att_channel(struct l2cap_conn
*conn
,
6866 struct sk_buff
*skb
)
6868 struct hci_conn
*hcon
= conn
->hcon
;
6869 struct l2cap_chan
*chan
;
6871 if (hcon
->type
!= LE_LINK
)
6874 chan
= l2cap_global_chan_by_scid(BT_CONNECTED
, L2CAP_CID_ATT
,
6875 &hcon
->src
, &hcon
->dst
);
6879 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6881 if (chan
->imtu
< skb
->len
)
6884 if (!chan
->ops
->recv(chan
, skb
))
6891 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6893 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6894 struct hci_conn
*hcon
= conn
->hcon
;
6898 if (hcon
->state
!= BT_CONNECTED
) {
6899 BT_DBG("queueing pending rx skb");
6900 skb_queue_tail(&conn
->pending_rx
, skb
);
6904 skb_pull(skb
, L2CAP_HDR_SIZE
);
6905 cid
= __le16_to_cpu(lh
->cid
);
6906 len
= __le16_to_cpu(lh
->len
);
6908 if (len
!= skb
->len
) {
6913 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
,
6914 bdaddr_type(hcon
, hcon
->dst_type
))) {
6919 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6922 case L2CAP_CID_SIGNALING
:
6923 l2cap_sig_channel(conn
, skb
);
6926 case L2CAP_CID_CONN_LESS
:
6927 psm
= get_unaligned((__le16
*) skb
->data
);
6928 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6929 l2cap_conless_channel(conn
, psm
, skb
);
6933 l2cap_att_channel(conn
, skb
);
6936 case L2CAP_CID_LE_SIGNALING
:
6937 l2cap_le_sig_channel(conn
, skb
);
6941 if (smp_sig_channel(conn
, skb
))
6942 l2cap_conn_del(conn
->hcon
, EACCES
);
6946 l2cap_data_channel(conn
, cid
, skb
);
6951 static void process_pending_rx(struct work_struct
*work
)
6953 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
6955 struct sk_buff
*skb
;
6959 while ((skb
= skb_dequeue(&conn
->pending_rx
)))
6960 l2cap_recv_frame(conn
, skb
);
6963 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
6965 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6966 struct hci_chan
*hchan
;
6971 hchan
= hci_chan_create(hcon
);
6975 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
6977 hci_chan_del(hchan
);
6981 kref_init(&conn
->ref
);
6982 hcon
->l2cap_data
= conn
;
6984 hci_conn_get(conn
->hcon
);
6985 conn
->hchan
= hchan
;
6987 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
6989 switch (hcon
->type
) {
6991 if (hcon
->hdev
->le_mtu
) {
6992 conn
->mtu
= hcon
->hdev
->le_mtu
;
6997 conn
->mtu
= hcon
->hdev
->acl_mtu
;
7001 conn
->feat_mask
= 0;
7003 if (hcon
->type
== ACL_LINK
)
7004 conn
->hs_enabled
= test_bit(HCI_HS_ENABLED
,
7005 &hcon
->hdev
->dev_flags
);
7007 spin_lock_init(&conn
->lock
);
7008 mutex_init(&conn
->chan_lock
);
7010 INIT_LIST_HEAD(&conn
->chan_l
);
7011 INIT_LIST_HEAD(&conn
->users
);
7013 if (hcon
->type
== LE_LINK
)
7014 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
7016 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
7018 skb_queue_head_init(&conn
->pending_rx
);
7019 INIT_WORK(&conn
->pending_rx_work
, process_pending_rx
);
7021 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
7026 static bool is_valid_psm(u16 psm
, u8 dst_type
) {
7030 if (bdaddr_type_is_le(dst_type
))
7031 return (psm
<= 0x00ff);
7033 /* PSM must be odd and lsb of upper byte must be 0 */
7034 return ((psm
& 0x0101) == 0x0001);
7037 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
7038 bdaddr_t
*dst
, u8 dst_type
)
7040 struct l2cap_conn
*conn
;
7041 struct hci_conn
*hcon
;
7042 struct hci_dev
*hdev
;
7046 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
7047 dst_type
, __le16_to_cpu(psm
));
7049 hdev
= hci_get_route(dst
, &chan
->src
);
7051 return -EHOSTUNREACH
;
7055 l2cap_chan_lock(chan
);
7057 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
7058 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
7063 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !psm
) {
7068 if (chan
->chan_type
== L2CAP_CHAN_FIXED
&& !cid
) {
7073 switch (chan
->mode
) {
7074 case L2CAP_MODE_BASIC
:
7076 case L2CAP_MODE_LE_FLOWCTL
:
7077 l2cap_le_flowctl_init(chan
);
7079 case L2CAP_MODE_ERTM
:
7080 case L2CAP_MODE_STREAMING
:
7089 switch (chan
->state
) {
7093 /* Already connecting */
7098 /* Already connected */
7112 /* Set destination address and psm */
7113 bacpy(&chan
->dst
, dst
);
7114 chan
->dst_type
= dst_type
;
7119 auth_type
= l2cap_get_auth_type(chan
);
7121 if (bdaddr_type_is_le(dst_type
)) {
7122 /* Convert from L2CAP channel address type to HCI address type
7124 if (dst_type
== BDADDR_LE_PUBLIC
)
7125 dst_type
= ADDR_LE_DEV_PUBLIC
;
7127 dst_type
= ADDR_LE_DEV_RANDOM
;
7129 hcon
= hci_connect_le(hdev
, dst
, dst_type
, chan
->sec_level
,
7132 hcon
= hci_connect_acl(hdev
, dst
, chan
->sec_level
, auth_type
);
7136 err
= PTR_ERR(hcon
);
7140 conn
= l2cap_conn_add(hcon
);
7142 hci_conn_drop(hcon
);
7147 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
7148 hci_conn_drop(hcon
);
7153 /* Update source addr of the socket */
7154 bacpy(&chan
->src
, &hcon
->src
);
7155 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
7157 l2cap_chan_unlock(chan
);
7158 l2cap_chan_add(conn
, chan
);
7159 l2cap_chan_lock(chan
);
7161 /* l2cap_chan_add takes its own ref so we can drop this one */
7162 hci_conn_drop(hcon
);
7164 l2cap_state_change(chan
, BT_CONNECT
);
7165 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
7167 /* Release chan->sport so that it can be reused by other
7168 * sockets (as it's only used for listening sockets).
7170 write_lock(&chan_list_lock
);
7172 write_unlock(&chan_list_lock
);
7174 if (hcon
->state
== BT_CONNECTED
) {
7175 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
7176 __clear_chan_timer(chan
);
7177 if (l2cap_chan_check_security(chan
))
7178 l2cap_state_change(chan
, BT_CONNECTED
);
7180 l2cap_do_start(chan
);
7186 l2cap_chan_unlock(chan
);
7187 hci_dev_unlock(hdev
);
7191 EXPORT_SYMBOL_GPL(l2cap_chan_connect
);
7193 /* ---- L2CAP interface with lower layer (HCI) ---- */
7195 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
7197 int exact
= 0, lm1
= 0, lm2
= 0;
7198 struct l2cap_chan
*c
;
7200 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
7202 /* Find listening sockets and check their link_mode */
7203 read_lock(&chan_list_lock
);
7204 list_for_each_entry(c
, &chan_list
, global_l
) {
7205 if (c
->state
!= BT_LISTEN
)
7208 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
7209 lm1
|= HCI_LM_ACCEPT
;
7210 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7211 lm1
|= HCI_LM_MASTER
;
7213 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
7214 lm2
|= HCI_LM_ACCEPT
;
7215 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7216 lm2
|= HCI_LM_MASTER
;
7219 read_unlock(&chan_list_lock
);
7221 return exact
? lm1
: lm2
;
7224 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7226 struct l2cap_conn
*conn
;
7228 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7231 conn
= l2cap_conn_add(hcon
);
7233 l2cap_conn_ready(conn
);
7235 l2cap_conn_del(hcon
, bt_to_errno(status
));
7239 int l2cap_disconn_ind(struct hci_conn
*hcon
)
7241 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7243 BT_DBG("hcon %p", hcon
);
7246 return HCI_ERROR_REMOTE_USER_TERM
;
7247 return conn
->disc_reason
;
7250 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
7252 BT_DBG("hcon %p reason %d", hcon
, reason
);
7254 l2cap_conn_del(hcon
, bt_to_errno(reason
));
7257 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
7259 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
7262 if (encrypt
== 0x00) {
7263 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
7264 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
7265 } else if (chan
->sec_level
== BT_SECURITY_HIGH
||
7266 chan
->sec_level
== BT_SECURITY_FIPS
)
7267 l2cap_chan_close(chan
, ECONNREFUSED
);
7269 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
7270 __clear_chan_timer(chan
);
7274 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
7276 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7277 struct l2cap_chan
*chan
;
7282 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
7284 if (hcon
->type
== LE_LINK
) {
7285 if (!status
&& encrypt
)
7286 smp_distribute_keys(conn
);
7287 cancel_delayed_work(&conn
->security_timer
);
7290 mutex_lock(&conn
->chan_lock
);
7292 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
7293 l2cap_chan_lock(chan
);
7295 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
7296 state_to_string(chan
->state
));
7298 if (chan
->scid
== L2CAP_CID_A2MP
) {
7299 l2cap_chan_unlock(chan
);
7303 if (chan
->scid
== L2CAP_CID_ATT
) {
7304 if (!status
&& encrypt
) {
7305 chan
->sec_level
= hcon
->sec_level
;
7306 l2cap_chan_ready(chan
);
7309 l2cap_chan_unlock(chan
);
7313 if (!__l2cap_no_conn_pending(chan
)) {
7314 l2cap_chan_unlock(chan
);
7318 if (!status
&& (chan
->state
== BT_CONNECTED
||
7319 chan
->state
== BT_CONFIG
)) {
7320 chan
->ops
->resume(chan
);
7321 l2cap_check_encryption(chan
, encrypt
);
7322 l2cap_chan_unlock(chan
);
7326 if (chan
->state
== BT_CONNECT
) {
7328 l2cap_start_connection(chan
);
7330 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7331 } else if (chan
->state
== BT_CONNECT2
) {
7332 struct l2cap_conn_rsp rsp
;
7336 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
7337 res
= L2CAP_CR_PEND
;
7338 stat
= L2CAP_CS_AUTHOR_PEND
;
7339 chan
->ops
->defer(chan
);
7341 l2cap_state_change(chan
, BT_CONFIG
);
7342 res
= L2CAP_CR_SUCCESS
;
7343 stat
= L2CAP_CS_NO_INFO
;
7346 l2cap_state_change(chan
, BT_DISCONN
);
7347 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7348 res
= L2CAP_CR_SEC_BLOCK
;
7349 stat
= L2CAP_CS_NO_INFO
;
7352 rsp
.scid
= cpu_to_le16(chan
->dcid
);
7353 rsp
.dcid
= cpu_to_le16(chan
->scid
);
7354 rsp
.result
= cpu_to_le16(res
);
7355 rsp
.status
= cpu_to_le16(stat
);
7356 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
7359 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
7360 res
== L2CAP_CR_SUCCESS
) {
7362 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
7363 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
7365 l2cap_build_conf_req(chan
, buf
),
7367 chan
->num_conf_req
++;
7371 l2cap_chan_unlock(chan
);
7374 mutex_unlock(&conn
->chan_lock
);
7379 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
7381 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7382 struct l2cap_hdr
*hdr
;
7385 /* For AMP controller do not create l2cap conn */
7386 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
7390 conn
= l2cap_conn_add(hcon
);
7395 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
7399 case ACL_START_NO_FLUSH
:
7402 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
7403 kfree_skb(conn
->rx_skb
);
7404 conn
->rx_skb
= NULL
;
7406 l2cap_conn_unreliable(conn
, ECOMM
);
7409 /* Start fragment always begin with Basic L2CAP header */
7410 if (skb
->len
< L2CAP_HDR_SIZE
) {
7411 BT_ERR("Frame is too short (len %d)", skb
->len
);
7412 l2cap_conn_unreliable(conn
, ECOMM
);
7416 hdr
= (struct l2cap_hdr
*) skb
->data
;
7417 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7419 if (len
== skb
->len
) {
7420 /* Complete frame received */
7421 l2cap_recv_frame(conn
, skb
);
7425 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7427 if (skb
->len
> len
) {
7428 BT_ERR("Frame is too long (len %d, expected len %d)",
7430 l2cap_conn_unreliable(conn
, ECOMM
);
7434 /* Allocate skb for the complete frame (with header) */
7435 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7439 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7441 conn
->rx_len
= len
- skb
->len
;
7445 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7447 if (!conn
->rx_len
) {
7448 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7449 l2cap_conn_unreliable(conn
, ECOMM
);
7453 if (skb
->len
> conn
->rx_len
) {
7454 BT_ERR("Fragment is too long (len %d, expected %d)",
7455 skb
->len
, conn
->rx_len
);
7456 kfree_skb(conn
->rx_skb
);
7457 conn
->rx_skb
= NULL
;
7459 l2cap_conn_unreliable(conn
, ECOMM
);
7463 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7465 conn
->rx_len
-= skb
->len
;
7467 if (!conn
->rx_len
) {
7468 /* Complete frame received. l2cap_recv_frame
7469 * takes ownership of the skb so set the global
7470 * rx_skb pointer to NULL first.
7472 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7473 conn
->rx_skb
= NULL
;
7474 l2cap_recv_frame(conn
, rx_skb
);
7484 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
7486 struct l2cap_chan
*c
;
7488 read_lock(&chan_list_lock
);
7490 list_for_each_entry(c
, &chan_list
, global_l
) {
7491 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7493 c
->state
, __le16_to_cpu(c
->psm
),
7494 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
7495 c
->sec_level
, c
->mode
);
7498 read_unlock(&chan_list_lock
);
7503 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
7505 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
7508 static const struct file_operations l2cap_debugfs_fops
= {
7509 .open
= l2cap_debugfs_open
,
7511 .llseek
= seq_lseek
,
7512 .release
= single_release
,
/* debugfs dentry for the channel-list file; created in l2cap_init() */
static struct dentry *l2cap_debugfs;
7517 int __init
l2cap_init(void)
7521 err
= l2cap_init_sockets();
7525 if (IS_ERR_OR_NULL(bt_debugfs
))
7528 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
7529 NULL
, &l2cap_debugfs_fops
);
7531 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs
,
7533 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs
,
7539 void l2cap_exit(void)
7541 debugfs_remove(l2cap_debugfs
);
7542 l2cap_cleanup_sockets();
7545 module_param(disable_ertm
, bool, 0644);
7546 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");