2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
44 #define LE_FLOWCTL_MAX_CREDITS 65535
48 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
49 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_SIG_BREDR
| L2CAP_FC_CONNLESS
, };
51 static LIST_HEAD(chan_list
);
52 static DEFINE_RWLOCK(chan_list_lock
);
54 static u16 le_max_credits
= L2CAP_LE_MAX_CREDITS
;
55 static u16 le_default_mps
= L2CAP_LE_DEFAULT_MPS
;
57 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
58 u8 code
, u8 ident
, u16 dlen
, void *data
);
59 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
61 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
62 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
64 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
65 struct sk_buff_head
*skbs
, u8 event
);
67 static inline __u8
bdaddr_type(struct hci_conn
*hcon
, __u8 type
)
69 if (hcon
->type
== LE_LINK
) {
70 if (type
== ADDR_LE_DEV_PUBLIC
)
71 return BDADDR_LE_PUBLIC
;
73 return BDADDR_LE_RANDOM
;
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
86 list_for_each_entry(c
, &conn
->chan_l
, list
) {
93 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
98 list_for_each_entry(c
, &conn
->chan_l
, list
) {
105 /* Find channel with given SCID.
106 * Returns locked channel. */
107 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
110 struct l2cap_chan
*c
;
112 mutex_lock(&conn
->chan_lock
);
113 c
= __l2cap_get_chan_by_scid(conn
, cid
);
116 mutex_unlock(&conn
->chan_lock
);
121 /* Find channel with given DCID.
122 * Returns locked channel.
124 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
127 struct l2cap_chan
*c
;
129 mutex_lock(&conn
->chan_lock
);
130 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
133 mutex_unlock(&conn
->chan_lock
);
138 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
141 struct l2cap_chan
*c
;
143 list_for_each_entry(c
, &conn
->chan_l
, list
) {
144 if (c
->ident
== ident
)
150 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
153 struct l2cap_chan
*c
;
155 mutex_lock(&conn
->chan_lock
);
156 c
= __l2cap_get_chan_by_ident(conn
, ident
);
159 mutex_unlock(&conn
->chan_lock
);
164 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
166 struct l2cap_chan
*c
;
168 list_for_each_entry(c
, &chan_list
, global_l
) {
169 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
175 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
179 write_lock(&chan_list_lock
);
181 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
194 for (p
= 0x1001; p
< 0x1100; p
+= 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
196 chan
->psm
= cpu_to_le16(p
);
197 chan
->sport
= cpu_to_le16(p
);
204 write_unlock(&chan_list_lock
);
207 EXPORT_SYMBOL_GPL(l2cap_add_psm
);
209 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
211 write_lock(&chan_list_lock
);
213 /* Override the defaults (which are for conn-oriented) */
214 chan
->omtu
= L2CAP_DEFAULT_MTU
;
215 chan
->chan_type
= L2CAP_CHAN_FIXED
;
219 write_unlock(&chan_list_lock
);
224 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
228 if (conn
->hcon
->type
== LE_LINK
)
229 dyn_end
= L2CAP_CID_LE_DYN_END
;
231 dyn_end
= L2CAP_CID_DYN_END
;
233 for (cid
= L2CAP_CID_DYN_START
; cid
< dyn_end
; cid
++) {
234 if (!__l2cap_get_chan_by_scid(conn
, cid
))
241 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
243 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
244 state_to_string(state
));
247 chan
->ops
->state_change(chan
, state
, 0);
250 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
254 chan
->ops
->state_change(chan
, chan
->state
, err
);
257 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
259 chan
->ops
->state_change(chan
, chan
->state
, err
);
262 static void __set_retrans_timer(struct l2cap_chan
*chan
)
264 if (!delayed_work_pending(&chan
->monitor_timer
) &&
265 chan
->retrans_timeout
) {
266 l2cap_set_timer(chan
, &chan
->retrans_timer
,
267 msecs_to_jiffies(chan
->retrans_timeout
));
271 static void __set_monitor_timer(struct l2cap_chan
*chan
)
273 __clear_retrans_timer(chan
);
274 if (chan
->monitor_timeout
) {
275 l2cap_set_timer(chan
, &chan
->monitor_timer
,
276 msecs_to_jiffies(chan
->monitor_timeout
));
280 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
285 skb_queue_walk(head
, skb
) {
286 if (bt_cb(skb
)->control
.txseq
== seq
)
293 /* ---- L2CAP sequence number lists ---- */
295 /* For ERTM, ordered lists of sequence numbers must be tracked for
296 * SREJ requests that are received and for frames that are to be
297 * retransmitted. These seq_list functions implement a singly-linked
298 * list in an array, where membership in the list can also be checked
299 * in constant time. Items can also be added to the tail of the list
300 * and removed from the head in constant time, without further memory
304 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
306 size_t alloc_size
, i
;
308 /* Allocated size is a power of 2 to map sequence numbers
309 * (which may be up to 14 bits) in to a smaller array that is
310 * sized for the negotiated ERTM transmit windows.
312 alloc_size
= roundup_pow_of_two(size
);
314 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
318 seq_list
->mask
= alloc_size
- 1;
319 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
320 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
321 for (i
= 0; i
< alloc_size
; i
++)
322 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
327 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
329 kfree(seq_list
->list
);
332 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
335 /* Constant-time check for list membership */
336 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
339 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
341 u16 seq
= seq_list
->head
;
342 u16 mask
= seq_list
->mask
;
344 seq_list
->head
= seq_list
->list
[seq
& mask
];
345 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
347 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
348 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
349 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
355 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
359 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
362 for (i
= 0; i
<= seq_list
->mask
; i
++)
363 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
365 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
366 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
369 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
371 u16 mask
= seq_list
->mask
;
373 /* All appends happen in constant time */
375 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
378 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
379 seq_list
->head
= seq
;
381 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
383 seq_list
->tail
= seq
;
384 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
387 static void l2cap_chan_timeout(struct work_struct
*work
)
389 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
391 struct l2cap_conn
*conn
= chan
->conn
;
394 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
396 mutex_lock(&conn
->chan_lock
);
397 l2cap_chan_lock(chan
);
399 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
400 reason
= ECONNREFUSED
;
401 else if (chan
->state
== BT_CONNECT
&&
402 chan
->sec_level
!= BT_SECURITY_SDP
)
403 reason
= ECONNREFUSED
;
407 l2cap_chan_close(chan
, reason
);
409 l2cap_chan_unlock(chan
);
411 chan
->ops
->close(chan
);
412 mutex_unlock(&conn
->chan_lock
);
414 l2cap_chan_put(chan
);
417 struct l2cap_chan
*l2cap_chan_create(void)
419 struct l2cap_chan
*chan
;
421 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
425 mutex_init(&chan
->lock
);
427 write_lock(&chan_list_lock
);
428 list_add(&chan
->global_l
, &chan_list
);
429 write_unlock(&chan_list_lock
);
431 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
433 chan
->state
= BT_OPEN
;
435 kref_init(&chan
->kref
);
437 /* This flag is cleared in l2cap_chan_ready() */
438 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
440 BT_DBG("chan %p", chan
);
444 EXPORT_SYMBOL_GPL(l2cap_chan_create
);
446 static void l2cap_chan_destroy(struct kref
*kref
)
448 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
450 BT_DBG("chan %p", chan
);
452 write_lock(&chan_list_lock
);
453 list_del(&chan
->global_l
);
454 write_unlock(&chan_list_lock
);
459 void l2cap_chan_hold(struct l2cap_chan
*c
)
461 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
466 void l2cap_chan_put(struct l2cap_chan
*c
)
468 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
470 kref_put(&c
->kref
, l2cap_chan_destroy
);
472 EXPORT_SYMBOL_GPL(l2cap_chan_put
);
474 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
476 chan
->fcs
= L2CAP_FCS_CRC16
;
477 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
478 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
479 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
480 chan
->remote_max_tx
= chan
->max_tx
;
481 chan
->remote_tx_win
= chan
->tx_win
;
482 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
483 chan
->sec_level
= BT_SECURITY_LOW
;
484 chan
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
485 chan
->retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
486 chan
->monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
487 chan
->conf_state
= 0;
489 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
491 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults
);
493 static void l2cap_le_flowctl_init(struct l2cap_chan
*chan
)
496 chan
->sdu_last_frag
= NULL
;
498 chan
->tx_credits
= 0;
499 chan
->rx_credits
= le_max_credits
;
500 chan
->mps
= min_t(u16
, chan
->imtu
, le_default_mps
);
502 skb_queue_head_init(&chan
->tx_q
);
505 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
507 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
508 __le16_to_cpu(chan
->psm
), chan
->dcid
);
510 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
514 switch (chan
->chan_type
) {
515 case L2CAP_CHAN_CONN_ORIENTED
:
516 /* Alloc CID for connection-oriented socket */
517 chan
->scid
= l2cap_alloc_cid(conn
);
518 if (conn
->hcon
->type
== ACL_LINK
)
519 chan
->omtu
= L2CAP_DEFAULT_MTU
;
522 case L2CAP_CHAN_CONN_LESS
:
523 /* Connectionless socket */
524 chan
->scid
= L2CAP_CID_CONN_LESS
;
525 chan
->dcid
= L2CAP_CID_CONN_LESS
;
526 chan
->omtu
= L2CAP_DEFAULT_MTU
;
529 case L2CAP_CHAN_FIXED
:
530 /* Caller will set CID and CID specific MTU values */
534 /* Raw socket can send/recv signalling messages only */
535 chan
->scid
= L2CAP_CID_SIGNALING
;
536 chan
->dcid
= L2CAP_CID_SIGNALING
;
537 chan
->omtu
= L2CAP_DEFAULT_MTU
;
540 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
541 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
542 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
543 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
544 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
545 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
547 l2cap_chan_hold(chan
);
549 /* Only keep a reference for fixed channels if they requested it */
550 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
551 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
552 hci_conn_hold(conn
->hcon
);
554 list_add(&chan
->list
, &conn
->chan_l
);
557 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
559 mutex_lock(&conn
->chan_lock
);
560 __l2cap_chan_add(conn
, chan
);
561 mutex_unlock(&conn
->chan_lock
);
564 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
566 struct l2cap_conn
*conn
= chan
->conn
;
568 __clear_chan_timer(chan
);
570 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
572 chan
->ops
->teardown(chan
, err
);
575 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
576 /* Delete from channel list */
577 list_del(&chan
->list
);
579 l2cap_chan_put(chan
);
583 /* Reference was only held for non-fixed channels or
584 * fixed channels that explicitly requested it using the
585 * FLAG_HOLD_HCI_CONN flag.
587 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
588 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
589 hci_conn_drop(conn
->hcon
);
591 if (mgr
&& mgr
->bredr_chan
== chan
)
592 mgr
->bredr_chan
= NULL
;
595 if (chan
->hs_hchan
) {
596 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
598 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
599 amp_disconnect_logical_link(hs_hchan
);
602 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
606 case L2CAP_MODE_BASIC
:
609 case L2CAP_MODE_LE_FLOWCTL
:
610 skb_queue_purge(&chan
->tx_q
);
613 case L2CAP_MODE_ERTM
:
614 __clear_retrans_timer(chan
);
615 __clear_monitor_timer(chan
);
616 __clear_ack_timer(chan
);
618 skb_queue_purge(&chan
->srej_q
);
620 l2cap_seq_list_free(&chan
->srej_list
);
621 l2cap_seq_list_free(&chan
->retrans_list
);
625 case L2CAP_MODE_STREAMING
:
626 skb_queue_purge(&chan
->tx_q
);
632 EXPORT_SYMBOL_GPL(l2cap_chan_del
);
634 void l2cap_conn_update_id_addr(struct hci_conn
*hcon
)
636 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
637 struct l2cap_chan
*chan
;
639 mutex_lock(&conn
->chan_lock
);
641 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
642 l2cap_chan_lock(chan
);
643 bacpy(&chan
->dst
, &hcon
->dst
);
644 chan
->dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
645 l2cap_chan_unlock(chan
);
648 mutex_unlock(&conn
->chan_lock
);
651 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
653 struct l2cap_conn
*conn
= chan
->conn
;
654 struct l2cap_le_conn_rsp rsp
;
657 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
658 result
= L2CAP_CR_AUTHORIZATION
;
660 result
= L2CAP_CR_BAD_PSM
;
662 l2cap_state_change(chan
, BT_DISCONN
);
664 rsp
.dcid
= cpu_to_le16(chan
->scid
);
665 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
666 rsp
.mps
= cpu_to_le16(chan
->mps
);
667 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
668 rsp
.result
= cpu_to_le16(result
);
670 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
674 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
676 struct l2cap_conn
*conn
= chan
->conn
;
677 struct l2cap_conn_rsp rsp
;
680 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
681 result
= L2CAP_CR_SEC_BLOCK
;
683 result
= L2CAP_CR_BAD_PSM
;
685 l2cap_state_change(chan
, BT_DISCONN
);
687 rsp
.scid
= cpu_to_le16(chan
->dcid
);
688 rsp
.dcid
= cpu_to_le16(chan
->scid
);
689 rsp
.result
= cpu_to_le16(result
);
690 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
692 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
695 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
697 struct l2cap_conn
*conn
= chan
->conn
;
699 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
701 switch (chan
->state
) {
703 chan
->ops
->teardown(chan
, 0);
708 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
709 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
710 l2cap_send_disconn_req(chan
, reason
);
712 l2cap_chan_del(chan
, reason
);
716 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
717 if (conn
->hcon
->type
== ACL_LINK
)
718 l2cap_chan_connect_reject(chan
);
719 else if (conn
->hcon
->type
== LE_LINK
)
720 l2cap_chan_le_connect_reject(chan
);
723 l2cap_chan_del(chan
, reason
);
728 l2cap_chan_del(chan
, reason
);
732 chan
->ops
->teardown(chan
, 0);
736 EXPORT_SYMBOL(l2cap_chan_close
);
738 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
740 switch (chan
->chan_type
) {
742 switch (chan
->sec_level
) {
743 case BT_SECURITY_HIGH
:
744 case BT_SECURITY_FIPS
:
745 return HCI_AT_DEDICATED_BONDING_MITM
;
746 case BT_SECURITY_MEDIUM
:
747 return HCI_AT_DEDICATED_BONDING
;
749 return HCI_AT_NO_BONDING
;
752 case L2CAP_CHAN_CONN_LESS
:
753 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_3DSP
)) {
754 if (chan
->sec_level
== BT_SECURITY_LOW
)
755 chan
->sec_level
= BT_SECURITY_SDP
;
757 if (chan
->sec_level
== BT_SECURITY_HIGH
||
758 chan
->sec_level
== BT_SECURITY_FIPS
)
759 return HCI_AT_NO_BONDING_MITM
;
761 return HCI_AT_NO_BONDING
;
763 case L2CAP_CHAN_CONN_ORIENTED
:
764 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_SDP
)) {
765 if (chan
->sec_level
== BT_SECURITY_LOW
)
766 chan
->sec_level
= BT_SECURITY_SDP
;
768 if (chan
->sec_level
== BT_SECURITY_HIGH
||
769 chan
->sec_level
== BT_SECURITY_FIPS
)
770 return HCI_AT_NO_BONDING_MITM
;
772 return HCI_AT_NO_BONDING
;
776 switch (chan
->sec_level
) {
777 case BT_SECURITY_HIGH
:
778 case BT_SECURITY_FIPS
:
779 return HCI_AT_GENERAL_BONDING_MITM
;
780 case BT_SECURITY_MEDIUM
:
781 return HCI_AT_GENERAL_BONDING
;
783 return HCI_AT_NO_BONDING
;
789 /* Service level security */
790 int l2cap_chan_check_security(struct l2cap_chan
*chan
, bool initiator
)
792 struct l2cap_conn
*conn
= chan
->conn
;
795 if (conn
->hcon
->type
== LE_LINK
)
796 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
798 auth_type
= l2cap_get_auth_type(chan
);
800 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
,
804 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
808 /* Get next available identificator.
809 * 1 - 128 are used by kernel.
810 * 129 - 199 are reserved.
811 * 200 - 254 are used by utilities like l2ping, etc.
814 mutex_lock(&conn
->ident_lock
);
816 if (++conn
->tx_ident
> 128)
821 mutex_unlock(&conn
->ident_lock
);
826 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
829 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
832 BT_DBG("code 0x%2.2x", code
);
837 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
838 flags
= ACL_START_NO_FLUSH
;
842 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
843 skb
->priority
= HCI_PRIO_MAX
;
845 hci_send_acl(conn
->hchan
, skb
, flags
);
848 static bool __chan_is_moving(struct l2cap_chan
*chan
)
850 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
851 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
854 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
856 struct hci_conn
*hcon
= chan
->conn
->hcon
;
859 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
862 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
864 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
871 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
872 lmp_no_flush_capable(hcon
->hdev
))
873 flags
= ACL_START_NO_FLUSH
;
877 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
878 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
881 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
883 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
884 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
886 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
889 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
890 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
897 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
898 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
905 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
907 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
908 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
910 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
913 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
914 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
921 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
922 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
929 static inline void __unpack_control(struct l2cap_chan
*chan
,
932 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
933 __unpack_extended_control(get_unaligned_le32(skb
->data
),
934 &bt_cb(skb
)->control
);
935 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
937 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
938 &bt_cb(skb
)->control
);
939 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
943 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
947 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
948 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
950 if (control
->sframe
) {
951 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
952 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
953 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
955 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
956 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
962 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
966 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
967 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
969 if (control
->sframe
) {
970 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
971 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
972 packed
|= L2CAP_CTRL_FRAME_TYPE
;
974 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
975 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
981 static inline void __pack_control(struct l2cap_chan
*chan
,
982 struct l2cap_ctrl
*control
,
985 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
986 put_unaligned_le32(__pack_extended_control(control
),
987 skb
->data
+ L2CAP_HDR_SIZE
);
989 put_unaligned_le16(__pack_enhanced_control(control
),
990 skb
->data
+ L2CAP_HDR_SIZE
);
994 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
996 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
997 return L2CAP_EXT_HDR_SIZE
;
999 return L2CAP_ENH_HDR_SIZE
;
1002 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
1005 struct sk_buff
*skb
;
1006 struct l2cap_hdr
*lh
;
1007 int hlen
= __ertm_hdr_size(chan
);
1009 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1010 hlen
+= L2CAP_FCS_SIZE
;
1012 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
1015 return ERR_PTR(-ENOMEM
);
1017 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1018 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
1019 lh
->cid
= cpu_to_le16(chan
->dcid
);
1021 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1022 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1024 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1026 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1027 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1028 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1031 skb
->priority
= HCI_PRIO_MAX
;
1035 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1036 struct l2cap_ctrl
*control
)
1038 struct sk_buff
*skb
;
1041 BT_DBG("chan %p, control %p", chan
, control
);
1043 if (!control
->sframe
)
1046 if (__chan_is_moving(chan
))
1049 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1053 if (control
->super
== L2CAP_SUPER_RR
)
1054 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1055 else if (control
->super
== L2CAP_SUPER_RNR
)
1056 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1058 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1059 chan
->last_acked_seq
= control
->reqseq
;
1060 __clear_ack_timer(chan
);
1063 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1064 control
->final
, control
->poll
, control
->super
);
1066 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1067 control_field
= __pack_extended_control(control
);
1069 control_field
= __pack_enhanced_control(control
);
1071 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1073 l2cap_do_send(chan
, skb
);
1076 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1078 struct l2cap_ctrl control
;
1080 BT_DBG("chan %p, poll %d", chan
, poll
);
1082 memset(&control
, 0, sizeof(control
));
1084 control
.poll
= poll
;
1086 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1087 control
.super
= L2CAP_SUPER_RNR
;
1089 control
.super
= L2CAP_SUPER_RR
;
1091 control
.reqseq
= chan
->buffer_seq
;
1092 l2cap_send_sframe(chan
, &control
);
1095 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1097 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
1100 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1103 static bool __amp_capable(struct l2cap_chan
*chan
)
1105 struct l2cap_conn
*conn
= chan
->conn
;
1106 struct hci_dev
*hdev
;
1107 bool amp_available
= false;
1109 if (!conn
->hs_enabled
)
1112 if (!(conn
->fixed_chan_mask
& L2CAP_FC_A2MP
))
1115 read_lock(&hci_dev_list_lock
);
1116 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1117 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1118 test_bit(HCI_UP
, &hdev
->flags
)) {
1119 amp_available
= true;
1123 read_unlock(&hci_dev_list_lock
);
1125 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1126 return amp_available
;
1131 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1133 /* Check EFS parameters */
1137 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1139 struct l2cap_conn
*conn
= chan
->conn
;
1140 struct l2cap_conn_req req
;
1142 req
.scid
= cpu_to_le16(chan
->scid
);
1143 req
.psm
= chan
->psm
;
1145 chan
->ident
= l2cap_get_ident(conn
);
1147 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1149 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1152 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1154 struct l2cap_create_chan_req req
;
1155 req
.scid
= cpu_to_le16(chan
->scid
);
1156 req
.psm
= chan
->psm
;
1157 req
.amp_id
= amp_id
;
1159 chan
->ident
= l2cap_get_ident(chan
->conn
);
1161 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1165 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1167 struct sk_buff
*skb
;
1169 BT_DBG("chan %p", chan
);
1171 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1174 __clear_retrans_timer(chan
);
1175 __clear_monitor_timer(chan
);
1176 __clear_ack_timer(chan
);
1178 chan
->retry_count
= 0;
1179 skb_queue_walk(&chan
->tx_q
, skb
) {
1180 if (bt_cb(skb
)->control
.retries
)
1181 bt_cb(skb
)->control
.retries
= 1;
1186 chan
->expected_tx_seq
= chan
->buffer_seq
;
1188 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1189 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1190 l2cap_seq_list_clear(&chan
->retrans_list
);
1191 l2cap_seq_list_clear(&chan
->srej_list
);
1192 skb_queue_purge(&chan
->srej_q
);
1194 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1195 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1197 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1200 static void l2cap_move_done(struct l2cap_chan
*chan
)
1202 u8 move_role
= chan
->move_role
;
1203 BT_DBG("chan %p", chan
);
1205 chan
->move_state
= L2CAP_MOVE_STABLE
;
1206 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1208 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1211 switch (move_role
) {
1212 case L2CAP_MOVE_ROLE_INITIATOR
:
1213 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1214 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1216 case L2CAP_MOVE_ROLE_RESPONDER
:
1217 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1222 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1224 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1225 chan
->conf_state
= 0;
1226 __clear_chan_timer(chan
);
1228 if (chan
->mode
== L2CAP_MODE_LE_FLOWCTL
&& !chan
->tx_credits
)
1229 chan
->ops
->suspend(chan
);
1231 chan
->state
= BT_CONNECTED
;
1233 chan
->ops
->ready(chan
);
1236 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1238 struct l2cap_conn
*conn
= chan
->conn
;
1239 struct l2cap_le_conn_req req
;
1241 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
))
1244 req
.psm
= chan
->psm
;
1245 req
.scid
= cpu_to_le16(chan
->scid
);
1246 req
.mtu
= cpu_to_le16(chan
->imtu
);
1247 req
.mps
= cpu_to_le16(chan
->mps
);
1248 req
.credits
= cpu_to_le16(chan
->rx_credits
);
1250 chan
->ident
= l2cap_get_ident(conn
);
1252 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1256 static void l2cap_le_start(struct l2cap_chan
*chan
)
1258 struct l2cap_conn
*conn
= chan
->conn
;
1260 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1264 l2cap_chan_ready(chan
);
1268 if (chan
->state
== BT_CONNECT
)
1269 l2cap_le_connect(chan
);
1272 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1274 if (__amp_capable(chan
)) {
1275 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1276 a2mp_discover_amp(chan
);
1277 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1278 l2cap_le_start(chan
);
1280 l2cap_send_conn_req(chan
);
1284 static void l2cap_do_start(struct l2cap_chan
*chan
)
1286 struct l2cap_conn
*conn
= chan
->conn
;
1288 if (conn
->hcon
->type
== LE_LINK
) {
1289 l2cap_le_start(chan
);
1293 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1294 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1297 if (l2cap_chan_check_security(chan
, true) &&
1298 __l2cap_no_conn_pending(chan
)) {
1299 l2cap_start_connection(chan
);
1302 struct l2cap_info_req req
;
1303 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1305 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1306 conn
->info_ident
= l2cap_get_ident(conn
);
1308 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1310 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1315 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1317 u32 local_feat_mask
= l2cap_feat_mask
;
1319 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1322 case L2CAP_MODE_ERTM
:
1323 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1324 case L2CAP_MODE_STREAMING
:
1325 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1331 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1333 struct l2cap_conn
*conn
= chan
->conn
;
1334 struct l2cap_disconn_req req
;
1339 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1340 __clear_retrans_timer(chan
);
1341 __clear_monitor_timer(chan
);
1342 __clear_ack_timer(chan
);
1345 if (chan
->scid
== L2CAP_CID_A2MP
) {
1346 l2cap_state_change(chan
, BT_DISCONN
);
1350 req
.dcid
= cpu_to_le16(chan
->dcid
);
1351 req
.scid
= cpu_to_le16(chan
->scid
);
1352 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1355 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1358 /* ---- L2CAP connections ---- */
1359 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1361 struct l2cap_chan
*chan
, *tmp
;
1363 BT_DBG("conn %p", conn
);
1365 mutex_lock(&conn
->chan_lock
);
1367 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1368 l2cap_chan_lock(chan
);
1370 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1371 l2cap_chan_unlock(chan
);
1375 if (chan
->state
== BT_CONNECT
) {
1376 if (!l2cap_chan_check_security(chan
, true) ||
1377 !__l2cap_no_conn_pending(chan
)) {
1378 l2cap_chan_unlock(chan
);
1382 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1383 && test_bit(CONF_STATE2_DEVICE
,
1384 &chan
->conf_state
)) {
1385 l2cap_chan_close(chan
, ECONNRESET
);
1386 l2cap_chan_unlock(chan
);
1390 l2cap_start_connection(chan
);
1392 } else if (chan
->state
== BT_CONNECT2
) {
1393 struct l2cap_conn_rsp rsp
;
1395 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1396 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1398 if (l2cap_chan_check_security(chan
, false)) {
1399 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1400 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1401 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1402 chan
->ops
->defer(chan
);
1405 l2cap_state_change(chan
, BT_CONFIG
);
1406 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1407 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1410 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1411 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1414 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1417 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1418 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1419 l2cap_chan_unlock(chan
);
1423 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1424 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1425 l2cap_build_conf_req(chan
, buf
), buf
);
1426 chan
->num_conf_req
++;
1429 l2cap_chan_unlock(chan
);
1432 mutex_unlock(&conn
->chan_lock
);
1435 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1437 struct hci_conn
*hcon
= conn
->hcon
;
1438 struct hci_dev
*hdev
= hcon
->hdev
;
1440 BT_DBG("%s conn %p", hdev
->name
, conn
);
1442 /* For outgoing pairing which doesn't necessarily have an
1443 * associated socket (e.g. mgmt_pair_device).
1446 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1448 /* For LE slave connections, make sure the connection interval
1449 * is in the range of the minium and maximum interval that has
1450 * been configured for this connection. If not, then trigger
1451 * the connection update procedure.
1453 if (hcon
->role
== HCI_ROLE_SLAVE
&&
1454 (hcon
->le_conn_interval
< hcon
->le_conn_min_interval
||
1455 hcon
->le_conn_interval
> hcon
->le_conn_max_interval
)) {
1456 struct l2cap_conn_param_update_req req
;
1458 req
.min
= cpu_to_le16(hcon
->le_conn_min_interval
);
1459 req
.max
= cpu_to_le16(hcon
->le_conn_max_interval
);
1460 req
.latency
= cpu_to_le16(hcon
->le_conn_latency
);
1461 req
.to_multiplier
= cpu_to_le16(hcon
->le_supv_timeout
);
1463 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1464 L2CAP_CONN_PARAM_UPDATE_REQ
, sizeof(req
), &req
);
1468 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1470 struct l2cap_chan
*chan
;
1471 struct hci_conn
*hcon
= conn
->hcon
;
1473 BT_DBG("conn %p", conn
);
1475 mutex_lock(&conn
->chan_lock
);
1477 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1479 l2cap_chan_lock(chan
);
1481 if (chan
->scid
== L2CAP_CID_A2MP
) {
1482 l2cap_chan_unlock(chan
);
1486 if (hcon
->type
== LE_LINK
) {
1487 l2cap_le_start(chan
);
1488 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1489 l2cap_chan_ready(chan
);
1491 } else if (chan
->state
== BT_CONNECT
) {
1492 l2cap_do_start(chan
);
1495 l2cap_chan_unlock(chan
);
1498 mutex_unlock(&conn
->chan_lock
);
1500 if (hcon
->type
== LE_LINK
)
1501 l2cap_le_conn_ready(conn
);
1503 queue_work(hcon
->hdev
->workqueue
, &conn
->pending_rx_work
);
1506 /* Notify sockets that we cannot guaranty reliability anymore */
1507 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1509 struct l2cap_chan
*chan
;
1511 BT_DBG("conn %p", conn
);
1513 mutex_lock(&conn
->chan_lock
);
1515 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1516 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1517 l2cap_chan_set_err(chan
, err
);
1520 mutex_unlock(&conn
->chan_lock
);
1523 static void l2cap_info_timeout(struct work_struct
*work
)
1525 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1528 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1529 conn
->info_ident
= 0;
1531 l2cap_conn_start(conn
);
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object is unregistered either explicitly or automatically when
 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
1547 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1549 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1552 /* We need to check whether l2cap_conn is registered. If it is not, we
1553 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1554 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1555 * relies on the parent hci_conn object to be locked. This itself relies
1556 * on the hci_dev object to be locked. So we must lock the hci device
1561 if (user
->list
.next
|| user
->list
.prev
) {
1566 /* conn->hchan is NULL after l2cap_conn_del() was called */
1572 ret
= user
->probe(conn
, user
);
1576 list_add(&user
->list
, &conn
->users
);
1580 hci_dev_unlock(hdev
);
1583 EXPORT_SYMBOL(l2cap_register_user
);
1585 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1587 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1591 if (!user
->list
.next
|| !user
->list
.prev
)
1594 list_del(&user
->list
);
1595 user
->list
.next
= NULL
;
1596 user
->list
.prev
= NULL
;
1597 user
->remove(conn
, user
);
1600 hci_dev_unlock(hdev
);
1602 EXPORT_SYMBOL(l2cap_unregister_user
);
1604 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1606 struct l2cap_user
*user
;
1608 while (!list_empty(&conn
->users
)) {
1609 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1610 list_del(&user
->list
);
1611 user
->list
.next
= NULL
;
1612 user
->list
.prev
= NULL
;
1613 user
->remove(conn
, user
);
1617 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1619 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1620 struct l2cap_chan
*chan
, *l
;
1625 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1627 kfree_skb(conn
->rx_skb
);
1629 skb_queue_purge(&conn
->pending_rx
);
1631 /* We can not call flush_work(&conn->pending_rx_work) here since we
1632 * might block if we are running on a worker from the same workqueue
1633 * pending_rx_work is waiting on.
1635 if (work_pending(&conn
->pending_rx_work
))
1636 cancel_work_sync(&conn
->pending_rx_work
);
1638 l2cap_unregister_all_users(conn
);
1640 /* Force the connection to be immediately dropped */
1641 hcon
->disc_timeout
= 0;
1643 mutex_lock(&conn
->chan_lock
);
1646 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1647 l2cap_chan_hold(chan
);
1648 l2cap_chan_lock(chan
);
1650 l2cap_chan_del(chan
, err
);
1652 l2cap_chan_unlock(chan
);
1654 chan
->ops
->close(chan
);
1655 l2cap_chan_put(chan
);
1658 mutex_unlock(&conn
->chan_lock
);
1660 hci_chan_del(conn
->hchan
);
1662 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1663 cancel_delayed_work_sync(&conn
->info_timer
);
1665 hcon
->l2cap_data
= NULL
;
1667 l2cap_conn_put(conn
);
1670 static void l2cap_conn_free(struct kref
*ref
)
1672 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1674 hci_conn_put(conn
->hcon
);
1678 struct l2cap_conn
*l2cap_conn_get(struct l2cap_conn
*conn
)
1680 kref_get(&conn
->ref
);
1683 EXPORT_SYMBOL(l2cap_conn_get
);
1685 void l2cap_conn_put(struct l2cap_conn
*conn
)
1687 kref_put(&conn
->ref
, l2cap_conn_free
);
1689 EXPORT_SYMBOL(l2cap_conn_put
);
1691 /* ---- Socket interface ---- */
1693 /* Find socket with psm and source / destination bdaddr.
1694 * Returns closest match.
1696 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1701 struct l2cap_chan
*c
, *c1
= NULL
;
1703 read_lock(&chan_list_lock
);
1705 list_for_each_entry(c
, &chan_list
, global_l
) {
1706 if (state
&& c
->state
!= state
)
1709 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1712 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1715 if (c
->psm
== psm
) {
1716 int src_match
, dst_match
;
1717 int src_any
, dst_any
;
1720 src_match
= !bacmp(&c
->src
, src
);
1721 dst_match
= !bacmp(&c
->dst
, dst
);
1722 if (src_match
&& dst_match
) {
1724 read_unlock(&chan_list_lock
);
1729 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1730 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1731 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1732 (src_any
&& dst_any
))
1738 l2cap_chan_hold(c1
);
1740 read_unlock(&chan_list_lock
);
1745 static void l2cap_monitor_timeout(struct work_struct
*work
)
1747 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1748 monitor_timer
.work
);
1750 BT_DBG("chan %p", chan
);
1752 l2cap_chan_lock(chan
);
1755 l2cap_chan_unlock(chan
);
1756 l2cap_chan_put(chan
);
1760 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1762 l2cap_chan_unlock(chan
);
1763 l2cap_chan_put(chan
);
1766 static void l2cap_retrans_timeout(struct work_struct
*work
)
1768 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1769 retrans_timer
.work
);
1771 BT_DBG("chan %p", chan
);
1773 l2cap_chan_lock(chan
);
1776 l2cap_chan_unlock(chan
);
1777 l2cap_chan_put(chan
);
1781 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1782 l2cap_chan_unlock(chan
);
1783 l2cap_chan_put(chan
);
1786 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1787 struct sk_buff_head
*skbs
)
1789 struct sk_buff
*skb
;
1790 struct l2cap_ctrl
*control
;
1792 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1794 if (__chan_is_moving(chan
))
1797 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1799 while (!skb_queue_empty(&chan
->tx_q
)) {
1801 skb
= skb_dequeue(&chan
->tx_q
);
1803 bt_cb(skb
)->control
.retries
= 1;
1804 control
= &bt_cb(skb
)->control
;
1806 control
->reqseq
= 0;
1807 control
->txseq
= chan
->next_tx_seq
;
1809 __pack_control(chan
, control
, skb
);
1811 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1812 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1813 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1816 l2cap_do_send(chan
, skb
);
1818 BT_DBG("Sent txseq %u", control
->txseq
);
1820 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1821 chan
->frames_sent
++;
1825 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1827 struct sk_buff
*skb
, *tx_skb
;
1828 struct l2cap_ctrl
*control
;
1831 BT_DBG("chan %p", chan
);
1833 if (chan
->state
!= BT_CONNECTED
)
1836 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1839 if (__chan_is_moving(chan
))
1842 while (chan
->tx_send_head
&&
1843 chan
->unacked_frames
< chan
->remote_tx_win
&&
1844 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1846 skb
= chan
->tx_send_head
;
1848 bt_cb(skb
)->control
.retries
= 1;
1849 control
= &bt_cb(skb
)->control
;
1851 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1854 control
->reqseq
= chan
->buffer_seq
;
1855 chan
->last_acked_seq
= chan
->buffer_seq
;
1856 control
->txseq
= chan
->next_tx_seq
;
1858 __pack_control(chan
, control
, skb
);
1860 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1861 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1862 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1865 /* Clone after data has been modified. Data is assumed to be
1866 read-only (for locking purposes) on cloned sk_buffs.
1868 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1873 __set_retrans_timer(chan
);
1875 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1876 chan
->unacked_frames
++;
1877 chan
->frames_sent
++;
1880 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1881 chan
->tx_send_head
= NULL
;
1883 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1885 l2cap_do_send(chan
, tx_skb
);
1886 BT_DBG("Sent txseq %u", control
->txseq
);
1889 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1890 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1895 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1897 struct l2cap_ctrl control
;
1898 struct sk_buff
*skb
;
1899 struct sk_buff
*tx_skb
;
1902 BT_DBG("chan %p", chan
);
1904 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1907 if (__chan_is_moving(chan
))
1910 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1911 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1913 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1915 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1920 bt_cb(skb
)->control
.retries
++;
1921 control
= bt_cb(skb
)->control
;
1923 if (chan
->max_tx
!= 0 &&
1924 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1925 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1926 l2cap_send_disconn_req(chan
, ECONNRESET
);
1927 l2cap_seq_list_clear(&chan
->retrans_list
);
1931 control
.reqseq
= chan
->buffer_seq
;
1932 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1937 if (skb_cloned(skb
)) {
1938 /* Cloned sk_buffs are read-only, so we need a
1941 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1943 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1947 l2cap_seq_list_clear(&chan
->retrans_list
);
1951 /* Update skb contents */
1952 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1953 put_unaligned_le32(__pack_extended_control(&control
),
1954 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1956 put_unaligned_le16(__pack_enhanced_control(&control
),
1957 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1961 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1962 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
,
1963 tx_skb
->len
- L2CAP_FCS_SIZE
);
1964 put_unaligned_le16(fcs
, skb_tail_pointer(tx_skb
) -
1968 l2cap_do_send(chan
, tx_skb
);
1970 BT_DBG("Resent txseq %d", control
.txseq
);
1972 chan
->last_acked_seq
= chan
->buffer_seq
;
1976 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1977 struct l2cap_ctrl
*control
)
1979 BT_DBG("chan %p, control %p", chan
, control
);
1981 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
1982 l2cap_ertm_resend(chan
);
1985 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1986 struct l2cap_ctrl
*control
)
1988 struct sk_buff
*skb
;
1990 BT_DBG("chan %p, control %p", chan
, control
);
1993 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
1995 l2cap_seq_list_clear(&chan
->retrans_list
);
1997 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2000 if (chan
->unacked_frames
) {
2001 skb_queue_walk(&chan
->tx_q
, skb
) {
2002 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2003 skb
== chan
->tx_send_head
)
2007 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2008 if (skb
== chan
->tx_send_head
)
2011 l2cap_seq_list_append(&chan
->retrans_list
,
2012 bt_cb(skb
)->control
.txseq
);
2015 l2cap_ertm_resend(chan
);
2019 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2021 struct l2cap_ctrl control
;
2022 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2023 chan
->last_acked_seq
);
2026 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2027 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2029 memset(&control
, 0, sizeof(control
));
2032 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2033 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2034 __clear_ack_timer(chan
);
2035 control
.super
= L2CAP_SUPER_RNR
;
2036 control
.reqseq
= chan
->buffer_seq
;
2037 l2cap_send_sframe(chan
, &control
);
2039 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2040 l2cap_ertm_send(chan
);
2041 /* If any i-frames were sent, they included an ack */
2042 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2046 /* Ack now if the window is 3/4ths full.
2047 * Calculate without mul or div
2049 threshold
= chan
->ack_win
;
2050 threshold
+= threshold
<< 1;
2053 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2056 if (frames_to_ack
>= threshold
) {
2057 __clear_ack_timer(chan
);
2058 control
.super
= L2CAP_SUPER_RR
;
2059 control
.reqseq
= chan
->buffer_seq
;
2060 l2cap_send_sframe(chan
, &control
);
2065 __set_ack_timer(chan
);
2069 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2070 struct msghdr
*msg
, int len
,
2071 int count
, struct sk_buff
*skb
)
2073 struct l2cap_conn
*conn
= chan
->conn
;
2074 struct sk_buff
**frag
;
2077 if (chan
->ops
->memcpy_fromiovec(chan
, skb_put(skb
, count
),
2078 msg
->msg_iov
, count
))
2084 /* Continuation fragments (no L2CAP header) */
2085 frag
= &skb_shinfo(skb
)->frag_list
;
2087 struct sk_buff
*tmp
;
2089 count
= min_t(unsigned int, conn
->mtu
, len
);
2091 tmp
= chan
->ops
->alloc_skb(chan
, 0, count
,
2092 msg
->msg_flags
& MSG_DONTWAIT
);
2094 return PTR_ERR(tmp
);
2098 if (chan
->ops
->memcpy_fromiovec(chan
, skb_put(*frag
, count
),
2099 msg
->msg_iov
, count
))
2105 skb
->len
+= (*frag
)->len
;
2106 skb
->data_len
+= (*frag
)->len
;
2108 frag
= &(*frag
)->next
;
2114 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2115 struct msghdr
*msg
, size_t len
)
2117 struct l2cap_conn
*conn
= chan
->conn
;
2118 struct sk_buff
*skb
;
2119 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2120 struct l2cap_hdr
*lh
;
2122 BT_DBG("chan %p psm 0x%2.2x len %zu", chan
,
2123 __le16_to_cpu(chan
->psm
), len
);
2125 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2127 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2128 msg
->msg_flags
& MSG_DONTWAIT
);
2132 /* Create L2CAP header */
2133 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2134 lh
->cid
= cpu_to_le16(chan
->dcid
);
2135 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2136 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2138 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2139 if (unlikely(err
< 0)) {
2141 return ERR_PTR(err
);
2146 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2147 struct msghdr
*msg
, size_t len
)
2149 struct l2cap_conn
*conn
= chan
->conn
;
2150 struct sk_buff
*skb
;
2152 struct l2cap_hdr
*lh
;
2154 BT_DBG("chan %p len %zu", chan
, len
);
2156 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2158 skb
= chan
->ops
->alloc_skb(chan
, L2CAP_HDR_SIZE
, count
,
2159 msg
->msg_flags
& MSG_DONTWAIT
);
2163 /* Create L2CAP header */
2164 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2165 lh
->cid
= cpu_to_le16(chan
->dcid
);
2166 lh
->len
= cpu_to_le16(len
);
2168 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2169 if (unlikely(err
< 0)) {
2171 return ERR_PTR(err
);
2176 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2177 struct msghdr
*msg
, size_t len
,
2180 struct l2cap_conn
*conn
= chan
->conn
;
2181 struct sk_buff
*skb
;
2182 int err
, count
, hlen
;
2183 struct l2cap_hdr
*lh
;
2185 BT_DBG("chan %p len %zu", chan
, len
);
2188 return ERR_PTR(-ENOTCONN
);
2190 hlen
= __ertm_hdr_size(chan
);
2193 hlen
+= L2CAP_SDULEN_SIZE
;
2195 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2196 hlen
+= L2CAP_FCS_SIZE
;
2198 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2200 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2201 msg
->msg_flags
& MSG_DONTWAIT
);
2205 /* Create L2CAP header */
2206 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2207 lh
->cid
= cpu_to_le16(chan
->dcid
);
2208 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2210 /* Control header is populated later */
2211 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2212 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2214 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2217 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2219 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2220 if (unlikely(err
< 0)) {
2222 return ERR_PTR(err
);
2225 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2226 bt_cb(skb
)->control
.retries
= 0;
2230 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2231 struct sk_buff_head
*seg_queue
,
2232 struct msghdr
*msg
, size_t len
)
2234 struct sk_buff
*skb
;
2239 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2241 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2242 * so fragmented skbs are not used. The HCI layer's handling
2243 * of fragmented skbs is not compatible with ERTM's queueing.
2246 /* PDU size is derived from the HCI MTU */
2247 pdu_len
= chan
->conn
->mtu
;
2249 /* Constrain PDU size for BR/EDR connections */
2251 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2253 /* Adjust for largest possible L2CAP overhead. */
2255 pdu_len
-= L2CAP_FCS_SIZE
;
2257 pdu_len
-= __ertm_hdr_size(chan
);
2259 /* Remote device may have requested smaller PDUs */
2260 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2262 if (len
<= pdu_len
) {
2263 sar
= L2CAP_SAR_UNSEGMENTED
;
2267 sar
= L2CAP_SAR_START
;
2272 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2275 __skb_queue_purge(seg_queue
);
2276 return PTR_ERR(skb
);
2279 bt_cb(skb
)->control
.sar
= sar
;
2280 __skb_queue_tail(seg_queue
, skb
);
2286 if (len
<= pdu_len
) {
2287 sar
= L2CAP_SAR_END
;
2290 sar
= L2CAP_SAR_CONTINUE
;
2297 static struct sk_buff
*l2cap_create_le_flowctl_pdu(struct l2cap_chan
*chan
,
2299 size_t len
, u16 sdulen
)
2301 struct l2cap_conn
*conn
= chan
->conn
;
2302 struct sk_buff
*skb
;
2303 int err
, count
, hlen
;
2304 struct l2cap_hdr
*lh
;
2306 BT_DBG("chan %p len %zu", chan
, len
);
2309 return ERR_PTR(-ENOTCONN
);
2311 hlen
= L2CAP_HDR_SIZE
;
2314 hlen
+= L2CAP_SDULEN_SIZE
;
2316 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2318 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2319 msg
->msg_flags
& MSG_DONTWAIT
);
2323 /* Create L2CAP header */
2324 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2325 lh
->cid
= cpu_to_le16(chan
->dcid
);
2326 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2329 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2331 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2332 if (unlikely(err
< 0)) {
2334 return ERR_PTR(err
);
2340 static int l2cap_segment_le_sdu(struct l2cap_chan
*chan
,
2341 struct sk_buff_head
*seg_queue
,
2342 struct msghdr
*msg
, size_t len
)
2344 struct sk_buff
*skb
;
2348 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2351 pdu_len
= chan
->remote_mps
- L2CAP_SDULEN_SIZE
;
2357 skb
= l2cap_create_le_flowctl_pdu(chan
, msg
, pdu_len
, sdu_len
);
2359 __skb_queue_purge(seg_queue
);
2360 return PTR_ERR(skb
);
2363 __skb_queue_tail(seg_queue
, skb
);
2369 pdu_len
+= L2CAP_SDULEN_SIZE
;
2376 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
2378 struct sk_buff
*skb
;
2380 struct sk_buff_head seg_queue
;
2385 /* Connectionless channel */
2386 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2387 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
2389 return PTR_ERR(skb
);
2391 /* Channel lock is released before requesting new skb and then
2392 * reacquired thus we need to recheck channel state.
2394 if (chan
->state
!= BT_CONNECTED
) {
2399 l2cap_do_send(chan
, skb
);
2403 switch (chan
->mode
) {
2404 case L2CAP_MODE_LE_FLOWCTL
:
2405 /* Check outgoing MTU */
2406 if (len
> chan
->omtu
)
2409 if (!chan
->tx_credits
)
2412 __skb_queue_head_init(&seg_queue
);
2414 err
= l2cap_segment_le_sdu(chan
, &seg_queue
, msg
, len
);
2416 if (chan
->state
!= BT_CONNECTED
) {
2417 __skb_queue_purge(&seg_queue
);
2424 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2426 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
2427 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
2431 if (!chan
->tx_credits
)
2432 chan
->ops
->suspend(chan
);
2438 case L2CAP_MODE_BASIC
:
2439 /* Check outgoing MTU */
2440 if (len
> chan
->omtu
)
2443 /* Create a basic PDU */
2444 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
2446 return PTR_ERR(skb
);
2448 /* Channel lock is released before requesting new skb and then
2449 * reacquired thus we need to recheck channel state.
2451 if (chan
->state
!= BT_CONNECTED
) {
2456 l2cap_do_send(chan
, skb
);
2460 case L2CAP_MODE_ERTM
:
2461 case L2CAP_MODE_STREAMING
:
2462 /* Check outgoing MTU */
2463 if (len
> chan
->omtu
) {
2468 __skb_queue_head_init(&seg_queue
);
2470 /* Do segmentation before calling in to the state machine,
2471 * since it's possible to block while waiting for memory
2474 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2476 /* The channel could have been closed while segmenting,
2477 * check that it is still connected.
2479 if (chan
->state
!= BT_CONNECTED
) {
2480 __skb_queue_purge(&seg_queue
);
2487 if (chan
->mode
== L2CAP_MODE_ERTM
)
2488 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2490 l2cap_streaming_send(chan
, &seg_queue
);
2494 /* If the skbs were not queued for sending, they'll still be in
2495 * seg_queue and need to be purged.
2497 __skb_queue_purge(&seg_queue
);
2501 BT_DBG("bad state %1.1x", chan
->mode
);
2507 EXPORT_SYMBOL_GPL(l2cap_chan_send
);
2509 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2511 struct l2cap_ctrl control
;
2514 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2516 memset(&control
, 0, sizeof(control
));
2518 control
.super
= L2CAP_SUPER_SREJ
;
2520 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2521 seq
= __next_seq(chan
, seq
)) {
2522 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2523 control
.reqseq
= seq
;
2524 l2cap_send_sframe(chan
, &control
);
2525 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2529 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2532 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2534 struct l2cap_ctrl control
;
2536 BT_DBG("chan %p", chan
);
2538 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2541 memset(&control
, 0, sizeof(control
));
2543 control
.super
= L2CAP_SUPER_SREJ
;
2544 control
.reqseq
= chan
->srej_list
.tail
;
2545 l2cap_send_sframe(chan
, &control
);
2548 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2550 struct l2cap_ctrl control
;
2554 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2556 memset(&control
, 0, sizeof(control
));
2558 control
.super
= L2CAP_SUPER_SREJ
;
2560 /* Capture initial list head to allow only one pass through the list. */
2561 initial_head
= chan
->srej_list
.head
;
2564 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2565 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2568 control
.reqseq
= seq
;
2569 l2cap_send_sframe(chan
, &control
);
2570 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2571 } while (chan
->srej_list
.head
!= initial_head
);
2574 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2576 struct sk_buff
*acked_skb
;
2579 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2581 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2584 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2585 chan
->expected_ack_seq
, chan
->unacked_frames
);
2587 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2588 ackseq
= __next_seq(chan
, ackseq
)) {
2590 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2592 skb_unlink(acked_skb
, &chan
->tx_q
);
2593 kfree_skb(acked_skb
);
2594 chan
->unacked_frames
--;
2598 chan
->expected_ack_seq
= reqseq
;
2600 if (chan
->unacked_frames
== 0)
2601 __clear_retrans_timer(chan
);
2603 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2606 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2608 BT_DBG("chan %p", chan
);
2610 chan
->expected_tx_seq
= chan
->buffer_seq
;
2611 l2cap_seq_list_clear(&chan
->srej_list
);
2612 skb_queue_purge(&chan
->srej_q
);
2613 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2616 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2617 struct l2cap_ctrl
*control
,
2618 struct sk_buff_head
*skbs
, u8 event
)
2620 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2624 case L2CAP_EV_DATA_REQUEST
:
2625 if (chan
->tx_send_head
== NULL
)
2626 chan
->tx_send_head
= skb_peek(skbs
);
2628 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2629 l2cap_ertm_send(chan
);
2631 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2632 BT_DBG("Enter LOCAL_BUSY");
2633 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2635 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2636 /* The SREJ_SENT state must be aborted if we are to
2637 * enter the LOCAL_BUSY state.
2639 l2cap_abort_rx_srej_sent(chan
);
2642 l2cap_send_ack(chan
);
2645 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2646 BT_DBG("Exit LOCAL_BUSY");
2647 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2649 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2650 struct l2cap_ctrl local_control
;
2652 memset(&local_control
, 0, sizeof(local_control
));
2653 local_control
.sframe
= 1;
2654 local_control
.super
= L2CAP_SUPER_RR
;
2655 local_control
.poll
= 1;
2656 local_control
.reqseq
= chan
->buffer_seq
;
2657 l2cap_send_sframe(chan
, &local_control
);
2659 chan
->retry_count
= 1;
2660 __set_monitor_timer(chan
);
2661 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2664 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2665 l2cap_process_reqseq(chan
, control
->reqseq
);
2667 case L2CAP_EV_EXPLICIT_POLL
:
2668 l2cap_send_rr_or_rnr(chan
, 1);
2669 chan
->retry_count
= 1;
2670 __set_monitor_timer(chan
);
2671 __clear_ack_timer(chan
);
2672 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2674 case L2CAP_EV_RETRANS_TO
:
2675 l2cap_send_rr_or_rnr(chan
, 1);
2676 chan
->retry_count
= 1;
2677 __set_monitor_timer(chan
);
2678 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2680 case L2CAP_EV_RECV_FBIT
:
2681 /* Nothing to process */
2688 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2689 struct l2cap_ctrl
*control
,
2690 struct sk_buff_head
*skbs
, u8 event
)
2692 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2696 case L2CAP_EV_DATA_REQUEST
:
2697 if (chan
->tx_send_head
== NULL
)
2698 chan
->tx_send_head
= skb_peek(skbs
);
2699 /* Queue data, but don't send. */
2700 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2702 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2703 BT_DBG("Enter LOCAL_BUSY");
2704 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2706 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2707 /* The SREJ_SENT state must be aborted if we are to
2708 * enter the LOCAL_BUSY state.
2710 l2cap_abort_rx_srej_sent(chan
);
2713 l2cap_send_ack(chan
);
2716 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2717 BT_DBG("Exit LOCAL_BUSY");
2718 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2720 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2721 struct l2cap_ctrl local_control
;
2722 memset(&local_control
, 0, sizeof(local_control
));
2723 local_control
.sframe
= 1;
2724 local_control
.super
= L2CAP_SUPER_RR
;
2725 local_control
.poll
= 1;
2726 local_control
.reqseq
= chan
->buffer_seq
;
2727 l2cap_send_sframe(chan
, &local_control
);
2729 chan
->retry_count
= 1;
2730 __set_monitor_timer(chan
);
2731 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2734 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2735 l2cap_process_reqseq(chan
, control
->reqseq
);
2739 case L2CAP_EV_RECV_FBIT
:
2740 if (control
&& control
->final
) {
2741 __clear_monitor_timer(chan
);
2742 if (chan
->unacked_frames
> 0)
2743 __set_retrans_timer(chan
);
2744 chan
->retry_count
= 0;
2745 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2746 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2749 case L2CAP_EV_EXPLICIT_POLL
:
2752 case L2CAP_EV_MONITOR_TO
:
2753 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2754 l2cap_send_rr_or_rnr(chan
, 1);
2755 __set_monitor_timer(chan
);
2756 chan
->retry_count
++;
2758 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2766 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2767 struct sk_buff_head
*skbs
, u8 event
)
2769 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2770 chan
, control
, skbs
, event
, chan
->tx_state
);
2772 switch (chan
->tx_state
) {
2773 case L2CAP_TX_STATE_XMIT
:
2774 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2776 case L2CAP_TX_STATE_WAIT_F
:
2777 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2785 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2786 struct l2cap_ctrl
*control
)
2788 BT_DBG("chan %p, control %p", chan
, control
);
2789 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2792 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2793 struct l2cap_ctrl
*control
)
2795 BT_DBG("chan %p, control %p", chan
, control
);
2796 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2799 /* Copy frame to all raw sockets on that connection */
2800 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2802 struct sk_buff
*nskb
;
2803 struct l2cap_chan
*chan
;
2805 BT_DBG("conn %p", conn
);
2807 mutex_lock(&conn
->chan_lock
);
2809 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2810 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2813 /* Don't send frame to the channel it came from */
2814 if (bt_cb(skb
)->chan
== chan
)
2817 nskb
= skb_clone(skb
, GFP_KERNEL
);
2820 if (chan
->ops
->recv(chan
, nskb
))
2824 mutex_unlock(&conn
->chan_lock
);
2827 /* ---- L2CAP signalling commands ---- */
2828 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2829 u8 ident
, u16 dlen
, void *data
)
2831 struct sk_buff
*skb
, **frag
;
2832 struct l2cap_cmd_hdr
*cmd
;
2833 struct l2cap_hdr
*lh
;
2836 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2837 conn
, code
, ident
, dlen
);
2839 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2842 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2843 count
= min_t(unsigned int, conn
->mtu
, len
);
2845 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2849 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2850 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2852 if (conn
->hcon
->type
== LE_LINK
)
2853 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2855 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2857 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2860 cmd
->len
= cpu_to_le16(dlen
);
2863 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2864 memcpy(skb_put(skb
, count
), data
, count
);
2870 /* Continuation fragments (no L2CAP header) */
2871 frag
= &skb_shinfo(skb
)->frag_list
;
2873 count
= min_t(unsigned int, conn
->mtu
, len
);
2875 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2879 memcpy(skb_put(*frag
, count
), data
, count
);
2884 frag
= &(*frag
)->next
;
2894 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2897 struct l2cap_conf_opt
*opt
= *ptr
;
2900 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2908 *val
= *((u8
*) opt
->val
);
2912 *val
= get_unaligned_le16(opt
->val
);
2916 *val
= get_unaligned_le32(opt
->val
);
2920 *val
= (unsigned long) opt
->val
;
2924 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2928 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2930 struct l2cap_conf_opt
*opt
= *ptr
;
2932 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2939 *((u8
*) opt
->val
) = val
;
2943 put_unaligned_le16(val
, opt
->val
);
2947 put_unaligned_le32(val
, opt
->val
);
2951 memcpy(opt
->val
, (void *) val
, len
);
2955 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2958 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2960 struct l2cap_conf_efs efs
;
2962 switch (chan
->mode
) {
2963 case L2CAP_MODE_ERTM
:
2964 efs
.id
= chan
->local_id
;
2965 efs
.stype
= chan
->local_stype
;
2966 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2967 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2968 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2969 efs
.flush_to
= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
2972 case L2CAP_MODE_STREAMING
:
2974 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2975 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2976 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2985 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2986 (unsigned long) &efs
);
2989 static void l2cap_ack_timeout(struct work_struct
*work
)
2991 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2995 BT_DBG("chan %p", chan
);
2997 l2cap_chan_lock(chan
);
2999 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3000 chan
->last_acked_seq
);
3003 l2cap_send_rr_or_rnr(chan
, 0);
3005 l2cap_chan_unlock(chan
);
3006 l2cap_chan_put(chan
);
3009 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3013 chan
->next_tx_seq
= 0;
3014 chan
->expected_tx_seq
= 0;
3015 chan
->expected_ack_seq
= 0;
3016 chan
->unacked_frames
= 0;
3017 chan
->buffer_seq
= 0;
3018 chan
->frames_sent
= 0;
3019 chan
->last_acked_seq
= 0;
3021 chan
->sdu_last_frag
= NULL
;
3024 skb_queue_head_init(&chan
->tx_q
);
3026 chan
->local_amp_id
= AMP_ID_BREDR
;
3027 chan
->move_id
= AMP_ID_BREDR
;
3028 chan
->move_state
= L2CAP_MOVE_STABLE
;
3029 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3031 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3034 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3035 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3037 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3038 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3039 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3041 skb_queue_head_init(&chan
->srej_q
);
3043 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3047 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3049 l2cap_seq_list_free(&chan
->srej_list
);
3054 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3057 case L2CAP_MODE_STREAMING
:
3058 case L2CAP_MODE_ERTM
:
3059 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3063 return L2CAP_MODE_BASIC
;
3067 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3069 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3072 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3074 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
3077 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3078 struct l2cap_conf_rfc
*rfc
)
3080 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3081 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3083 /* Class 1 devices have must have ERTM timeouts
3084 * exceeding the Link Supervision Timeout. The
3085 * default Link Supervision Timeout for AMP
3086 * controllers is 10 seconds.
3088 * Class 1 devices use 0xffffffff for their
3089 * best-effort flush timeout, so the clamping logic
3090 * will result in a timeout that meets the above
3091 * requirement. ERTM timeouts are 16-bit values, so
3092 * the maximum timeout is 65.535 seconds.
3095 /* Convert timeout to milliseconds and round */
3096 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3098 /* This is the recommended formula for class 2 devices
3099 * that start ERTM timers when packets are sent to the
3102 ertm_to
= 3 * ertm_to
+ 500;
3104 if (ertm_to
> 0xffff)
3107 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3108 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3110 rfc
->retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3111 rfc
->monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3115 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3117 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3118 __l2cap_ews_supported(chan
->conn
)) {
3119 /* use extended control field */
3120 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3121 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3123 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3124 L2CAP_DEFAULT_TX_WINDOW
);
3125 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3127 chan
->ack_win
= chan
->tx_win
;
3130 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3132 struct l2cap_conf_req
*req
= data
;
3133 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3134 void *ptr
= req
->data
;
3137 BT_DBG("chan %p", chan
);
3139 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3142 switch (chan
->mode
) {
3143 case L2CAP_MODE_STREAMING
:
3144 case L2CAP_MODE_ERTM
:
3145 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3148 if (__l2cap_efs_supported(chan
->conn
))
3149 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3153 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3158 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3159 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3161 switch (chan
->mode
) {
3162 case L2CAP_MODE_BASIC
:
3166 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3167 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3170 rfc
.mode
= L2CAP_MODE_BASIC
;
3172 rfc
.max_transmit
= 0;
3173 rfc
.retrans_timeout
= 0;
3174 rfc
.monitor_timeout
= 0;
3175 rfc
.max_pdu_size
= 0;
3177 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3178 (unsigned long) &rfc
);
3181 case L2CAP_MODE_ERTM
:
3182 rfc
.mode
= L2CAP_MODE_ERTM
;
3183 rfc
.max_transmit
= chan
->max_tx
;
3185 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3187 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3188 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3190 rfc
.max_pdu_size
= cpu_to_le16(size
);
3192 l2cap_txwin_setup(chan
);
3194 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3195 L2CAP_DEFAULT_TX_WINDOW
);
3197 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3198 (unsigned long) &rfc
);
3200 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3201 l2cap_add_opt_efs(&ptr
, chan
);
3203 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3204 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3207 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3208 if (chan
->fcs
== L2CAP_FCS_NONE
||
3209 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3210 chan
->fcs
= L2CAP_FCS_NONE
;
3211 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3216 case L2CAP_MODE_STREAMING
:
3217 l2cap_txwin_setup(chan
);
3218 rfc
.mode
= L2CAP_MODE_STREAMING
;
3220 rfc
.max_transmit
= 0;
3221 rfc
.retrans_timeout
= 0;
3222 rfc
.monitor_timeout
= 0;
3224 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3225 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3227 rfc
.max_pdu_size
= cpu_to_le16(size
);
3229 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3230 (unsigned long) &rfc
);
3232 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3233 l2cap_add_opt_efs(&ptr
, chan
);
3235 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3236 if (chan
->fcs
== L2CAP_FCS_NONE
||
3237 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3238 chan
->fcs
= L2CAP_FCS_NONE
;
3239 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3245 req
->dcid
= cpu_to_le16(chan
->dcid
);
3246 req
->flags
= cpu_to_le16(0);
3251 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
3253 struct l2cap_conf_rsp
*rsp
= data
;
3254 void *ptr
= rsp
->data
;
3255 void *req
= chan
->conf_req
;
3256 int len
= chan
->conf_len
;
3257 int type
, hint
, olen
;
3259 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3260 struct l2cap_conf_efs efs
;
3262 u16 mtu
= L2CAP_DEFAULT_MTU
;
3263 u16 result
= L2CAP_CONF_SUCCESS
;
3266 BT_DBG("chan %p", chan
);
3268 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3269 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
3271 hint
= type
& L2CAP_CONF_HINT
;
3272 type
&= L2CAP_CONF_MASK
;
3275 case L2CAP_CONF_MTU
:
3279 case L2CAP_CONF_FLUSH_TO
:
3280 chan
->flush_to
= val
;
3283 case L2CAP_CONF_QOS
:
3286 case L2CAP_CONF_RFC
:
3287 if (olen
== sizeof(rfc
))
3288 memcpy(&rfc
, (void *) val
, olen
);
3291 case L2CAP_CONF_FCS
:
3292 if (val
== L2CAP_FCS_NONE
)
3293 set_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
);
3296 case L2CAP_CONF_EFS
:
3298 if (olen
== sizeof(efs
))
3299 memcpy(&efs
, (void *) val
, olen
);
3302 case L2CAP_CONF_EWS
:
3303 if (!chan
->conn
->hs_enabled
)
3304 return -ECONNREFUSED
;
3306 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3307 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3308 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3309 chan
->remote_tx_win
= val
;
3316 result
= L2CAP_CONF_UNKNOWN
;
3317 *((u8
*) ptr
++) = type
;
3322 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3325 switch (chan
->mode
) {
3326 case L2CAP_MODE_STREAMING
:
3327 case L2CAP_MODE_ERTM
:
3328 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3329 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3330 chan
->conn
->feat_mask
);
3335 if (__l2cap_efs_supported(chan
->conn
))
3336 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3338 return -ECONNREFUSED
;
3341 if (chan
->mode
!= rfc
.mode
)
3342 return -ECONNREFUSED
;
3348 if (chan
->mode
!= rfc
.mode
) {
3349 result
= L2CAP_CONF_UNACCEPT
;
3350 rfc
.mode
= chan
->mode
;
3352 if (chan
->num_conf_rsp
== 1)
3353 return -ECONNREFUSED
;
3355 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3356 (unsigned long) &rfc
);
3359 if (result
== L2CAP_CONF_SUCCESS
) {
3360 /* Configure output options and let the other side know
3361 * which ones we don't like. */
3363 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3364 result
= L2CAP_CONF_UNACCEPT
;
3367 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3369 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3372 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3373 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3374 efs
.stype
!= chan
->local_stype
) {
3376 result
= L2CAP_CONF_UNACCEPT
;
3378 if (chan
->num_conf_req
>= 1)
3379 return -ECONNREFUSED
;
3381 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3383 (unsigned long) &efs
);
3385 /* Send PENDING Conf Rsp */
3386 result
= L2CAP_CONF_PENDING
;
3387 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3392 case L2CAP_MODE_BASIC
:
3393 chan
->fcs
= L2CAP_FCS_NONE
;
3394 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3397 case L2CAP_MODE_ERTM
:
3398 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3399 chan
->remote_tx_win
= rfc
.txwin_size
;
3401 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3403 chan
->remote_max_tx
= rfc
.max_transmit
;
3405 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3406 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3407 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3408 rfc
.max_pdu_size
= cpu_to_le16(size
);
3409 chan
->remote_mps
= size
;
3411 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3413 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3415 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3416 sizeof(rfc
), (unsigned long) &rfc
);
3418 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3419 chan
->remote_id
= efs
.id
;
3420 chan
->remote_stype
= efs
.stype
;
3421 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3422 chan
->remote_flush_to
=
3423 le32_to_cpu(efs
.flush_to
);
3424 chan
->remote_acc_lat
=
3425 le32_to_cpu(efs
.acc_lat
);
3426 chan
->remote_sdu_itime
=
3427 le32_to_cpu(efs
.sdu_itime
);
3428 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3430 (unsigned long) &efs
);
3434 case L2CAP_MODE_STREAMING
:
3435 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3436 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3437 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3438 rfc
.max_pdu_size
= cpu_to_le16(size
);
3439 chan
->remote_mps
= size
;
3441 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3443 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3444 (unsigned long) &rfc
);
3449 result
= L2CAP_CONF_UNACCEPT
;
3451 memset(&rfc
, 0, sizeof(rfc
));
3452 rfc
.mode
= chan
->mode
;
3455 if (result
== L2CAP_CONF_SUCCESS
)
3456 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3458 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3459 rsp
->result
= cpu_to_le16(result
);
3460 rsp
->flags
= cpu_to_le16(0);
3465 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3466 void *data
, u16
*result
)
3468 struct l2cap_conf_req
*req
= data
;
3469 void *ptr
= req
->data
;
3472 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3473 struct l2cap_conf_efs efs
;
3475 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3477 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3478 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3481 case L2CAP_CONF_MTU
:
3482 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3483 *result
= L2CAP_CONF_UNACCEPT
;
3484 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3487 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3490 case L2CAP_CONF_FLUSH_TO
:
3491 chan
->flush_to
= val
;
3492 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3496 case L2CAP_CONF_RFC
:
3497 if (olen
== sizeof(rfc
))
3498 memcpy(&rfc
, (void *)val
, olen
);
3500 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3501 rfc
.mode
!= chan
->mode
)
3502 return -ECONNREFUSED
;
3506 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3507 sizeof(rfc
), (unsigned long) &rfc
);
3510 case L2CAP_CONF_EWS
:
3511 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3512 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3516 case L2CAP_CONF_EFS
:
3517 if (olen
== sizeof(efs
))
3518 memcpy(&efs
, (void *)val
, olen
);
3520 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3521 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3522 efs
.stype
!= chan
->local_stype
)
3523 return -ECONNREFUSED
;
3525 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3526 (unsigned long) &efs
);
3529 case L2CAP_CONF_FCS
:
3530 if (*result
== L2CAP_CONF_PENDING
)
3531 if (val
== L2CAP_FCS_NONE
)
3532 set_bit(CONF_RECV_NO_FCS
,
3538 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3539 return -ECONNREFUSED
;
3541 chan
->mode
= rfc
.mode
;
3543 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3545 case L2CAP_MODE_ERTM
:
3546 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3547 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3548 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3549 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3550 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3553 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3554 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3555 chan
->local_sdu_itime
=
3556 le32_to_cpu(efs
.sdu_itime
);
3557 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3558 chan
->local_flush_to
=
3559 le32_to_cpu(efs
.flush_to
);
3563 case L2CAP_MODE_STREAMING
:
3564 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3568 req
->dcid
= cpu_to_le16(chan
->dcid
);
3569 req
->flags
= cpu_to_le16(0);
3574 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3575 u16 result
, u16 flags
)
3577 struct l2cap_conf_rsp
*rsp
= data
;
3578 void *ptr
= rsp
->data
;
3580 BT_DBG("chan %p", chan
);
3582 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3583 rsp
->result
= cpu_to_le16(result
);
3584 rsp
->flags
= cpu_to_le16(flags
);
3589 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3591 struct l2cap_le_conn_rsp rsp
;
3592 struct l2cap_conn
*conn
= chan
->conn
;
3594 BT_DBG("chan %p", chan
);
3596 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3597 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3598 rsp
.mps
= cpu_to_le16(chan
->mps
);
3599 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
3600 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3602 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
3606 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3608 struct l2cap_conn_rsp rsp
;
3609 struct l2cap_conn
*conn
= chan
->conn
;
3613 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3614 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3615 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3616 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3619 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3621 rsp_code
= L2CAP_CONN_RSP
;
3623 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3625 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3627 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3630 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3631 l2cap_build_conf_req(chan
, buf
), buf
);
3632 chan
->num_conf_req
++;
3635 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3639 /* Use sane default values in case a misbehaving remote device
3640 * did not send an RFC or extended window size option.
3642 u16 txwin_ext
= chan
->ack_win
;
3643 struct l2cap_conf_rfc rfc
= {
3645 .retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3646 .monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3647 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3648 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3651 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3653 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3656 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3657 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3660 case L2CAP_CONF_RFC
:
3661 if (olen
== sizeof(rfc
))
3662 memcpy(&rfc
, (void *)val
, olen
);
3664 case L2CAP_CONF_EWS
:
3671 case L2CAP_MODE_ERTM
:
3672 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3673 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3674 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3675 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3676 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3678 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3681 case L2CAP_MODE_STREAMING
:
3682 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3686 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3687 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3690 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3692 if (cmd_len
< sizeof(*rej
))
3695 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3698 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3699 cmd
->ident
== conn
->info_ident
) {
3700 cancel_delayed_work(&conn
->info_timer
);
3702 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3703 conn
->info_ident
= 0;
3705 l2cap_conn_start(conn
);
3711 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3712 struct l2cap_cmd_hdr
*cmd
,
3713 u8
*data
, u8 rsp_code
, u8 amp_id
)
3715 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3716 struct l2cap_conn_rsp rsp
;
3717 struct l2cap_chan
*chan
= NULL
, *pchan
;
3718 int result
, status
= L2CAP_CS_NO_INFO
;
3720 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3721 __le16 psm
= req
->psm
;
3723 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3725 /* Check if we have socket listening on psm */
3726 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3727 &conn
->hcon
->dst
, ACL_LINK
);
3729 result
= L2CAP_CR_BAD_PSM
;
3733 mutex_lock(&conn
->chan_lock
);
3734 l2cap_chan_lock(pchan
);
3736 /* Check if the ACL is secure enough (if not SDP) */
3737 if (psm
!= cpu_to_le16(L2CAP_PSM_SDP
) &&
3738 !hci_conn_check_link_mode(conn
->hcon
)) {
3739 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3740 result
= L2CAP_CR_SEC_BLOCK
;
3744 result
= L2CAP_CR_NO_MEM
;
3746 /* Check if we already have channel with that dcid */
3747 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3750 chan
= pchan
->ops
->new_connection(pchan
);
3754 /* For certain devices (ex: HID mouse), support for authentication,
3755 * pairing and bonding is optional. For such devices, inorder to avoid
3756 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3757 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3759 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3761 bacpy(&chan
->src
, &conn
->hcon
->src
);
3762 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3763 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
3764 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
3767 chan
->local_amp_id
= amp_id
;
3769 __l2cap_chan_add(conn
, chan
);
3773 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3775 chan
->ident
= cmd
->ident
;
3777 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3778 if (l2cap_chan_check_security(chan
, false)) {
3779 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3780 l2cap_state_change(chan
, BT_CONNECT2
);
3781 result
= L2CAP_CR_PEND
;
3782 status
= L2CAP_CS_AUTHOR_PEND
;
3783 chan
->ops
->defer(chan
);
3785 /* Force pending result for AMP controllers.
3786 * The connection will succeed after the
3787 * physical link is up.
3789 if (amp_id
== AMP_ID_BREDR
) {
3790 l2cap_state_change(chan
, BT_CONFIG
);
3791 result
= L2CAP_CR_SUCCESS
;
3793 l2cap_state_change(chan
, BT_CONNECT2
);
3794 result
= L2CAP_CR_PEND
;
3796 status
= L2CAP_CS_NO_INFO
;
3799 l2cap_state_change(chan
, BT_CONNECT2
);
3800 result
= L2CAP_CR_PEND
;
3801 status
= L2CAP_CS_AUTHEN_PEND
;
3804 l2cap_state_change(chan
, BT_CONNECT2
);
3805 result
= L2CAP_CR_PEND
;
3806 status
= L2CAP_CS_NO_INFO
;
3810 l2cap_chan_unlock(pchan
);
3811 mutex_unlock(&conn
->chan_lock
);
3812 l2cap_chan_put(pchan
);
3815 rsp
.scid
= cpu_to_le16(scid
);
3816 rsp
.dcid
= cpu_to_le16(dcid
);
3817 rsp
.result
= cpu_to_le16(result
);
3818 rsp
.status
= cpu_to_le16(status
);
3819 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3821 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3822 struct l2cap_info_req info
;
3823 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3825 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3826 conn
->info_ident
= l2cap_get_ident(conn
);
3828 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3830 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3831 sizeof(info
), &info
);
3834 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3835 result
== L2CAP_CR_SUCCESS
) {
3837 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3838 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3839 l2cap_build_conf_req(chan
, buf
), buf
);
3840 chan
->num_conf_req
++;
3846 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3847 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3849 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3850 struct hci_conn
*hcon
= conn
->hcon
;
3852 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3856 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3857 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3858 mgmt_device_connected(hdev
, &hcon
->dst
, hcon
->type
,
3859 hcon
->dst_type
, 0, NULL
, 0,
3861 hci_dev_unlock(hdev
);
3863 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3867 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3868 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3871 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3872 u16 scid
, dcid
, result
, status
;
3873 struct l2cap_chan
*chan
;
3877 if (cmd_len
< sizeof(*rsp
))
3880 scid
= __le16_to_cpu(rsp
->scid
);
3881 dcid
= __le16_to_cpu(rsp
->dcid
);
3882 result
= __le16_to_cpu(rsp
->result
);
3883 status
= __le16_to_cpu(rsp
->status
);
3885 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3886 dcid
, scid
, result
, status
);
3888 mutex_lock(&conn
->chan_lock
);
3891 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3897 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3906 l2cap_chan_lock(chan
);
3909 case L2CAP_CR_SUCCESS
:
3910 l2cap_state_change(chan
, BT_CONFIG
);
3913 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3915 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3918 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3919 l2cap_build_conf_req(chan
, req
), req
);
3920 chan
->num_conf_req
++;
3924 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3928 l2cap_chan_del(chan
, ECONNREFUSED
);
3932 l2cap_chan_unlock(chan
);
3935 mutex_unlock(&conn
->chan_lock
);
3940 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3942 /* FCS is enabled only in ERTM or streaming mode, if one or both
3945 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3946 chan
->fcs
= L2CAP_FCS_NONE
;
3947 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
3948 chan
->fcs
= L2CAP_FCS_CRC16
;
3951 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3952 u8 ident
, u16 flags
)
3954 struct l2cap_conn
*conn
= chan
->conn
;
3956 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
3959 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3960 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3962 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
3963 l2cap_build_conf_rsp(chan
, data
,
3964 L2CAP_CONF_SUCCESS
, flags
), data
);
3967 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
3970 struct l2cap_cmd_rej_cid rej
;
3972 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3973 rej
.scid
= __cpu_to_le16(scid
);
3974 rej
.dcid
= __cpu_to_le16(dcid
);
3976 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3979 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
3980 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3983 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3986 struct l2cap_chan
*chan
;
3989 if (cmd_len
< sizeof(*req
))
3992 dcid
= __le16_to_cpu(req
->dcid
);
3993 flags
= __le16_to_cpu(req
->flags
);
3995 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3997 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3999 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4003 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4004 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4009 /* Reject if config buffer is too small. */
4010 len
= cmd_len
- sizeof(*req
);
4011 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4012 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4013 l2cap_build_conf_rsp(chan
, rsp
,
4014 L2CAP_CONF_REJECT
, flags
), rsp
);
4019 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4020 chan
->conf_len
+= len
;
4022 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4023 /* Incomplete config. Send empty response. */
4024 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4025 l2cap_build_conf_rsp(chan
, rsp
,
4026 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4030 /* Complete config. */
4031 len
= l2cap_parse_conf_req(chan
, rsp
);
4033 l2cap_send_disconn_req(chan
, ECONNRESET
);
4037 chan
->ident
= cmd
->ident
;
4038 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4039 chan
->num_conf_rsp
++;
4041 /* Reset config buffer. */
4044 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4047 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4048 set_default_fcs(chan
);
4050 if (chan
->mode
== L2CAP_MODE_ERTM
||
4051 chan
->mode
== L2CAP_MODE_STREAMING
)
4052 err
= l2cap_ertm_init(chan
);
4055 l2cap_send_disconn_req(chan
, -err
);
4057 l2cap_chan_ready(chan
);
4062 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4064 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4065 l2cap_build_conf_req(chan
, buf
), buf
);
4066 chan
->num_conf_req
++;
4069 /* Got Conf Rsp PENDING from remote side and asume we sent
4070 Conf Rsp PENDING in the code above */
4071 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4072 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4074 /* check compatibility */
4076 /* Send rsp for BR/EDR channel */
4078 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4080 chan
->ident
= cmd
->ident
;
4084 l2cap_chan_unlock(chan
);
4088 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4089 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4092 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4093 u16 scid
, flags
, result
;
4094 struct l2cap_chan
*chan
;
4095 int len
= cmd_len
- sizeof(*rsp
);
4098 if (cmd_len
< sizeof(*rsp
))
4101 scid
= __le16_to_cpu(rsp
->scid
);
4102 flags
= __le16_to_cpu(rsp
->flags
);
4103 result
= __le16_to_cpu(rsp
->result
);
4105 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4108 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4113 case L2CAP_CONF_SUCCESS
:
4114 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4115 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4118 case L2CAP_CONF_PENDING
:
4119 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4121 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4124 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4127 l2cap_send_disconn_req(chan
, ECONNRESET
);
4131 if (!chan
->hs_hcon
) {
4132 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4135 if (l2cap_check_efs(chan
)) {
4136 amp_create_logical_link(chan
);
4137 chan
->ident
= cmd
->ident
;
4143 case L2CAP_CONF_UNACCEPT
:
4144 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4147 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4148 l2cap_send_disconn_req(chan
, ECONNRESET
);
4152 /* throw out any old stored conf requests */
4153 result
= L2CAP_CONF_SUCCESS
;
4154 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4157 l2cap_send_disconn_req(chan
, ECONNRESET
);
4161 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4162 L2CAP_CONF_REQ
, len
, req
);
4163 chan
->num_conf_req
++;
4164 if (result
!= L2CAP_CONF_SUCCESS
)
4170 l2cap_chan_set_err(chan
, ECONNRESET
);
4172 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4173 l2cap_send_disconn_req(chan
, ECONNRESET
);
4177 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4180 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4182 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4183 set_default_fcs(chan
);
4185 if (chan
->mode
== L2CAP_MODE_ERTM
||
4186 chan
->mode
== L2CAP_MODE_STREAMING
)
4187 err
= l2cap_ertm_init(chan
);
4190 l2cap_send_disconn_req(chan
, -err
);
4192 l2cap_chan_ready(chan
);
4196 l2cap_chan_unlock(chan
);
4200 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4201 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4204 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4205 struct l2cap_disconn_rsp rsp
;
4207 struct l2cap_chan
*chan
;
4209 if (cmd_len
!= sizeof(*req
))
4212 scid
= __le16_to_cpu(req
->scid
);
4213 dcid
= __le16_to_cpu(req
->dcid
);
4215 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4217 mutex_lock(&conn
->chan_lock
);
4219 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4221 mutex_unlock(&conn
->chan_lock
);
4222 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4226 l2cap_chan_lock(chan
);
4228 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4229 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4230 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4232 chan
->ops
->set_shutdown(chan
);
4234 l2cap_chan_hold(chan
);
4235 l2cap_chan_del(chan
, ECONNRESET
);
4237 l2cap_chan_unlock(chan
);
4239 chan
->ops
->close(chan
);
4240 l2cap_chan_put(chan
);
4242 mutex_unlock(&conn
->chan_lock
);
4247 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4248 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4251 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4253 struct l2cap_chan
*chan
;
4255 if (cmd_len
!= sizeof(*rsp
))
4258 scid
= __le16_to_cpu(rsp
->scid
);
4259 dcid
= __le16_to_cpu(rsp
->dcid
);
4261 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4263 mutex_lock(&conn
->chan_lock
);
4265 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4267 mutex_unlock(&conn
->chan_lock
);
4271 l2cap_chan_lock(chan
);
4273 l2cap_chan_hold(chan
);
4274 l2cap_chan_del(chan
, 0);
4276 l2cap_chan_unlock(chan
);
4278 chan
->ops
->close(chan
);
4279 l2cap_chan_put(chan
);
4281 mutex_unlock(&conn
->chan_lock
);
4286 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4287 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4290 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4293 if (cmd_len
!= sizeof(*req
))
4296 type
= __le16_to_cpu(req
->type
);
4298 BT_DBG("type 0x%4.4x", type
);
4300 if (type
== L2CAP_IT_FEAT_MASK
) {
4302 u32 feat_mask
= l2cap_feat_mask
;
4303 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4304 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4305 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4307 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4309 if (conn
->hs_enabled
)
4310 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4311 | L2CAP_FEAT_EXT_WINDOW
;
4313 put_unaligned_le32(feat_mask
, rsp
->data
);
4314 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4316 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4318 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4320 if (conn
->hs_enabled
)
4321 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4323 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4325 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4326 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4327 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4328 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4331 struct l2cap_info_rsp rsp
;
4332 rsp
.type
= cpu_to_le16(type
);
4333 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
4334 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4341 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4342 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4345 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4348 if (cmd_len
< sizeof(*rsp
))
4351 type
= __le16_to_cpu(rsp
->type
);
4352 result
= __le16_to_cpu(rsp
->result
);
4354 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4356 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4357 if (cmd
->ident
!= conn
->info_ident
||
4358 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4361 cancel_delayed_work(&conn
->info_timer
);
4363 if (result
!= L2CAP_IR_SUCCESS
) {
4364 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4365 conn
->info_ident
= 0;
4367 l2cap_conn_start(conn
);
4373 case L2CAP_IT_FEAT_MASK
:
4374 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4376 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4377 struct l2cap_info_req req
;
4378 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4380 conn
->info_ident
= l2cap_get_ident(conn
);
4382 l2cap_send_cmd(conn
, conn
->info_ident
,
4383 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4385 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4386 conn
->info_ident
= 0;
4388 l2cap_conn_start(conn
);
4392 case L2CAP_IT_FIXED_CHAN
:
4393 conn
->fixed_chan_mask
= rsp
->data
[0];
4394 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4395 conn
->info_ident
= 0;
4397 l2cap_conn_start(conn
);
4404 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4405 struct l2cap_cmd_hdr
*cmd
,
4406 u16 cmd_len
, void *data
)
4408 struct l2cap_create_chan_req
*req
= data
;
4409 struct l2cap_create_chan_rsp rsp
;
4410 struct l2cap_chan
*chan
;
4411 struct hci_dev
*hdev
;
4414 if (cmd_len
!= sizeof(*req
))
4417 if (!conn
->hs_enabled
)
4420 psm
= le16_to_cpu(req
->psm
);
4421 scid
= le16_to_cpu(req
->scid
);
4423 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4425 /* For controller id 0 make BR/EDR connection */
4426 if (req
->amp_id
== AMP_ID_BREDR
) {
4427 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4432 /* Validate AMP controller id */
4433 hdev
= hci_dev_get(req
->amp_id
);
4437 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4442 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4445 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4446 struct hci_conn
*hs_hcon
;
4448 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4452 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4457 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4459 mgr
->bredr_chan
= chan
;
4460 chan
->hs_hcon
= hs_hcon
;
4461 chan
->fcs
= L2CAP_FCS_NONE
;
4462 conn
->mtu
= hdev
->block_mtu
;
4471 rsp
.scid
= cpu_to_le16(scid
);
4472 rsp
.result
= cpu_to_le16(L2CAP_CR_BAD_AMP
);
4473 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4475 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4481 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4483 struct l2cap_move_chan_req req
;
4486 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4488 ident
= l2cap_get_ident(chan
->conn
);
4489 chan
->ident
= ident
;
4491 req
.icid
= cpu_to_le16(chan
->scid
);
4492 req
.dest_amp_id
= dest_amp_id
;
4494 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4497 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4500 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4502 struct l2cap_move_chan_rsp rsp
;
4504 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4506 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4507 rsp
.result
= cpu_to_le16(result
);
4509 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4513 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4515 struct l2cap_move_chan_cfm cfm
;
4517 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4519 chan
->ident
= l2cap_get_ident(chan
->conn
);
4521 cfm
.icid
= cpu_to_le16(chan
->scid
);
4522 cfm
.result
= cpu_to_le16(result
);
4524 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4527 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4530 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4532 struct l2cap_move_chan_cfm cfm
;
4534 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4536 cfm
.icid
= cpu_to_le16(icid
);
4537 cfm
.result
= cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4539 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4543 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4546 struct l2cap_move_chan_cfm_rsp rsp
;
4548 BT_DBG("icid 0x%4.4x", icid
);
4550 rsp
.icid
= cpu_to_le16(icid
);
4551 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4554 static void __release_logical_link(struct l2cap_chan
*chan
)
4556 chan
->hs_hchan
= NULL
;
4557 chan
->hs_hcon
= NULL
;
4559 /* Placeholder - release the logical link */
4562 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4564 /* Logical link setup failed */
4565 if (chan
->state
!= BT_CONNECTED
) {
4566 /* Create channel failure, disconnect */
4567 l2cap_send_disconn_req(chan
, ECONNRESET
);
4571 switch (chan
->move_role
) {
4572 case L2CAP_MOVE_ROLE_RESPONDER
:
4573 l2cap_move_done(chan
);
4574 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4576 case L2CAP_MOVE_ROLE_INITIATOR
:
4577 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4578 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4579 /* Remote has only sent pending or
4580 * success responses, clean up
4582 l2cap_move_done(chan
);
4585 /* Other amp move states imply that the move
4586 * has already aborted
4588 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4593 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4594 struct hci_chan
*hchan
)
4596 struct l2cap_conf_rsp rsp
;
4598 chan
->hs_hchan
= hchan
;
4599 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4601 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4603 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4606 set_default_fcs(chan
);
4608 err
= l2cap_ertm_init(chan
);
4610 l2cap_send_disconn_req(chan
, -err
);
4612 l2cap_chan_ready(chan
);
4616 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4617 struct hci_chan
*hchan
)
4619 chan
->hs_hcon
= hchan
->conn
;
4620 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4622 BT_DBG("move_state %d", chan
->move_state
);
4624 switch (chan
->move_state
) {
4625 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4626 /* Move confirm will be sent after a success
4627 * response is received
4629 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4631 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4632 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4633 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4634 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4635 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4636 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4637 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4638 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4639 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4643 /* Move was not in expected state, free the channel */
4644 __release_logical_link(chan
);
4646 chan
->move_state
= L2CAP_MOVE_STABLE
;
4650 /* Call with chan locked */
4651 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4654 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4657 l2cap_logical_fail(chan
);
4658 __release_logical_link(chan
);
4662 if (chan
->state
!= BT_CONNECTED
) {
4663 /* Ignore logical link if channel is on BR/EDR */
4664 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4665 l2cap_logical_finish_create(chan
, hchan
);
4667 l2cap_logical_finish_move(chan
, hchan
);
4671 void l2cap_move_start(struct l2cap_chan
*chan
)
4673 BT_DBG("chan %p", chan
);
4675 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4676 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4678 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4679 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4680 /* Placeholder - start physical link setup */
4682 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4683 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4685 l2cap_move_setup(chan
);
4686 l2cap_send_move_chan_req(chan
, 0);
4690 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4691 u8 local_amp_id
, u8 remote_amp_id
)
4693 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4694 local_amp_id
, remote_amp_id
);
4696 chan
->fcs
= L2CAP_FCS_NONE
;
4698 /* Outgoing channel on AMP */
4699 if (chan
->state
== BT_CONNECT
) {
4700 if (result
== L2CAP_CR_SUCCESS
) {
4701 chan
->local_amp_id
= local_amp_id
;
4702 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4704 /* Revert to BR/EDR connect */
4705 l2cap_send_conn_req(chan
);
4711 /* Incoming channel on AMP */
4712 if (__l2cap_no_conn_pending(chan
)) {
4713 struct l2cap_conn_rsp rsp
;
4715 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4716 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4718 if (result
== L2CAP_CR_SUCCESS
) {
4719 /* Send successful response */
4720 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4721 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4723 /* Send negative response */
4724 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4725 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4728 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4731 if (result
== L2CAP_CR_SUCCESS
) {
4732 l2cap_state_change(chan
, BT_CONFIG
);
4733 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4734 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4736 l2cap_build_conf_req(chan
, buf
), buf
);
4737 chan
->num_conf_req
++;
4742 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4745 l2cap_move_setup(chan
);
4746 chan
->move_id
= local_amp_id
;
4747 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4749 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4752 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4754 struct hci_chan
*hchan
= NULL
;
4756 /* Placeholder - get hci_chan for logical link */
4759 if (hchan
->state
== BT_CONNECTED
) {
4760 /* Logical link is ready to go */
4761 chan
->hs_hcon
= hchan
->conn
;
4762 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4763 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4764 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4766 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4768 /* Wait for logical link to be ready */
4769 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4772 /* Logical link not available */
4773 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4777 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4779 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4781 if (result
== -EINVAL
)
4782 rsp_result
= L2CAP_MR_BAD_ID
;
4784 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4786 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4789 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4790 chan
->move_state
= L2CAP_MOVE_STABLE
;
4792 /* Restart data transmission */
4793 l2cap_ertm_send(chan
);
4796 /* Invoke with locked chan */
4797 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4799 u8 local_amp_id
= chan
->local_amp_id
;
4800 u8 remote_amp_id
= chan
->remote_amp_id
;
4802 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4803 chan
, result
, local_amp_id
, remote_amp_id
);
4805 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4806 l2cap_chan_unlock(chan
);
4810 if (chan
->state
!= BT_CONNECTED
) {
4811 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4812 } else if (result
!= L2CAP_MR_SUCCESS
) {
4813 l2cap_do_move_cancel(chan
, result
);
4815 switch (chan
->move_role
) {
4816 case L2CAP_MOVE_ROLE_INITIATOR
:
4817 l2cap_do_move_initiate(chan
, local_amp_id
,
4820 case L2CAP_MOVE_ROLE_RESPONDER
:
4821 l2cap_do_move_respond(chan
, result
);
4824 l2cap_do_move_cancel(chan
, result
);
4830 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4831 struct l2cap_cmd_hdr
*cmd
,
4832 u16 cmd_len
, void *data
)
4834 struct l2cap_move_chan_req
*req
= data
;
4835 struct l2cap_move_chan_rsp rsp
;
4836 struct l2cap_chan
*chan
;
4838 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4840 if (cmd_len
!= sizeof(*req
))
4843 icid
= le16_to_cpu(req
->icid
);
4845 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4847 if (!conn
->hs_enabled
)
4850 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4852 rsp
.icid
= cpu_to_le16(icid
);
4853 rsp
.result
= cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4854 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4859 chan
->ident
= cmd
->ident
;
4861 if (chan
->scid
< L2CAP_CID_DYN_START
||
4862 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4863 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4864 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4865 result
= L2CAP_MR_NOT_ALLOWED
;
4866 goto send_move_response
;
4869 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4870 result
= L2CAP_MR_SAME_ID
;
4871 goto send_move_response
;
4874 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4875 struct hci_dev
*hdev
;
4876 hdev
= hci_dev_get(req
->dest_amp_id
);
4877 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4878 !test_bit(HCI_UP
, &hdev
->flags
)) {
4882 result
= L2CAP_MR_BAD_ID
;
4883 goto send_move_response
;
4888 /* Detect a move collision. Only send a collision response
4889 * if this side has "lost", otherwise proceed with the move.
4890 * The winner has the larger bd_addr.
4892 if ((__chan_is_moving(chan
) ||
4893 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4894 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4895 result
= L2CAP_MR_COLLISION
;
4896 goto send_move_response
;
4899 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4900 l2cap_move_setup(chan
);
4901 chan
->move_id
= req
->dest_amp_id
;
4904 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4905 /* Moving to BR/EDR */
4906 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4907 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4908 result
= L2CAP_MR_PEND
;
4910 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4911 result
= L2CAP_MR_SUCCESS
;
4914 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4915 /* Placeholder - uncomment when amp functions are available */
4916 /*amp_accept_physical(chan, req->dest_amp_id);*/
4917 result
= L2CAP_MR_PEND
;
4921 l2cap_send_move_chan_rsp(chan
, result
);
4923 l2cap_chan_unlock(chan
);
4928 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4930 struct l2cap_chan
*chan
;
4931 struct hci_chan
*hchan
= NULL
;
4933 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4935 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4939 __clear_chan_timer(chan
);
4940 if (result
== L2CAP_MR_PEND
)
4941 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
4943 switch (chan
->move_state
) {
4944 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4945 /* Move confirm will be sent when logical link
4948 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4950 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
4951 if (result
== L2CAP_MR_PEND
) {
4953 } else if (test_bit(CONN_LOCAL_BUSY
,
4954 &chan
->conn_state
)) {
4955 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4957 /* Logical link is up or moving to BR/EDR,
4960 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4961 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4964 case L2CAP_MOVE_WAIT_RSP
:
4966 if (result
== L2CAP_MR_SUCCESS
) {
4967 /* Remote is ready, send confirm immediately
4968 * after logical link is ready
4970 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4972 /* Both logical link and move success
4973 * are required to confirm
4975 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
4978 /* Placeholder - get hci_chan for logical link */
4980 /* Logical link not available */
4981 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4985 /* If the logical link is not yet connected, do not
4986 * send confirmation.
4988 if (hchan
->state
!= BT_CONNECTED
)
4991 /* Logical link is already ready to go */
4993 chan
->hs_hcon
= hchan
->conn
;
4994 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4996 if (result
== L2CAP_MR_SUCCESS
) {
4997 /* Can confirm now */
4998 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5000 /* Now only need move success
5003 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5006 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5009 /* Any other amp move state means the move failed. */
5010 chan
->move_id
= chan
->local_amp_id
;
5011 l2cap_move_done(chan
);
5012 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5015 l2cap_chan_unlock(chan
);
5018 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5021 struct l2cap_chan
*chan
;
5023 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5025 /* Could not locate channel, icid is best guess */
5026 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5030 __clear_chan_timer(chan
);
5032 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5033 if (result
== L2CAP_MR_COLLISION
) {
5034 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5036 /* Cleanup - cancel move */
5037 chan
->move_id
= chan
->local_amp_id
;
5038 l2cap_move_done(chan
);
5042 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5044 l2cap_chan_unlock(chan
);
5047 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5048 struct l2cap_cmd_hdr
*cmd
,
5049 u16 cmd_len
, void *data
)
5051 struct l2cap_move_chan_rsp
*rsp
= data
;
5054 if (cmd_len
!= sizeof(*rsp
))
5057 icid
= le16_to_cpu(rsp
->icid
);
5058 result
= le16_to_cpu(rsp
->result
);
5060 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5062 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5063 l2cap_move_continue(conn
, icid
, result
);
5065 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5070 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5071 struct l2cap_cmd_hdr
*cmd
,
5072 u16 cmd_len
, void *data
)
5074 struct l2cap_move_chan_cfm
*cfm
= data
;
5075 struct l2cap_chan
*chan
;
5078 if (cmd_len
!= sizeof(*cfm
))
5081 icid
= le16_to_cpu(cfm
->icid
);
5082 result
= le16_to_cpu(cfm
->result
);
5084 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5086 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5088 /* Spec requires a response even if the icid was not found */
5089 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5093 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5094 if (result
== L2CAP_MC_CONFIRMED
) {
5095 chan
->local_amp_id
= chan
->move_id
;
5096 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5097 __release_logical_link(chan
);
5099 chan
->move_id
= chan
->local_amp_id
;
5102 l2cap_move_done(chan
);
5105 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5107 l2cap_chan_unlock(chan
);
5112 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5113 struct l2cap_cmd_hdr
*cmd
,
5114 u16 cmd_len
, void *data
)
5116 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5117 struct l2cap_chan
*chan
;
5120 if (cmd_len
!= sizeof(*rsp
))
5123 icid
= le16_to_cpu(rsp
->icid
);
5125 BT_DBG("icid 0x%4.4x", icid
);
5127 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5131 __clear_chan_timer(chan
);
5133 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5134 chan
->local_amp_id
= chan
->move_id
;
5136 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5137 __release_logical_link(chan
);
5139 l2cap_move_done(chan
);
5142 l2cap_chan_unlock(chan
);
5147 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5148 struct l2cap_cmd_hdr
*cmd
,
5149 u16 cmd_len
, u8
*data
)
5151 struct hci_conn
*hcon
= conn
->hcon
;
5152 struct l2cap_conn_param_update_req
*req
;
5153 struct l2cap_conn_param_update_rsp rsp
;
5154 u16 min
, max
, latency
, to_multiplier
;
5157 if (hcon
->role
!= HCI_ROLE_MASTER
)
5160 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5163 req
= (struct l2cap_conn_param_update_req
*) data
;
5164 min
= __le16_to_cpu(req
->min
);
5165 max
= __le16_to_cpu(req
->max
);
5166 latency
= __le16_to_cpu(req
->latency
);
5167 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5169 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5170 min
, max
, latency
, to_multiplier
);
5172 memset(&rsp
, 0, sizeof(rsp
));
5174 err
= hci_check_conn_params(min
, max
, latency
, to_multiplier
);
5176 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5178 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5180 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5186 store_hint
= hci_le_conn_update(hcon
, min
, max
, latency
,
5188 mgmt_new_conn_param(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
,
5189 store_hint
, min
, max
, latency
,
5197 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5198 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5201 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5202 u16 dcid
, mtu
, mps
, credits
, result
;
5203 struct l2cap_chan
*chan
;
5206 if (cmd_len
< sizeof(*rsp
))
5209 dcid
= __le16_to_cpu(rsp
->dcid
);
5210 mtu
= __le16_to_cpu(rsp
->mtu
);
5211 mps
= __le16_to_cpu(rsp
->mps
);
5212 credits
= __le16_to_cpu(rsp
->credits
);
5213 result
= __le16_to_cpu(rsp
->result
);
5215 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23))
5218 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5219 dcid
, mtu
, mps
, credits
, result
);
5221 mutex_lock(&conn
->chan_lock
);
5223 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5231 l2cap_chan_lock(chan
);
5234 case L2CAP_CR_SUCCESS
:
5238 chan
->remote_mps
= mps
;
5239 chan
->tx_credits
= credits
;
5240 l2cap_chan_ready(chan
);
5244 l2cap_chan_del(chan
, ECONNREFUSED
);
5248 l2cap_chan_unlock(chan
);
5251 mutex_unlock(&conn
->chan_lock
);
5256 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5257 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5262 switch (cmd
->code
) {
5263 case L2CAP_COMMAND_REJ
:
5264 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5267 case L2CAP_CONN_REQ
:
5268 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5271 case L2CAP_CONN_RSP
:
5272 case L2CAP_CREATE_CHAN_RSP
:
5273 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5276 case L2CAP_CONF_REQ
:
5277 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5280 case L2CAP_CONF_RSP
:
5281 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5284 case L2CAP_DISCONN_REQ
:
5285 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5288 case L2CAP_DISCONN_RSP
:
5289 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5292 case L2CAP_ECHO_REQ
:
5293 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5296 case L2CAP_ECHO_RSP
:
5299 case L2CAP_INFO_REQ
:
5300 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5303 case L2CAP_INFO_RSP
:
5304 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5307 case L2CAP_CREATE_CHAN_REQ
:
5308 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5311 case L2CAP_MOVE_CHAN_REQ
:
5312 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5315 case L2CAP_MOVE_CHAN_RSP
:
5316 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5319 case L2CAP_MOVE_CHAN_CFM
:
5320 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5323 case L2CAP_MOVE_CHAN_CFM_RSP
:
5324 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5328 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5336 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5337 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5340 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5341 struct l2cap_le_conn_rsp rsp
;
5342 struct l2cap_chan
*chan
, *pchan
;
5343 u16 dcid
, scid
, credits
, mtu
, mps
;
5347 if (cmd_len
!= sizeof(*req
))
5350 scid
= __le16_to_cpu(req
->scid
);
5351 mtu
= __le16_to_cpu(req
->mtu
);
5352 mps
= __le16_to_cpu(req
->mps
);
5357 if (mtu
< 23 || mps
< 23)
5360 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5363 /* Check if we have socket listening on psm */
5364 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5365 &conn
->hcon
->dst
, LE_LINK
);
5367 result
= L2CAP_CR_BAD_PSM
;
5372 mutex_lock(&conn
->chan_lock
);
5373 l2cap_chan_lock(pchan
);
5375 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
)) {
5376 result
= L2CAP_CR_AUTHENTICATION
;
5378 goto response_unlock
;
5381 /* Check if we already have channel with that dcid */
5382 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5383 result
= L2CAP_CR_NO_MEM
;
5385 goto response_unlock
;
5388 chan
= pchan
->ops
->new_connection(pchan
);
5390 result
= L2CAP_CR_NO_MEM
;
5391 goto response_unlock
;
5394 l2cap_le_flowctl_init(chan
);
5396 bacpy(&chan
->src
, &conn
->hcon
->src
);
5397 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5398 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
5399 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
5403 chan
->remote_mps
= mps
;
5404 chan
->tx_credits
= __le16_to_cpu(req
->credits
);
5406 __l2cap_chan_add(conn
, chan
);
5408 credits
= chan
->rx_credits
;
5410 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5412 chan
->ident
= cmd
->ident
;
5414 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5415 l2cap_state_change(chan
, BT_CONNECT2
);
5416 /* The following result value is actually not defined
5417 * for LE CoC but we use it to let the function know
5418 * that it should bail out after doing its cleanup
5419 * instead of sending a response.
5421 result
= L2CAP_CR_PEND
;
5422 chan
->ops
->defer(chan
);
5424 l2cap_chan_ready(chan
);
5425 result
= L2CAP_CR_SUCCESS
;
5429 l2cap_chan_unlock(pchan
);
5430 mutex_unlock(&conn
->chan_lock
);
5431 l2cap_chan_put(pchan
);
5433 if (result
== L2CAP_CR_PEND
)
5438 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5439 rsp
.mps
= cpu_to_le16(chan
->mps
);
5445 rsp
.dcid
= cpu_to_le16(dcid
);
5446 rsp
.credits
= cpu_to_le16(credits
);
5447 rsp
.result
= cpu_to_le16(result
);
5449 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
5454 static inline int l2cap_le_credits(struct l2cap_conn
*conn
,
5455 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5458 struct l2cap_le_credits
*pkt
;
5459 struct l2cap_chan
*chan
;
5460 u16 cid
, credits
, max_credits
;
5462 if (cmd_len
!= sizeof(*pkt
))
5465 pkt
= (struct l2cap_le_credits
*) data
;
5466 cid
= __le16_to_cpu(pkt
->cid
);
5467 credits
= __le16_to_cpu(pkt
->credits
);
5469 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid
, credits
);
5471 chan
= l2cap_get_chan_by_dcid(conn
, cid
);
5475 max_credits
= LE_FLOWCTL_MAX_CREDITS
- chan
->tx_credits
;
5476 if (credits
> max_credits
) {
5477 BT_ERR("LE credits overflow");
5478 l2cap_send_disconn_req(chan
, ECONNRESET
);
5480 /* Return 0 so that we don't trigger an unnecessary
5481 * command reject packet.
5486 chan
->tx_credits
+= credits
;
5488 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
5489 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
5493 if (chan
->tx_credits
)
5494 chan
->ops
->resume(chan
);
5496 l2cap_chan_unlock(chan
);
5501 static inline int l2cap_le_command_rej(struct l2cap_conn
*conn
,
5502 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5505 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
5506 struct l2cap_chan
*chan
;
5508 if (cmd_len
< sizeof(*rej
))
5511 mutex_lock(&conn
->chan_lock
);
5513 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5517 l2cap_chan_lock(chan
);
5518 l2cap_chan_del(chan
, ECONNREFUSED
);
5519 l2cap_chan_unlock(chan
);
5522 mutex_unlock(&conn
->chan_lock
);
5526 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5527 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5532 switch (cmd
->code
) {
5533 case L2CAP_COMMAND_REJ
:
5534 l2cap_le_command_rej(conn
, cmd
, cmd_len
, data
);
5537 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5538 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5541 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5544 case L2CAP_LE_CONN_RSP
:
5545 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5548 case L2CAP_LE_CONN_REQ
:
5549 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5552 case L2CAP_LE_CREDITS
:
5553 err
= l2cap_le_credits(conn
, cmd
, cmd_len
, data
);
5556 case L2CAP_DISCONN_REQ
:
5557 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5560 case L2CAP_DISCONN_RSP
:
5561 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5565 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5573 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5574 struct sk_buff
*skb
)
5576 struct hci_conn
*hcon
= conn
->hcon
;
5577 struct l2cap_cmd_hdr
*cmd
;
5581 if (hcon
->type
!= LE_LINK
)
5584 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5587 cmd
= (void *) skb
->data
;
5588 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5590 len
= le16_to_cpu(cmd
->len
);
5592 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5594 if (len
!= skb
->len
|| !cmd
->ident
) {
5595 BT_DBG("corrupted command");
5599 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5601 struct l2cap_cmd_rej_unk rej
;
5603 BT_ERR("Wrong link type (%d)", err
);
5605 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5606 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5614 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5615 struct sk_buff
*skb
)
5617 struct hci_conn
*hcon
= conn
->hcon
;
5618 u8
*data
= skb
->data
;
5620 struct l2cap_cmd_hdr cmd
;
5623 l2cap_raw_recv(conn
, skb
);
5625 if (hcon
->type
!= ACL_LINK
)
5628 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5630 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5631 data
+= L2CAP_CMD_HDR_SIZE
;
5632 len
-= L2CAP_CMD_HDR_SIZE
;
5634 cmd_len
= le16_to_cpu(cmd
.len
);
5636 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5639 if (cmd_len
> len
|| !cmd
.ident
) {
5640 BT_DBG("corrupted command");
5644 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5646 struct l2cap_cmd_rej_unk rej
;
5648 BT_ERR("Wrong link type (%d)", err
);
5650 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5651 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5663 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5665 u16 our_fcs
, rcv_fcs
;
5668 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5669 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5671 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5673 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5674 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5675 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5676 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5678 if (our_fcs
!= rcv_fcs
)
5684 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5686 struct l2cap_ctrl control
;
5688 BT_DBG("chan %p", chan
);
5690 memset(&control
, 0, sizeof(control
));
5693 control
.reqseq
= chan
->buffer_seq
;
5694 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5696 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5697 control
.super
= L2CAP_SUPER_RNR
;
5698 l2cap_send_sframe(chan
, &control
);
5701 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5702 chan
->unacked_frames
> 0)
5703 __set_retrans_timer(chan
);
5705 /* Send pending iframes */
5706 l2cap_ertm_send(chan
);
5708 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5709 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5710 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5713 control
.super
= L2CAP_SUPER_RR
;
5714 l2cap_send_sframe(chan
, &control
);
5718 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5719 struct sk_buff
**last_frag
)
5721 /* skb->len reflects data in skb as well as all fragments
5722 * skb->data_len reflects only data in fragments
5724 if (!skb_has_frag_list(skb
))
5725 skb_shinfo(skb
)->frag_list
= new_frag
;
5727 new_frag
->next
= NULL
;
5729 (*last_frag
)->next
= new_frag
;
5730 *last_frag
= new_frag
;
5732 skb
->len
+= new_frag
->len
;
5733 skb
->data_len
+= new_frag
->len
;
5734 skb
->truesize
+= new_frag
->truesize
;
5737 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5738 struct l2cap_ctrl
*control
)
5742 switch (control
->sar
) {
5743 case L2CAP_SAR_UNSEGMENTED
:
5747 err
= chan
->ops
->recv(chan
, skb
);
5750 case L2CAP_SAR_START
:
5754 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5755 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5757 if (chan
->sdu_len
> chan
->imtu
) {
5762 if (skb
->len
>= chan
->sdu_len
)
5766 chan
->sdu_last_frag
= skb
;
5772 case L2CAP_SAR_CONTINUE
:
5776 append_skb_frag(chan
->sdu
, skb
,
5777 &chan
->sdu_last_frag
);
5780 if (chan
->sdu
->len
>= chan
->sdu_len
)
5790 append_skb_frag(chan
->sdu
, skb
,
5791 &chan
->sdu_last_frag
);
5794 if (chan
->sdu
->len
!= chan
->sdu_len
)
5797 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5800 /* Reassembly complete */
5802 chan
->sdu_last_frag
= NULL
;
5810 kfree_skb(chan
->sdu
);
5812 chan
->sdu_last_frag
= NULL
;
/* Placeholder for re-segmenting queued frames after an AMP channel move
 * changes the usable MTU; currently a no-op.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5825 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5829 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5832 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5833 l2cap_tx(chan
, NULL
, NULL
, event
);
5836 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5839 /* Pass sequential frames to l2cap_reassemble_sdu()
5840 * until a gap is encountered.
5843 BT_DBG("chan %p", chan
);
5845 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5846 struct sk_buff
*skb
;
5847 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5848 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5850 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5855 skb_unlink(skb
, &chan
->srej_q
);
5856 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5857 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
5862 if (skb_queue_empty(&chan
->srej_q
)) {
5863 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5864 l2cap_send_ack(chan
);
5870 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5871 struct l2cap_ctrl
*control
)
5873 struct sk_buff
*skb
;
5875 BT_DBG("chan %p, control %p", chan
, control
);
5877 if (control
->reqseq
== chan
->next_tx_seq
) {
5878 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5879 l2cap_send_disconn_req(chan
, ECONNRESET
);
5883 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5886 BT_DBG("Seq %d not available for retransmission",
5891 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5892 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5893 l2cap_send_disconn_req(chan
, ECONNRESET
);
5897 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5899 if (control
->poll
) {
5900 l2cap_pass_to_tx(chan
, control
);
5902 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5903 l2cap_retransmit(chan
, control
);
5904 l2cap_ertm_send(chan
);
5906 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5907 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5908 chan
->srej_save_reqseq
= control
->reqseq
;
5911 l2cap_pass_to_tx_fbit(chan
, control
);
5913 if (control
->final
) {
5914 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5915 !test_and_clear_bit(CONN_SREJ_ACT
,
5917 l2cap_retransmit(chan
, control
);
5919 l2cap_retransmit(chan
, control
);
5920 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5921 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5922 chan
->srej_save_reqseq
= control
->reqseq
;
5928 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5929 struct l2cap_ctrl
*control
)
5931 struct sk_buff
*skb
;
5933 BT_DBG("chan %p, control %p", chan
, control
);
5935 if (control
->reqseq
== chan
->next_tx_seq
) {
5936 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5937 l2cap_send_disconn_req(chan
, ECONNRESET
);
5941 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5943 if (chan
->max_tx
&& skb
&&
5944 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5945 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5946 l2cap_send_disconn_req(chan
, ECONNRESET
);
5950 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5952 l2cap_pass_to_tx(chan
, control
);
5954 if (control
->final
) {
5955 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
5956 l2cap_retransmit_all(chan
, control
);
5958 l2cap_retransmit_all(chan
, control
);
5959 l2cap_ertm_send(chan
);
5960 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
5961 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
5965 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
5967 BT_DBG("chan %p, txseq %d", chan
, txseq
);
5969 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
5970 chan
->expected_tx_seq
);
5972 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
5973 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5975 /* See notes below regarding "double poll" and
5978 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5979 BT_DBG("Invalid/Ignore - after SREJ");
5980 return L2CAP_TXSEQ_INVALID_IGNORE
;
5982 BT_DBG("Invalid - in window after SREJ sent");
5983 return L2CAP_TXSEQ_INVALID
;
5987 if (chan
->srej_list
.head
== txseq
) {
5988 BT_DBG("Expected SREJ");
5989 return L2CAP_TXSEQ_EXPECTED_SREJ
;
5992 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
5993 BT_DBG("Duplicate SREJ - txseq already stored");
5994 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
5997 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
5998 BT_DBG("Unexpected SREJ - not requested");
5999 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6003 if (chan
->expected_tx_seq
== txseq
) {
6004 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6006 BT_DBG("Invalid - txseq outside tx window");
6007 return L2CAP_TXSEQ_INVALID
;
6010 return L2CAP_TXSEQ_EXPECTED
;
6014 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6015 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6016 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6017 return L2CAP_TXSEQ_DUPLICATE
;
6020 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6021 /* A source of invalid packets is a "double poll" condition,
6022 * where delays cause us to send multiple poll packets. If
6023 * the remote stack receives and processes both polls,
6024 * sequence numbers can wrap around in such a way that a
6025 * resent frame has a sequence number that looks like new data
6026 * with a sequence gap. This would trigger an erroneous SREJ
6029 * Fortunately, this is impossible with a tx window that's
6030 * less than half of the maximum sequence number, which allows
6031 * invalid frames to be safely ignored.
6033 * With tx window sizes greater than half of the tx window
6034 * maximum, the frame is invalid and cannot be ignored. This
6035 * causes a disconnect.
6038 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6039 BT_DBG("Invalid/Ignore - txseq outside tx window");
6040 return L2CAP_TXSEQ_INVALID_IGNORE
;
6042 BT_DBG("Invalid - txseq outside tx window");
6043 return L2CAP_TXSEQ_INVALID
;
6046 BT_DBG("Unexpected - txseq indicates missing frames");
6047 return L2CAP_TXSEQ_UNEXPECTED
;
6051 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6052 struct l2cap_ctrl
*control
,
6053 struct sk_buff
*skb
, u8 event
)
6056 bool skb_in_use
= false;
6058 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6062 case L2CAP_EV_RECV_IFRAME
:
6063 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6064 case L2CAP_TXSEQ_EXPECTED
:
6065 l2cap_pass_to_tx(chan
, control
);
6067 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6068 BT_DBG("Busy, discarding expected seq %d",
6073 chan
->expected_tx_seq
= __next_seq(chan
,
6076 chan
->buffer_seq
= chan
->expected_tx_seq
;
6079 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6083 if (control
->final
) {
6084 if (!test_and_clear_bit(CONN_REJ_ACT
,
6085 &chan
->conn_state
)) {
6087 l2cap_retransmit_all(chan
, control
);
6088 l2cap_ertm_send(chan
);
6092 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6093 l2cap_send_ack(chan
);
6095 case L2CAP_TXSEQ_UNEXPECTED
:
6096 l2cap_pass_to_tx(chan
, control
);
6098 /* Can't issue SREJ frames in the local busy state.
6099 * Drop this frame, it will be seen as missing
6100 * when local busy is exited.
6102 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6103 BT_DBG("Busy, discarding unexpected seq %d",
6108 /* There was a gap in the sequence, so an SREJ
6109 * must be sent for each missing frame. The
6110 * current frame is stored for later use.
6112 skb_queue_tail(&chan
->srej_q
, skb
);
6114 BT_DBG("Queued %p (queue len %d)", skb
,
6115 skb_queue_len(&chan
->srej_q
));
6117 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6118 l2cap_seq_list_clear(&chan
->srej_list
);
6119 l2cap_send_srej(chan
, control
->txseq
);
6121 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6123 case L2CAP_TXSEQ_DUPLICATE
:
6124 l2cap_pass_to_tx(chan
, control
);
6126 case L2CAP_TXSEQ_INVALID_IGNORE
:
6128 case L2CAP_TXSEQ_INVALID
:
6130 l2cap_send_disconn_req(chan
, ECONNRESET
);
6134 case L2CAP_EV_RECV_RR
:
6135 l2cap_pass_to_tx(chan
, control
);
6136 if (control
->final
) {
6137 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6139 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6140 !__chan_is_moving(chan
)) {
6142 l2cap_retransmit_all(chan
, control
);
6145 l2cap_ertm_send(chan
);
6146 } else if (control
->poll
) {
6147 l2cap_send_i_or_rr_or_rnr(chan
);
6149 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6150 &chan
->conn_state
) &&
6151 chan
->unacked_frames
)
6152 __set_retrans_timer(chan
);
6154 l2cap_ertm_send(chan
);
6157 case L2CAP_EV_RECV_RNR
:
6158 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6159 l2cap_pass_to_tx(chan
, control
);
6160 if (control
&& control
->poll
) {
6161 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6162 l2cap_send_rr_or_rnr(chan
, 0);
6164 __clear_retrans_timer(chan
);
6165 l2cap_seq_list_clear(&chan
->retrans_list
);
6167 case L2CAP_EV_RECV_REJ
:
6168 l2cap_handle_rej(chan
, control
);
6170 case L2CAP_EV_RECV_SREJ
:
6171 l2cap_handle_srej(chan
, control
);
6177 if (skb
&& !skb_in_use
) {
6178 BT_DBG("Freeing %p", skb
);
6185 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6186 struct l2cap_ctrl
*control
,
6187 struct sk_buff
*skb
, u8 event
)
6190 u16 txseq
= control
->txseq
;
6191 bool skb_in_use
= false;
6193 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6197 case L2CAP_EV_RECV_IFRAME
:
6198 switch (l2cap_classify_txseq(chan
, txseq
)) {
6199 case L2CAP_TXSEQ_EXPECTED
:
6200 /* Keep frame for reassembly later */
6201 l2cap_pass_to_tx(chan
, control
);
6202 skb_queue_tail(&chan
->srej_q
, skb
);
6204 BT_DBG("Queued %p (queue len %d)", skb
,
6205 skb_queue_len(&chan
->srej_q
));
6207 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6209 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6210 l2cap_seq_list_pop(&chan
->srej_list
);
6212 l2cap_pass_to_tx(chan
, control
);
6213 skb_queue_tail(&chan
->srej_q
, skb
);
6215 BT_DBG("Queued %p (queue len %d)", skb
,
6216 skb_queue_len(&chan
->srej_q
));
6218 err
= l2cap_rx_queued_iframes(chan
);
6223 case L2CAP_TXSEQ_UNEXPECTED
:
6224 /* Got a frame that can't be reassembled yet.
6225 * Save it for later, and send SREJs to cover
6226 * the missing frames.
6228 skb_queue_tail(&chan
->srej_q
, skb
);
6230 BT_DBG("Queued %p (queue len %d)", skb
,
6231 skb_queue_len(&chan
->srej_q
));
6233 l2cap_pass_to_tx(chan
, control
);
6234 l2cap_send_srej(chan
, control
->txseq
);
6236 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6237 /* This frame was requested with an SREJ, but
6238 * some expected retransmitted frames are
6239 * missing. Request retransmission of missing
6242 skb_queue_tail(&chan
->srej_q
, skb
);
6244 BT_DBG("Queued %p (queue len %d)", skb
,
6245 skb_queue_len(&chan
->srej_q
));
6247 l2cap_pass_to_tx(chan
, control
);
6248 l2cap_send_srej_list(chan
, control
->txseq
);
6250 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6251 /* We've already queued this frame. Drop this copy. */
6252 l2cap_pass_to_tx(chan
, control
);
6254 case L2CAP_TXSEQ_DUPLICATE
:
6255 /* Expecting a later sequence number, so this frame
6256 * was already received. Ignore it completely.
6259 case L2CAP_TXSEQ_INVALID_IGNORE
:
6261 case L2CAP_TXSEQ_INVALID
:
6263 l2cap_send_disconn_req(chan
, ECONNRESET
);
6267 case L2CAP_EV_RECV_RR
:
6268 l2cap_pass_to_tx(chan
, control
);
6269 if (control
->final
) {
6270 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6272 if (!test_and_clear_bit(CONN_REJ_ACT
,
6273 &chan
->conn_state
)) {
6275 l2cap_retransmit_all(chan
, control
);
6278 l2cap_ertm_send(chan
);
6279 } else if (control
->poll
) {
6280 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6281 &chan
->conn_state
) &&
6282 chan
->unacked_frames
) {
6283 __set_retrans_timer(chan
);
6286 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6287 l2cap_send_srej_tail(chan
);
6289 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6290 &chan
->conn_state
) &&
6291 chan
->unacked_frames
)
6292 __set_retrans_timer(chan
);
6294 l2cap_send_ack(chan
);
6297 case L2CAP_EV_RECV_RNR
:
6298 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6299 l2cap_pass_to_tx(chan
, control
);
6300 if (control
->poll
) {
6301 l2cap_send_srej_tail(chan
);
6303 struct l2cap_ctrl rr_control
;
6304 memset(&rr_control
, 0, sizeof(rr_control
));
6305 rr_control
.sframe
= 1;
6306 rr_control
.super
= L2CAP_SUPER_RR
;
6307 rr_control
.reqseq
= chan
->buffer_seq
;
6308 l2cap_send_sframe(chan
, &rr_control
);
6312 case L2CAP_EV_RECV_REJ
:
6313 l2cap_handle_rej(chan
, control
);
6315 case L2CAP_EV_RECV_SREJ
:
6316 l2cap_handle_srej(chan
, control
);
6320 if (skb
&& !skb_in_use
) {
6321 BT_DBG("Freeing %p", skb
);
6328 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6330 BT_DBG("chan %p", chan
);
6332 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6335 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6337 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6339 return l2cap_resegment(chan
);
6342 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6343 struct l2cap_ctrl
*control
,
6344 struct sk_buff
*skb
, u8 event
)
6348 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6354 l2cap_process_reqseq(chan
, control
->reqseq
);
6356 if (!skb_queue_empty(&chan
->tx_q
))
6357 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6359 chan
->tx_send_head
= NULL
;
6361 /* Rewind next_tx_seq to the point expected
6364 chan
->next_tx_seq
= control
->reqseq
;
6365 chan
->unacked_frames
= 0;
6367 err
= l2cap_finish_move(chan
);
6371 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6372 l2cap_send_i_or_rr_or_rnr(chan
);
6374 if (event
== L2CAP_EV_RECV_IFRAME
)
6377 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6380 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6381 struct l2cap_ctrl
*control
,
6382 struct sk_buff
*skb
, u8 event
)
6386 if (!control
->final
)
6389 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6391 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6392 l2cap_process_reqseq(chan
, control
->reqseq
);
6394 if (!skb_queue_empty(&chan
->tx_q
))
6395 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6397 chan
->tx_send_head
= NULL
;
6399 /* Rewind next_tx_seq to the point expected
6402 chan
->next_tx_seq
= control
->reqseq
;
6403 chan
->unacked_frames
= 0;
6406 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6408 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6410 err
= l2cap_resegment(chan
);
6413 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6418 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6420 /* Make sure reqseq is for a packet that has been sent but not acked */
6423 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6424 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6427 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6428 struct sk_buff
*skb
, u8 event
)
6432 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6433 control
, skb
, event
, chan
->rx_state
);
6435 if (__valid_reqseq(chan
, control
->reqseq
)) {
6436 switch (chan
->rx_state
) {
6437 case L2CAP_RX_STATE_RECV
:
6438 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6440 case L2CAP_RX_STATE_SREJ_SENT
:
6441 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6444 case L2CAP_RX_STATE_WAIT_P
:
6445 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6447 case L2CAP_RX_STATE_WAIT_F
:
6448 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6455 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6456 control
->reqseq
, chan
->next_tx_seq
,
6457 chan
->expected_ack_seq
);
6458 l2cap_send_disconn_req(chan
, ECONNRESET
);
6464 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6465 struct sk_buff
*skb
)
6469 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6472 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6473 L2CAP_TXSEQ_EXPECTED
) {
6474 l2cap_pass_to_tx(chan
, control
);
6476 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6477 __next_seq(chan
, chan
->buffer_seq
));
6479 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6481 l2cap_reassemble_sdu(chan
, skb
, control
);
6484 kfree_skb(chan
->sdu
);
6487 chan
->sdu_last_frag
= NULL
;
6491 BT_DBG("Freeing %p", skb
);
6496 chan
->last_acked_seq
= control
->txseq
;
6497 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6502 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6504 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6508 __unpack_control(chan
, skb
);
6513 * We can just drop the corrupted I-frame here.
6514 * Receiver will miss it and start proper recovery
6515 * procedures and ask for retransmission.
6517 if (l2cap_check_fcs(chan
, skb
))
6520 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6521 len
-= L2CAP_SDULEN_SIZE
;
6523 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6524 len
-= L2CAP_FCS_SIZE
;
6526 if (len
> chan
->mps
) {
6527 l2cap_send_disconn_req(chan
, ECONNRESET
);
6531 if (!control
->sframe
) {
6534 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6535 control
->sar
, control
->reqseq
, control
->final
,
6538 /* Validate F-bit - F=0 always valid, F=1 only
6539 * valid in TX WAIT_F
6541 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6544 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6545 event
= L2CAP_EV_RECV_IFRAME
;
6546 err
= l2cap_rx(chan
, control
, skb
, event
);
6548 err
= l2cap_stream_rx(chan
, control
, skb
);
6552 l2cap_send_disconn_req(chan
, ECONNRESET
);
6554 const u8 rx_func_to_event
[4] = {
6555 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6556 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6559 /* Only I-frames are expected in streaming mode */
6560 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6563 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6564 control
->reqseq
, control
->final
, control
->poll
,
6568 BT_ERR("Trailing bytes: %d in sframe", len
);
6569 l2cap_send_disconn_req(chan
, ECONNRESET
);
6573 /* Validate F and P bits */
6574 if (control
->final
&& (control
->poll
||
6575 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6578 event
= rx_func_to_event
[control
->super
];
6579 if (l2cap_rx(chan
, control
, skb
, event
))
6580 l2cap_send_disconn_req(chan
, ECONNRESET
);
6590 static void l2cap_chan_le_send_credits(struct l2cap_chan
*chan
)
6592 struct l2cap_conn
*conn
= chan
->conn
;
6593 struct l2cap_le_credits pkt
;
6596 /* We return more credits to the sender only after the amount of
6597 * credits falls below half of the initial amount.
6599 if (chan
->rx_credits
>= (le_max_credits
+ 1) / 2)
6602 return_credits
= le_max_credits
- chan
->rx_credits
;
6604 BT_DBG("chan %p returning %u credits to sender", chan
, return_credits
);
6606 chan
->rx_credits
+= return_credits
;
6608 pkt
.cid
= cpu_to_le16(chan
->scid
);
6609 pkt
.credits
= cpu_to_le16(return_credits
);
6611 chan
->ident
= l2cap_get_ident(conn
);
6613 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CREDITS
, sizeof(pkt
), &pkt
);
6616 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6620 if (!chan
->rx_credits
) {
6621 BT_ERR("No credits to receive LE L2CAP data");
6622 l2cap_send_disconn_req(chan
, ECONNRESET
);
6626 if (chan
->imtu
< skb
->len
) {
6627 BT_ERR("Too big LE L2CAP PDU");
6632 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6634 l2cap_chan_le_send_credits(chan
);
6641 sdu_len
= get_unaligned_le16(skb
->data
);
6642 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6644 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6645 sdu_len
, skb
->len
, chan
->imtu
);
6647 if (sdu_len
> chan
->imtu
) {
6648 BT_ERR("Too big LE L2CAP SDU length received");
6653 if (skb
->len
> sdu_len
) {
6654 BT_ERR("Too much LE L2CAP data received");
6659 if (skb
->len
== sdu_len
)
6660 return chan
->ops
->recv(chan
, skb
);
6663 chan
->sdu_len
= sdu_len
;
6664 chan
->sdu_last_frag
= skb
;
6669 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6670 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6672 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6673 BT_ERR("Too much LE L2CAP data received");
6678 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6681 if (chan
->sdu
->len
== chan
->sdu_len
) {
6682 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6685 chan
->sdu_last_frag
= NULL
;
6693 kfree_skb(chan
->sdu
);
6695 chan
->sdu_last_frag
= NULL
;
6699 /* We can't return an error here since we took care of the skb
6700 * freeing internally. An error return would cause the caller to
6701 * do a double-free of the skb.
6706 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6707 struct sk_buff
*skb
)
6709 struct l2cap_chan
*chan
;
6711 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6713 if (cid
== L2CAP_CID_A2MP
) {
6714 chan
= a2mp_channel_create(conn
, skb
);
6720 l2cap_chan_lock(chan
);
6722 BT_DBG("unknown cid 0x%4.4x", cid
);
6723 /* Drop packet and return */
6729 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6731 if (chan
->state
!= BT_CONNECTED
)
6734 switch (chan
->mode
) {
6735 case L2CAP_MODE_LE_FLOWCTL
:
6736 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6741 case L2CAP_MODE_BASIC
:
6742 /* If socket recv buffers overflows we drop data here
6743 * which is *bad* because L2CAP has to be reliable.
6744 * But we don't have any other choice. L2CAP doesn't
6745 * provide flow control mechanism. */
6747 if (chan
->imtu
< skb
->len
) {
6748 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6752 if (!chan
->ops
->recv(chan
, skb
))
6756 case L2CAP_MODE_ERTM
:
6757 case L2CAP_MODE_STREAMING
:
6758 l2cap_data_rcv(chan
, skb
);
6762 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6770 l2cap_chan_unlock(chan
);
6773 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6774 struct sk_buff
*skb
)
6776 struct hci_conn
*hcon
= conn
->hcon
;
6777 struct l2cap_chan
*chan
;
6779 if (hcon
->type
!= ACL_LINK
)
6782 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
6787 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6789 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6792 if (chan
->imtu
< skb
->len
)
6795 /* Store remote BD_ADDR and PSM for msg_name */
6796 bacpy(&bt_cb(skb
)->bdaddr
, &hcon
->dst
);
6797 bt_cb(skb
)->psm
= psm
;
6799 if (!chan
->ops
->recv(chan
, skb
)) {
6800 l2cap_chan_put(chan
);
6805 l2cap_chan_put(chan
);
6810 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6812 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6813 struct hci_conn
*hcon
= conn
->hcon
;
6817 if (hcon
->state
!= BT_CONNECTED
) {
6818 BT_DBG("queueing pending rx skb");
6819 skb_queue_tail(&conn
->pending_rx
, skb
);
6823 skb_pull(skb
, L2CAP_HDR_SIZE
);
6824 cid
= __le16_to_cpu(lh
->cid
);
6825 len
= __le16_to_cpu(lh
->len
);
6827 if (len
!= skb
->len
) {
6832 /* Since we can't actively block incoming LE connections we must
6833 * at least ensure that we ignore incoming data from them.
6835 if (hcon
->type
== LE_LINK
&&
6836 hci_bdaddr_list_lookup(&hcon
->hdev
->blacklist
, &hcon
->dst
,
6837 bdaddr_type(hcon
, hcon
->dst_type
))) {
6842 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6845 case L2CAP_CID_SIGNALING
:
6846 l2cap_sig_channel(conn
, skb
);
6849 case L2CAP_CID_CONN_LESS
:
6850 psm
= get_unaligned((__le16
*) skb
->data
);
6851 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6852 l2cap_conless_channel(conn
, psm
, skb
);
6855 case L2CAP_CID_LE_SIGNALING
:
6856 l2cap_le_sig_channel(conn
, skb
);
6860 l2cap_data_channel(conn
, cid
, skb
);
6865 static void process_pending_rx(struct work_struct
*work
)
6867 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
6869 struct sk_buff
*skb
;
6873 while ((skb
= skb_dequeue(&conn
->pending_rx
)))
6874 l2cap_recv_frame(conn
, skb
);
6877 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
6879 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6880 struct hci_chan
*hchan
;
6885 hchan
= hci_chan_create(hcon
);
6889 conn
= kzalloc(sizeof(*conn
), GFP_KERNEL
);
6891 hci_chan_del(hchan
);
6895 kref_init(&conn
->ref
);
6896 hcon
->l2cap_data
= conn
;
6897 conn
->hcon
= hci_conn_get(hcon
);
6898 conn
->hchan
= hchan
;
6900 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
6902 switch (hcon
->type
) {
6904 if (hcon
->hdev
->le_mtu
) {
6905 conn
->mtu
= hcon
->hdev
->le_mtu
;
6910 conn
->mtu
= hcon
->hdev
->acl_mtu
;
6914 conn
->feat_mask
= 0;
6916 if (hcon
->type
== ACL_LINK
)
6917 conn
->hs_enabled
= test_bit(HCI_HS_ENABLED
,
6918 &hcon
->hdev
->dev_flags
);
6920 mutex_init(&conn
->ident_lock
);
6921 mutex_init(&conn
->chan_lock
);
6923 INIT_LIST_HEAD(&conn
->chan_l
);
6924 INIT_LIST_HEAD(&conn
->users
);
6926 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
6928 skb_queue_head_init(&conn
->pending_rx
);
6929 INIT_WORK(&conn
->pending_rx_work
, process_pending_rx
);
6931 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
6936 static bool is_valid_psm(u16 psm
, u8 dst_type
) {
6940 if (bdaddr_type_is_le(dst_type
))
6941 return (psm
<= 0x00ff);
6943 /* PSM must be odd and lsb of upper byte must be 0 */
6944 return ((psm
& 0x0101) == 0x0001);
6947 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
6948 bdaddr_t
*dst
, u8 dst_type
)
6950 struct l2cap_conn
*conn
;
6951 struct hci_conn
*hcon
;
6952 struct hci_dev
*hdev
;
6955 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
6956 dst_type
, __le16_to_cpu(psm
));
6958 hdev
= hci_get_route(dst
, &chan
->src
);
6960 return -EHOSTUNREACH
;
6964 l2cap_chan_lock(chan
);
6966 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
6967 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
6972 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !psm
) {
6977 if (chan
->chan_type
== L2CAP_CHAN_FIXED
&& !cid
) {
6982 switch (chan
->mode
) {
6983 case L2CAP_MODE_BASIC
:
6985 case L2CAP_MODE_LE_FLOWCTL
:
6986 l2cap_le_flowctl_init(chan
);
6988 case L2CAP_MODE_ERTM
:
6989 case L2CAP_MODE_STREAMING
:
6998 switch (chan
->state
) {
7002 /* Already connecting */
7007 /* Already connected */
7021 /* Set destination address and psm */
7022 bacpy(&chan
->dst
, dst
);
7023 chan
->dst_type
= dst_type
;
7028 if (bdaddr_type_is_le(dst_type
)) {
7031 /* Convert from L2CAP channel address type to HCI address type
7033 if (dst_type
== BDADDR_LE_PUBLIC
)
7034 dst_type
= ADDR_LE_DEV_PUBLIC
;
7036 dst_type
= ADDR_LE_DEV_RANDOM
;
7038 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
7039 role
= HCI_ROLE_SLAVE
;
7041 role
= HCI_ROLE_MASTER
;
7043 hcon
= hci_connect_le(hdev
, dst
, dst_type
, chan
->sec_level
,
7044 HCI_LE_CONN_TIMEOUT
, role
);
7046 u8 auth_type
= l2cap_get_auth_type(chan
);
7047 hcon
= hci_connect_acl(hdev
, dst
, chan
->sec_level
, auth_type
);
7051 err
= PTR_ERR(hcon
);
7055 conn
= l2cap_conn_add(hcon
);
7057 hci_conn_drop(hcon
);
7062 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
7063 hci_conn_drop(hcon
);
7068 /* Update source addr of the socket */
7069 bacpy(&chan
->src
, &hcon
->src
);
7070 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
7072 l2cap_chan_add(conn
, chan
);
7074 /* l2cap_chan_add takes its own ref so we can drop this one */
7075 hci_conn_drop(hcon
);
7077 l2cap_state_change(chan
, BT_CONNECT
);
7078 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
7080 /* Release chan->sport so that it can be reused by other
7081 * sockets (as it's only used for listening sockets).
7083 write_lock(&chan_list_lock
);
7085 write_unlock(&chan_list_lock
);
7087 if (hcon
->state
== BT_CONNECTED
) {
7088 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
7089 __clear_chan_timer(chan
);
7090 if (l2cap_chan_check_security(chan
, true))
7091 l2cap_state_change(chan
, BT_CONNECTED
);
7093 l2cap_do_start(chan
);
7099 l2cap_chan_unlock(chan
);
7100 hci_dev_unlock(hdev
);
7104 EXPORT_SYMBOL_GPL(l2cap_chan_connect
);
7106 /* ---- L2CAP interface with lower layer (HCI) ---- */
7108 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
7110 int exact
= 0, lm1
= 0, lm2
= 0;
7111 struct l2cap_chan
*c
;
7113 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
7115 /* Find listening sockets and check their link_mode */
7116 read_lock(&chan_list_lock
);
7117 list_for_each_entry(c
, &chan_list
, global_l
) {
7118 if (c
->state
!= BT_LISTEN
)
7121 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
7122 lm1
|= HCI_LM_ACCEPT
;
7123 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7124 lm1
|= HCI_LM_MASTER
;
7126 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
7127 lm2
|= HCI_LM_ACCEPT
;
7128 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7129 lm2
|= HCI_LM_MASTER
;
7132 read_unlock(&chan_list_lock
);
7134 return exact
? lm1
: lm2
;
7137 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7138 * from an existing channel in the list or from the beginning of the
7139 * global list (by passing NULL as first parameter).
7141 static struct l2cap_chan
*l2cap_global_fixed_chan(struct l2cap_chan
*c
,
7142 bdaddr_t
*src
, u8 link_type
)
7144 read_lock(&chan_list_lock
);
7147 c
= list_next_entry(c
, global_l
);
7149 c
= list_entry(chan_list
.next
, typeof(*c
), global_l
);
7151 list_for_each_entry_from(c
, &chan_list
, global_l
) {
7152 if (c
->chan_type
!= L2CAP_CHAN_FIXED
)
7154 if (c
->state
!= BT_LISTEN
)
7156 if (bacmp(&c
->src
, src
) && bacmp(&c
->src
, BDADDR_ANY
))
7158 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
7160 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
7164 read_unlock(&chan_list_lock
);
7168 read_unlock(&chan_list_lock
);
7173 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7175 struct hci_dev
*hdev
= hcon
->hdev
;
7176 struct l2cap_conn
*conn
;
7177 struct l2cap_chan
*pchan
;
7180 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7183 l2cap_conn_del(hcon
, bt_to_errno(status
));
7187 conn
= l2cap_conn_add(hcon
);
7191 dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
7193 /* If device is blocked, do not create channels for it */
7194 if (hci_bdaddr_list_lookup(&hdev
->blacklist
, &hcon
->dst
, dst_type
))
7197 /* Find fixed channels and notify them of the new connection. We
7198 * use multiple individual lookups, continuing each time where
7199 * we left off, because the list lock would prevent calling the
7200 * potentially sleeping l2cap_chan_lock() function.
7202 pchan
= l2cap_global_fixed_chan(NULL
, &hdev
->bdaddr
, hcon
->type
);
7204 struct l2cap_chan
*chan
, *next
;
7206 /* Client fixed channels should override server ones */
7207 if (__l2cap_get_chan_by_dcid(conn
, pchan
->scid
))
7210 l2cap_chan_lock(pchan
);
7211 chan
= pchan
->ops
->new_connection(pchan
);
7213 bacpy(&chan
->src
, &hcon
->src
);
7214 bacpy(&chan
->dst
, &hcon
->dst
);
7215 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
7216 chan
->dst_type
= dst_type
;
7218 __l2cap_chan_add(conn
, chan
);
7221 l2cap_chan_unlock(pchan
);
7223 next
= l2cap_global_fixed_chan(pchan
, &hdev
->bdaddr
,
7225 l2cap_chan_put(pchan
);
7229 l2cap_conn_ready(conn
);
7232 int l2cap_disconn_ind(struct hci_conn
*hcon
)
7234 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7236 BT_DBG("hcon %p", hcon
);
7239 return HCI_ERROR_REMOTE_USER_TERM
;
7240 return conn
->disc_reason
;
7243 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
7245 BT_DBG("hcon %p reason %d", hcon
, reason
);
7247 l2cap_conn_del(hcon
, bt_to_errno(reason
));
7250 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
7252 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
7255 if (encrypt
== 0x00) {
7256 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
7257 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
7258 } else if (chan
->sec_level
== BT_SECURITY_HIGH
||
7259 chan
->sec_level
== BT_SECURITY_FIPS
)
7260 l2cap_chan_close(chan
, ECONNREFUSED
);
7262 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
7263 __clear_chan_timer(chan
);
7267 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
7269 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7270 struct l2cap_chan
*chan
;
7275 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
7277 mutex_lock(&conn
->chan_lock
);
7279 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
7280 l2cap_chan_lock(chan
);
7282 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
7283 state_to_string(chan
->state
));
7285 if (chan
->scid
== L2CAP_CID_A2MP
) {
7286 l2cap_chan_unlock(chan
);
7290 if (!status
&& encrypt
)
7291 chan
->sec_level
= hcon
->sec_level
;
7293 if (!__l2cap_no_conn_pending(chan
)) {
7294 l2cap_chan_unlock(chan
);
7298 if (!status
&& (chan
->state
== BT_CONNECTED
||
7299 chan
->state
== BT_CONFIG
)) {
7300 chan
->ops
->resume(chan
);
7301 l2cap_check_encryption(chan
, encrypt
);
7302 l2cap_chan_unlock(chan
);
7306 if (chan
->state
== BT_CONNECT
) {
7308 l2cap_start_connection(chan
);
7310 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7311 } else if (chan
->state
== BT_CONNECT2
) {
7312 struct l2cap_conn_rsp rsp
;
7316 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
7317 res
= L2CAP_CR_PEND
;
7318 stat
= L2CAP_CS_AUTHOR_PEND
;
7319 chan
->ops
->defer(chan
);
7321 l2cap_state_change(chan
, BT_CONFIG
);
7322 res
= L2CAP_CR_SUCCESS
;
7323 stat
= L2CAP_CS_NO_INFO
;
7326 l2cap_state_change(chan
, BT_DISCONN
);
7327 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7328 res
= L2CAP_CR_SEC_BLOCK
;
7329 stat
= L2CAP_CS_NO_INFO
;
7332 rsp
.scid
= cpu_to_le16(chan
->dcid
);
7333 rsp
.dcid
= cpu_to_le16(chan
->scid
);
7334 rsp
.result
= cpu_to_le16(res
);
7335 rsp
.status
= cpu_to_le16(stat
);
7336 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
7339 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
7340 res
== L2CAP_CR_SUCCESS
) {
7342 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
7343 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
7345 l2cap_build_conf_req(chan
, buf
),
7347 chan
->num_conf_req
++;
7351 l2cap_chan_unlock(chan
);
7354 mutex_unlock(&conn
->chan_lock
);
7359 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
7361 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7362 struct l2cap_hdr
*hdr
;
7365 /* For AMP controller do not create l2cap conn */
7366 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
7370 conn
= l2cap_conn_add(hcon
);
7375 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
7379 case ACL_START_NO_FLUSH
:
7382 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
7383 kfree_skb(conn
->rx_skb
);
7384 conn
->rx_skb
= NULL
;
7386 l2cap_conn_unreliable(conn
, ECOMM
);
7389 /* Start fragment always begin with Basic L2CAP header */
7390 if (skb
->len
< L2CAP_HDR_SIZE
) {
7391 BT_ERR("Frame is too short (len %d)", skb
->len
);
7392 l2cap_conn_unreliable(conn
, ECOMM
);
7396 hdr
= (struct l2cap_hdr
*) skb
->data
;
7397 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7399 if (len
== skb
->len
) {
7400 /* Complete frame received */
7401 l2cap_recv_frame(conn
, skb
);
7405 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7407 if (skb
->len
> len
) {
7408 BT_ERR("Frame is too long (len %d, expected len %d)",
7410 l2cap_conn_unreliable(conn
, ECOMM
);
7414 /* Allocate skb for the complete frame (with header) */
7415 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7419 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7421 conn
->rx_len
= len
- skb
->len
;
7425 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7427 if (!conn
->rx_len
) {
7428 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7429 l2cap_conn_unreliable(conn
, ECOMM
);
7433 if (skb
->len
> conn
->rx_len
) {
7434 BT_ERR("Fragment is too long (len %d, expected %d)",
7435 skb
->len
, conn
->rx_len
);
7436 kfree_skb(conn
->rx_skb
);
7437 conn
->rx_skb
= NULL
;
7439 l2cap_conn_unreliable(conn
, ECOMM
);
7443 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7445 conn
->rx_len
-= skb
->len
;
7447 if (!conn
->rx_len
) {
7448 /* Complete frame received. l2cap_recv_frame
7449 * takes ownership of the skb so set the global
7450 * rx_skb pointer to NULL first.
7452 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7453 conn
->rx_skb
= NULL
;
7454 l2cap_recv_frame(conn
, rx_skb
);
7464 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
7466 struct l2cap_chan
*c
;
7468 read_lock(&chan_list_lock
);
7470 list_for_each_entry(c
, &chan_list
, global_l
) {
7471 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7473 c
->state
, __le16_to_cpu(c
->psm
),
7474 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
7475 c
->sec_level
, c
->mode
);
7478 read_unlock(&chan_list_lock
);
7483 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
7485 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
7488 static const struct file_operations l2cap_debugfs_fops
= {
7489 .open
= l2cap_debugfs_open
,
7491 .llseek
= seq_lseek
,
7492 .release
= single_release
,
/* Handle of the "l2cap" debugfs file, created in l2cap_init() and
 * removed in l2cap_exit().
 */
static struct dentry *l2cap_debugfs;
7497 int __init
l2cap_init(void)
7501 err
= l2cap_init_sockets();
7505 if (IS_ERR_OR_NULL(bt_debugfs
))
7508 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
7509 NULL
, &l2cap_debugfs_fops
);
7511 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs
,
7513 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs
,
7519 void l2cap_exit(void)
7521 debugfs_remove(l2cap_debugfs
);
7522 l2cap_cleanup_sockets();
7525 module_param(disable_ertm
, bool, 0644);
7526 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");