2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
44 #define LE_FLOWCTL_MAX_CREDITS 65535
48 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
49 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_SIG_BREDR
| L2CAP_FC_CONNLESS
, };
51 static LIST_HEAD(chan_list
);
52 static DEFINE_RWLOCK(chan_list_lock
);
54 static u16 le_max_credits
= L2CAP_LE_MAX_CREDITS
;
55 static u16 le_default_mps
= L2CAP_LE_DEFAULT_MPS
;
57 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
58 u8 code
, u8 ident
, u16 dlen
, void *data
);
59 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
61 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
62 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
64 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
65 struct sk_buff_head
*skbs
, u8 event
);
67 static inline __u8
bdaddr_type(struct hci_conn
*hcon
, __u8 type
)
69 if (hcon
->type
== LE_LINK
) {
70 if (type
== ADDR_LE_DEV_PUBLIC
)
71 return BDADDR_LE_PUBLIC
;
73 return BDADDR_LE_RANDOM
;
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
86 list_for_each_entry(c
, &conn
->chan_l
, list
) {
93 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
98 list_for_each_entry(c
, &conn
->chan_l
, list
) {
105 /* Find channel with given SCID.
106 * Returns locked channel. */
107 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
110 struct l2cap_chan
*c
;
112 mutex_lock(&conn
->chan_lock
);
113 c
= __l2cap_get_chan_by_scid(conn
, cid
);
116 mutex_unlock(&conn
->chan_lock
);
121 /* Find channel with given DCID.
122 * Returns locked channel.
124 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
127 struct l2cap_chan
*c
;
129 mutex_lock(&conn
->chan_lock
);
130 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
133 mutex_unlock(&conn
->chan_lock
);
138 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
141 struct l2cap_chan
*c
;
143 list_for_each_entry(c
, &conn
->chan_l
, list
) {
144 if (c
->ident
== ident
)
150 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
153 struct l2cap_chan
*c
;
155 mutex_lock(&conn
->chan_lock
);
156 c
= __l2cap_get_chan_by_ident(conn
, ident
);
159 mutex_unlock(&conn
->chan_lock
);
164 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
166 struct l2cap_chan
*c
;
168 list_for_each_entry(c
, &chan_list
, global_l
) {
169 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
175 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
179 write_lock(&chan_list_lock
);
181 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
194 for (p
= 0x1001; p
< 0x1100; p
+= 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
196 chan
->psm
= cpu_to_le16(p
);
197 chan
->sport
= cpu_to_le16(p
);
204 write_unlock(&chan_list_lock
);
207 EXPORT_SYMBOL_GPL(l2cap_add_psm
);
209 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
211 write_lock(&chan_list_lock
);
213 /* Override the defaults (which are for conn-oriented) */
214 chan
->omtu
= L2CAP_DEFAULT_MTU
;
215 chan
->chan_type
= L2CAP_CHAN_FIXED
;
219 write_unlock(&chan_list_lock
);
224 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
228 if (conn
->hcon
->type
== LE_LINK
)
229 dyn_end
= L2CAP_CID_LE_DYN_END
;
231 dyn_end
= L2CAP_CID_DYN_END
;
233 for (cid
= L2CAP_CID_DYN_START
; cid
< dyn_end
; cid
++) {
234 if (!__l2cap_get_chan_by_scid(conn
, cid
))
241 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
243 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
244 state_to_string(state
));
247 chan
->ops
->state_change(chan
, state
, 0);
250 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
254 chan
->ops
->state_change(chan
, chan
->state
, err
);
257 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
259 chan
->ops
->state_change(chan
, chan
->state
, err
);
262 static void __set_retrans_timer(struct l2cap_chan
*chan
)
264 if (!delayed_work_pending(&chan
->monitor_timer
) &&
265 chan
->retrans_timeout
) {
266 l2cap_set_timer(chan
, &chan
->retrans_timer
,
267 msecs_to_jiffies(chan
->retrans_timeout
));
271 static void __set_monitor_timer(struct l2cap_chan
*chan
)
273 __clear_retrans_timer(chan
);
274 if (chan
->monitor_timeout
) {
275 l2cap_set_timer(chan
, &chan
->monitor_timer
,
276 msecs_to_jiffies(chan
->monitor_timeout
));
280 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
285 skb_queue_walk(head
, skb
) {
286 if (bt_cb(skb
)->control
.txseq
== seq
)
293 /* ---- L2CAP sequence number lists ---- */
295 /* For ERTM, ordered lists of sequence numbers must be tracked for
296 * SREJ requests that are received and for frames that are to be
297 * retransmitted. These seq_list functions implement a singly-linked
298 * list in an array, where membership in the list can also be checked
299 * in constant time. Items can also be added to the tail of the list
300 * and removed from the head in constant time, without further memory
304 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
306 size_t alloc_size
, i
;
308 /* Allocated size is a power of 2 to map sequence numbers
309 * (which may be up to 14 bits) in to a smaller array that is
310 * sized for the negotiated ERTM transmit windows.
312 alloc_size
= roundup_pow_of_two(size
);
314 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
318 seq_list
->mask
= alloc_size
- 1;
319 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
320 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
321 for (i
= 0; i
< alloc_size
; i
++)
322 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
327 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
329 kfree(seq_list
->list
);
332 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
335 /* Constant-time check for list membership */
336 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
339 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
341 u16 seq
= seq_list
->head
;
342 u16 mask
= seq_list
->mask
;
344 seq_list
->head
= seq_list
->list
[seq
& mask
];
345 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
347 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
348 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
349 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
355 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
359 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
362 for (i
= 0; i
<= seq_list
->mask
; i
++)
363 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
365 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
366 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
369 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
371 u16 mask
= seq_list
->mask
;
373 /* All appends happen in constant time */
375 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
378 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
379 seq_list
->head
= seq
;
381 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
383 seq_list
->tail
= seq
;
384 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
/* Channel timer expiry: close the channel with a reason derived from
 * its current state, then drop the timer's reference on the channel.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
417 struct l2cap_chan
*l2cap_chan_create(void)
419 struct l2cap_chan
*chan
;
421 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
425 mutex_init(&chan
->lock
);
427 /* Set default lock nesting level */
428 atomic_set(&chan
->nesting
, L2CAP_NESTING_NORMAL
);
430 write_lock(&chan_list_lock
);
431 list_add(&chan
->global_l
, &chan_list
);
432 write_unlock(&chan_list_lock
);
434 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
436 chan
->state
= BT_OPEN
;
438 kref_init(&chan
->kref
);
440 /* This flag is cleared in l2cap_chan_ready() */
441 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
443 BT_DBG("chan %p", chan
);
447 EXPORT_SYMBOL_GPL(l2cap_chan_create
);
449 static void l2cap_chan_destroy(struct kref
*kref
)
451 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
453 BT_DBG("chan %p", chan
);
455 write_lock(&chan_list_lock
);
456 list_del(&chan
->global_l
);
457 write_unlock(&chan_list_lock
);
462 void l2cap_chan_hold(struct l2cap_chan
*c
)
464 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
469 void l2cap_chan_put(struct l2cap_chan
*c
)
471 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
473 kref_put(&c
->kref
, l2cap_chan_destroy
);
475 EXPORT_SYMBOL_GPL(l2cap_chan_put
);
477 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
479 chan
->fcs
= L2CAP_FCS_CRC16
;
480 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
481 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
482 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
483 chan
->remote_max_tx
= chan
->max_tx
;
484 chan
->remote_tx_win
= chan
->tx_win
;
485 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
486 chan
->sec_level
= BT_SECURITY_LOW
;
487 chan
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
488 chan
->retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
489 chan
->monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
490 chan
->conf_state
= 0;
492 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
494 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults
);
496 static void l2cap_le_flowctl_init(struct l2cap_chan
*chan
)
499 chan
->sdu_last_frag
= NULL
;
501 chan
->tx_credits
= 0;
502 chan
->rx_credits
= le_max_credits
;
503 chan
->mps
= min_t(u16
, chan
->imtu
, le_default_mps
);
505 skb_queue_head_init(&chan
->tx_q
);
508 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
510 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
511 __le16_to_cpu(chan
->psm
), chan
->dcid
);
513 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
517 switch (chan
->chan_type
) {
518 case L2CAP_CHAN_CONN_ORIENTED
:
519 /* Alloc CID for connection-oriented socket */
520 chan
->scid
= l2cap_alloc_cid(conn
);
521 if (conn
->hcon
->type
== ACL_LINK
)
522 chan
->omtu
= L2CAP_DEFAULT_MTU
;
525 case L2CAP_CHAN_CONN_LESS
:
526 /* Connectionless socket */
527 chan
->scid
= L2CAP_CID_CONN_LESS
;
528 chan
->dcid
= L2CAP_CID_CONN_LESS
;
529 chan
->omtu
= L2CAP_DEFAULT_MTU
;
532 case L2CAP_CHAN_FIXED
:
533 /* Caller will set CID and CID specific MTU values */
537 /* Raw socket can send/recv signalling messages only */
538 chan
->scid
= L2CAP_CID_SIGNALING
;
539 chan
->dcid
= L2CAP_CID_SIGNALING
;
540 chan
->omtu
= L2CAP_DEFAULT_MTU
;
543 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
544 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
545 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
546 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
547 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
548 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
550 l2cap_chan_hold(chan
);
552 /* Only keep a reference for fixed channels if they requested it */
553 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
554 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
555 hci_conn_hold(conn
->hcon
);
557 list_add(&chan
->list
, &conn
->chan_l
);
560 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
562 mutex_lock(&conn
->chan_lock
);
563 __l2cap_chan_add(conn
, chan
);
564 mutex_unlock(&conn
->chan_lock
);
567 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
569 struct l2cap_conn
*conn
= chan
->conn
;
571 __clear_chan_timer(chan
);
573 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
575 chan
->ops
->teardown(chan
, err
);
578 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
579 /* Delete from channel list */
580 list_del(&chan
->list
);
582 l2cap_chan_put(chan
);
586 /* Reference was only held for non-fixed channels or
587 * fixed channels that explicitly requested it using the
588 * FLAG_HOLD_HCI_CONN flag.
590 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
591 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
592 hci_conn_drop(conn
->hcon
);
594 if (mgr
&& mgr
->bredr_chan
== chan
)
595 mgr
->bredr_chan
= NULL
;
598 if (chan
->hs_hchan
) {
599 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
601 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
602 amp_disconnect_logical_link(hs_hchan
);
605 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
609 case L2CAP_MODE_BASIC
:
612 case L2CAP_MODE_LE_FLOWCTL
:
613 skb_queue_purge(&chan
->tx_q
);
616 case L2CAP_MODE_ERTM
:
617 __clear_retrans_timer(chan
);
618 __clear_monitor_timer(chan
);
619 __clear_ack_timer(chan
);
621 skb_queue_purge(&chan
->srej_q
);
623 l2cap_seq_list_free(&chan
->srej_list
);
624 l2cap_seq_list_free(&chan
->retrans_list
);
628 case L2CAP_MODE_STREAMING
:
629 skb_queue_purge(&chan
->tx_q
);
635 EXPORT_SYMBOL_GPL(l2cap_chan_del
);
637 static void l2cap_conn_update_id_addr(struct work_struct
*work
)
639 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
640 id_addr_update_work
);
641 struct hci_conn
*hcon
= conn
->hcon
;
642 struct l2cap_chan
*chan
;
644 mutex_lock(&conn
->chan_lock
);
646 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
647 l2cap_chan_lock(chan
);
648 bacpy(&chan
->dst
, &hcon
->dst
);
649 chan
->dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
650 l2cap_chan_unlock(chan
);
653 mutex_unlock(&conn
->chan_lock
);
656 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
658 struct l2cap_conn
*conn
= chan
->conn
;
659 struct l2cap_le_conn_rsp rsp
;
662 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
663 result
= L2CAP_CR_AUTHORIZATION
;
665 result
= L2CAP_CR_BAD_PSM
;
667 l2cap_state_change(chan
, BT_DISCONN
);
669 rsp
.dcid
= cpu_to_le16(chan
->scid
);
670 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
671 rsp
.mps
= cpu_to_le16(chan
->mps
);
672 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
673 rsp
.result
= cpu_to_le16(result
);
675 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
679 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
681 struct l2cap_conn
*conn
= chan
->conn
;
682 struct l2cap_conn_rsp rsp
;
685 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
686 result
= L2CAP_CR_SEC_BLOCK
;
688 result
= L2CAP_CR_BAD_PSM
;
690 l2cap_state_change(chan
, BT_DISCONN
);
692 rsp
.scid
= cpu_to_le16(chan
->dcid
);
693 rsp
.dcid
= cpu_to_le16(chan
->scid
);
694 rsp
.result
= cpu_to_le16(result
);
695 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
697 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
700 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
702 struct l2cap_conn
*conn
= chan
->conn
;
704 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
706 switch (chan
->state
) {
708 chan
->ops
->teardown(chan
, 0);
713 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
714 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
715 l2cap_send_disconn_req(chan
, reason
);
717 l2cap_chan_del(chan
, reason
);
721 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
722 if (conn
->hcon
->type
== ACL_LINK
)
723 l2cap_chan_connect_reject(chan
);
724 else if (conn
->hcon
->type
== LE_LINK
)
725 l2cap_chan_le_connect_reject(chan
);
728 l2cap_chan_del(chan
, reason
);
733 l2cap_chan_del(chan
, reason
);
737 chan
->ops
->teardown(chan
, 0);
741 EXPORT_SYMBOL(l2cap_chan_close
);
743 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
745 switch (chan
->chan_type
) {
747 switch (chan
->sec_level
) {
748 case BT_SECURITY_HIGH
:
749 case BT_SECURITY_FIPS
:
750 return HCI_AT_DEDICATED_BONDING_MITM
;
751 case BT_SECURITY_MEDIUM
:
752 return HCI_AT_DEDICATED_BONDING
;
754 return HCI_AT_NO_BONDING
;
757 case L2CAP_CHAN_CONN_LESS
:
758 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_3DSP
)) {
759 if (chan
->sec_level
== BT_SECURITY_LOW
)
760 chan
->sec_level
= BT_SECURITY_SDP
;
762 if (chan
->sec_level
== BT_SECURITY_HIGH
||
763 chan
->sec_level
== BT_SECURITY_FIPS
)
764 return HCI_AT_NO_BONDING_MITM
;
766 return HCI_AT_NO_BONDING
;
768 case L2CAP_CHAN_CONN_ORIENTED
:
769 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_SDP
)) {
770 if (chan
->sec_level
== BT_SECURITY_LOW
)
771 chan
->sec_level
= BT_SECURITY_SDP
;
773 if (chan
->sec_level
== BT_SECURITY_HIGH
||
774 chan
->sec_level
== BT_SECURITY_FIPS
)
775 return HCI_AT_NO_BONDING_MITM
;
777 return HCI_AT_NO_BONDING
;
781 switch (chan
->sec_level
) {
782 case BT_SECURITY_HIGH
:
783 case BT_SECURITY_FIPS
:
784 return HCI_AT_GENERAL_BONDING_MITM
;
785 case BT_SECURITY_MEDIUM
:
786 return HCI_AT_GENERAL_BONDING
;
788 return HCI_AT_NO_BONDING
;
794 /* Service level security */
795 int l2cap_chan_check_security(struct l2cap_chan
*chan
, bool initiator
)
797 struct l2cap_conn
*conn
= chan
->conn
;
800 if (conn
->hcon
->type
== LE_LINK
)
801 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
803 auth_type
= l2cap_get_auth_type(chan
);
805 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
,
809 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
813 /* Get next available identificator.
814 * 1 - 128 are used by kernel.
815 * 129 - 199 are reserved.
816 * 200 - 254 are used by utilities like l2ping, etc.
819 mutex_lock(&conn
->ident_lock
);
821 if (++conn
->tx_ident
> 128)
826 mutex_unlock(&conn
->ident_lock
);
831 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
834 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
837 BT_DBG("code 0x%2.2x", code
);
842 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
843 flags
= ACL_START_NO_FLUSH
;
847 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
848 skb
->priority
= HCI_PRIO_MAX
;
850 hci_send_acl(conn
->hchan
, skb
, flags
);
853 static bool __chan_is_moving(struct l2cap_chan
*chan
)
855 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
856 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
859 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
861 struct hci_conn
*hcon
= chan
->conn
->hcon
;
864 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
867 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
869 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
876 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
877 lmp_no_flush_capable(hcon
->hdev
))
878 flags
= ACL_START_NO_FLUSH
;
882 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
883 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
886 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
888 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
889 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
891 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
894 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
895 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
902 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
903 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
910 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
912 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
913 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
915 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
918 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
919 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
926 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
927 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
934 static inline void __unpack_control(struct l2cap_chan
*chan
,
937 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
938 __unpack_extended_control(get_unaligned_le32(skb
->data
),
939 &bt_cb(skb
)->control
);
940 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
942 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
943 &bt_cb(skb
)->control
);
944 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
948 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
952 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
953 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
955 if (control
->sframe
) {
956 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
957 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
958 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
960 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
961 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
967 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
971 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
972 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
974 if (control
->sframe
) {
975 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
976 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
977 packed
|= L2CAP_CTRL_FRAME_TYPE
;
979 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
980 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
986 static inline void __pack_control(struct l2cap_chan
*chan
,
987 struct l2cap_ctrl
*control
,
990 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
991 put_unaligned_le32(__pack_extended_control(control
),
992 skb
->data
+ L2CAP_HDR_SIZE
);
994 put_unaligned_le16(__pack_enhanced_control(control
),
995 skb
->data
+ L2CAP_HDR_SIZE
);
999 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
1001 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1002 return L2CAP_EXT_HDR_SIZE
;
1004 return L2CAP_ENH_HDR_SIZE
;
1007 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
1010 struct sk_buff
*skb
;
1011 struct l2cap_hdr
*lh
;
1012 int hlen
= __ertm_hdr_size(chan
);
1014 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1015 hlen
+= L2CAP_FCS_SIZE
;
1017 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
1020 return ERR_PTR(-ENOMEM
);
1022 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1023 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
1024 lh
->cid
= cpu_to_le16(chan
->dcid
);
1026 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1027 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1029 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1031 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1032 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1033 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1036 skb
->priority
= HCI_PRIO_MAX
;
1040 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1041 struct l2cap_ctrl
*control
)
1043 struct sk_buff
*skb
;
1046 BT_DBG("chan %p, control %p", chan
, control
);
1048 if (!control
->sframe
)
1051 if (__chan_is_moving(chan
))
1054 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1058 if (control
->super
== L2CAP_SUPER_RR
)
1059 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1060 else if (control
->super
== L2CAP_SUPER_RNR
)
1061 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1063 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1064 chan
->last_acked_seq
= control
->reqseq
;
1065 __clear_ack_timer(chan
);
1068 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1069 control
->final
, control
->poll
, control
->super
);
1071 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1072 control_field
= __pack_extended_control(control
);
1074 control_field
= __pack_enhanced_control(control
);
1076 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1078 l2cap_do_send(chan
, skb
);
1081 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1083 struct l2cap_ctrl control
;
1085 BT_DBG("chan %p, poll %d", chan
, poll
);
1087 memset(&control
, 0, sizeof(control
));
1089 control
.poll
= poll
;
1091 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1092 control
.super
= L2CAP_SUPER_RNR
;
1094 control
.super
= L2CAP_SUPER_RR
;
1096 control
.reqseq
= chan
->buffer_seq
;
1097 l2cap_send_sframe(chan
, &control
);
1100 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1102 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
1105 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1108 static bool __amp_capable(struct l2cap_chan
*chan
)
1110 struct l2cap_conn
*conn
= chan
->conn
;
1111 struct hci_dev
*hdev
;
1112 bool amp_available
= false;
1114 if (!conn
->hs_enabled
)
1117 if (!(conn
->fixed_chan_mask
& L2CAP_FC_A2MP
))
1120 read_lock(&hci_dev_list_lock
);
1121 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1122 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1123 test_bit(HCI_UP
, &hdev
->flags
)) {
1124 amp_available
= true;
1128 read_unlock(&hci_dev_list_lock
);
1130 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1131 return amp_available
;
1136 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1138 /* Check EFS parameters */
1142 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1144 struct l2cap_conn
*conn
= chan
->conn
;
1145 struct l2cap_conn_req req
;
1147 req
.scid
= cpu_to_le16(chan
->scid
);
1148 req
.psm
= chan
->psm
;
1150 chan
->ident
= l2cap_get_ident(conn
);
1152 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1154 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1157 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1159 struct l2cap_create_chan_req req
;
1160 req
.scid
= cpu_to_le16(chan
->scid
);
1161 req
.psm
= chan
->psm
;
1162 req
.amp_id
= amp_id
;
1164 chan
->ident
= l2cap_get_ident(chan
->conn
);
1166 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1170 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1172 struct sk_buff
*skb
;
1174 BT_DBG("chan %p", chan
);
1176 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1179 __clear_retrans_timer(chan
);
1180 __clear_monitor_timer(chan
);
1181 __clear_ack_timer(chan
);
1183 chan
->retry_count
= 0;
1184 skb_queue_walk(&chan
->tx_q
, skb
) {
1185 if (bt_cb(skb
)->control
.retries
)
1186 bt_cb(skb
)->control
.retries
= 1;
1191 chan
->expected_tx_seq
= chan
->buffer_seq
;
1193 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1194 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1195 l2cap_seq_list_clear(&chan
->retrans_list
);
1196 l2cap_seq_list_clear(&chan
->srej_list
);
1197 skb_queue_purge(&chan
->srej_q
);
1199 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1200 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1202 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1205 static void l2cap_move_done(struct l2cap_chan
*chan
)
1207 u8 move_role
= chan
->move_role
;
1208 BT_DBG("chan %p", chan
);
1210 chan
->move_state
= L2CAP_MOVE_STABLE
;
1211 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1213 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1216 switch (move_role
) {
1217 case L2CAP_MOVE_ROLE_INITIATOR
:
1218 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1219 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1221 case L2CAP_MOVE_ROLE_RESPONDER
:
1222 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1227 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1229 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1230 chan
->conf_state
= 0;
1231 __clear_chan_timer(chan
);
1233 if (chan
->mode
== L2CAP_MODE_LE_FLOWCTL
&& !chan
->tx_credits
)
1234 chan
->ops
->suspend(chan
);
1236 chan
->state
= BT_CONNECTED
;
1238 chan
->ops
->ready(chan
);
1241 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1243 struct l2cap_conn
*conn
= chan
->conn
;
1244 struct l2cap_le_conn_req req
;
1246 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
))
1249 req
.psm
= chan
->psm
;
1250 req
.scid
= cpu_to_le16(chan
->scid
);
1251 req
.mtu
= cpu_to_le16(chan
->imtu
);
1252 req
.mps
= cpu_to_le16(chan
->mps
);
1253 req
.credits
= cpu_to_le16(chan
->rx_credits
);
1255 chan
->ident
= l2cap_get_ident(conn
);
1257 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1261 static void l2cap_le_start(struct l2cap_chan
*chan
)
1263 struct l2cap_conn
*conn
= chan
->conn
;
1265 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1269 l2cap_chan_ready(chan
);
1273 if (chan
->state
== BT_CONNECT
)
1274 l2cap_le_connect(chan
);
1277 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1279 if (__amp_capable(chan
)) {
1280 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1281 a2mp_discover_amp(chan
);
1282 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1283 l2cap_le_start(chan
);
1285 l2cap_send_conn_req(chan
);
1289 static void l2cap_request_info(struct l2cap_conn
*conn
)
1291 struct l2cap_info_req req
;
1293 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1296 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1298 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1299 conn
->info_ident
= l2cap_get_ident(conn
);
1301 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1303 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1307 static void l2cap_do_start(struct l2cap_chan
*chan
)
1309 struct l2cap_conn
*conn
= chan
->conn
;
1311 if (conn
->hcon
->type
== LE_LINK
) {
1312 l2cap_le_start(chan
);
1316 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)) {
1317 l2cap_request_info(conn
);
1321 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1324 if (l2cap_chan_check_security(chan
, true) &&
1325 __l2cap_no_conn_pending(chan
))
1326 l2cap_start_connection(chan
);
1329 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1331 u32 local_feat_mask
= l2cap_feat_mask
;
1333 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1336 case L2CAP_MODE_ERTM
:
1337 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1338 case L2CAP_MODE_STREAMING
:
1339 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1345 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1347 struct l2cap_conn
*conn
= chan
->conn
;
1348 struct l2cap_disconn_req req
;
1353 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1354 __clear_retrans_timer(chan
);
1355 __clear_monitor_timer(chan
);
1356 __clear_ack_timer(chan
);
1359 if (chan
->scid
== L2CAP_CID_A2MP
) {
1360 l2cap_state_change(chan
, BT_DISCONN
);
1364 req
.dcid
= cpu_to_le16(chan
->dcid
);
1365 req
.scid
= cpu_to_le16(chan
->scid
);
1366 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1369 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1372 /* ---- L2CAP connections ---- */
1373 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1375 struct l2cap_chan
*chan
, *tmp
;
1377 BT_DBG("conn %p", conn
);
1379 mutex_lock(&conn
->chan_lock
);
1381 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1382 l2cap_chan_lock(chan
);
1384 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1385 l2cap_chan_ready(chan
);
1386 l2cap_chan_unlock(chan
);
1390 if (chan
->state
== BT_CONNECT
) {
1391 if (!l2cap_chan_check_security(chan
, true) ||
1392 !__l2cap_no_conn_pending(chan
)) {
1393 l2cap_chan_unlock(chan
);
1397 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1398 && test_bit(CONF_STATE2_DEVICE
,
1399 &chan
->conf_state
)) {
1400 l2cap_chan_close(chan
, ECONNRESET
);
1401 l2cap_chan_unlock(chan
);
1405 l2cap_start_connection(chan
);
1407 } else if (chan
->state
== BT_CONNECT2
) {
1408 struct l2cap_conn_rsp rsp
;
1410 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1411 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1413 if (l2cap_chan_check_security(chan
, false)) {
1414 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1415 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1416 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1417 chan
->ops
->defer(chan
);
1420 l2cap_state_change(chan
, BT_CONFIG
);
1421 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1422 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1425 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1426 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1429 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1432 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1433 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1434 l2cap_chan_unlock(chan
);
1438 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1439 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1440 l2cap_build_conf_req(chan
, buf
), buf
);
1441 chan
->num_conf_req
++;
1444 l2cap_chan_unlock(chan
);
1447 mutex_unlock(&conn
->chan_lock
);
1450 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1452 struct hci_conn
*hcon
= conn
->hcon
;
1453 struct hci_dev
*hdev
= hcon
->hdev
;
1455 BT_DBG("%s conn %p", hdev
->name
, conn
);
1457 /* For outgoing pairing which doesn't necessarily have an
1458 * associated socket (e.g. mgmt_pair_device).
1461 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1463 /* For LE slave connections, make sure the connection interval
1464 * is in the range of the minium and maximum interval that has
1465 * been configured for this connection. If not, then trigger
1466 * the connection update procedure.
1468 if (hcon
->role
== HCI_ROLE_SLAVE
&&
1469 (hcon
->le_conn_interval
< hcon
->le_conn_min_interval
||
1470 hcon
->le_conn_interval
> hcon
->le_conn_max_interval
)) {
1471 struct l2cap_conn_param_update_req req
;
1473 req
.min
= cpu_to_le16(hcon
->le_conn_min_interval
);
1474 req
.max
= cpu_to_le16(hcon
->le_conn_max_interval
);
1475 req
.latency
= cpu_to_le16(hcon
->le_conn_latency
);
1476 req
.to_multiplier
= cpu_to_le16(hcon
->le_supv_timeout
);
1478 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1479 L2CAP_CONN_PARAM_UPDATE_REQ
, sizeof(req
), &req
);
1483 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1485 struct l2cap_chan
*chan
;
1486 struct hci_conn
*hcon
= conn
->hcon
;
1488 BT_DBG("conn %p", conn
);
1490 if (hcon
->type
== ACL_LINK
)
1491 l2cap_request_info(conn
);
1493 mutex_lock(&conn
->chan_lock
);
1495 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1497 l2cap_chan_lock(chan
);
1499 if (chan
->scid
== L2CAP_CID_A2MP
) {
1500 l2cap_chan_unlock(chan
);
1504 if (hcon
->type
== LE_LINK
) {
1505 l2cap_le_start(chan
);
1506 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1507 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
1508 l2cap_chan_ready(chan
);
1509 } else if (chan
->state
== BT_CONNECT
) {
1510 l2cap_do_start(chan
);
1513 l2cap_chan_unlock(chan
);
1516 mutex_unlock(&conn
->chan_lock
);
1518 if (hcon
->type
== LE_LINK
)
1519 l2cap_le_conn_ready(conn
);
1521 queue_work(hcon
->hdev
->workqueue
, &conn
->pending_rx_work
);
1524 /* Notify sockets that we cannot guaranty reliability anymore */
1525 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1527 struct l2cap_chan
*chan
;
1529 BT_DBG("conn %p", conn
);
1531 mutex_lock(&conn
->chan_lock
);
1533 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1534 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1535 l2cap_chan_set_err(chan
, err
);
1538 mutex_unlock(&conn
->chan_lock
);
1541 static void l2cap_info_timeout(struct work_struct
*work
)
1543 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1546 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1547 conn
->info_ident
= 0;
1549 l2cap_conn_start(conn
);
1554 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1555 * callback is called during registration. The ->remove callback is called
1556 * during unregistration.
1557 * An l2cap_user object can either be explicitly unregistered or when the
1558 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1559 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1560 * External modules must own a reference to the l2cap_conn object if they intend
1561 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1562 * any time if they don't.
1565 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1567 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1570 /* We need to check whether l2cap_conn is registered. If it is not, we
1571 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1572 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1573 * relies on the parent hci_conn object to be locked. This itself relies
1574 * on the hci_dev object to be locked. So we must lock the hci device
1579 if (user
->list
.next
|| user
->list
.prev
) {
1584 /* conn->hchan is NULL after l2cap_conn_del() was called */
1590 ret
= user
->probe(conn
, user
);
1594 list_add(&user
->list
, &conn
->users
);
1598 hci_dev_unlock(hdev
);
1601 EXPORT_SYMBOL(l2cap_register_user
);
1603 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1605 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1609 if (!user
->list
.next
|| !user
->list
.prev
)
1612 list_del(&user
->list
);
1613 user
->list
.next
= NULL
;
1614 user
->list
.prev
= NULL
;
1615 user
->remove(conn
, user
);
1618 hci_dev_unlock(hdev
);
1620 EXPORT_SYMBOL(l2cap_unregister_user
);
1622 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1624 struct l2cap_user
*user
;
1626 while (!list_empty(&conn
->users
)) {
1627 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1628 list_del(&user
->list
);
1629 user
->list
.next
= NULL
;
1630 user
->list
.prev
= NULL
;
1631 user
->remove(conn
, user
);
1635 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1637 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1638 struct l2cap_chan
*chan
, *l
;
1643 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1645 kfree_skb(conn
->rx_skb
);
1647 skb_queue_purge(&conn
->pending_rx
);
1649 /* We can not call flush_work(&conn->pending_rx_work) here since we
1650 * might block if we are running on a worker from the same workqueue
1651 * pending_rx_work is waiting on.
1653 if (work_pending(&conn
->pending_rx_work
))
1654 cancel_work_sync(&conn
->pending_rx_work
);
1656 if (work_pending(&conn
->id_addr_update_work
))
1657 cancel_work_sync(&conn
->id_addr_update_work
);
1659 l2cap_unregister_all_users(conn
);
1661 /* Force the connection to be immediately dropped */
1662 hcon
->disc_timeout
= 0;
1664 mutex_lock(&conn
->chan_lock
);
1667 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1668 l2cap_chan_hold(chan
);
1669 l2cap_chan_lock(chan
);
1671 l2cap_chan_del(chan
, err
);
1673 l2cap_chan_unlock(chan
);
1675 chan
->ops
->close(chan
);
1676 l2cap_chan_put(chan
);
1679 mutex_unlock(&conn
->chan_lock
);
1681 hci_chan_del(conn
->hchan
);
1683 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1684 cancel_delayed_work_sync(&conn
->info_timer
);
1686 hcon
->l2cap_data
= NULL
;
1688 l2cap_conn_put(conn
);
1691 static void l2cap_conn_free(struct kref
*ref
)
1693 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1695 hci_conn_put(conn
->hcon
);
1699 struct l2cap_conn
*l2cap_conn_get(struct l2cap_conn
*conn
)
1701 kref_get(&conn
->ref
);
1704 EXPORT_SYMBOL(l2cap_conn_get
);
1706 void l2cap_conn_put(struct l2cap_conn
*conn
)
1708 kref_put(&conn
->ref
, l2cap_conn_free
);
1710 EXPORT_SYMBOL(l2cap_conn_put
);
1712 /* ---- Socket interface ---- */
1714 /* Find socket with psm and source / destination bdaddr.
1715 * Returns closest match.
1717 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1722 struct l2cap_chan
*c
, *c1
= NULL
;
1724 read_lock(&chan_list_lock
);
1726 list_for_each_entry(c
, &chan_list
, global_l
) {
1727 if (state
&& c
->state
!= state
)
1730 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1733 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1736 if (c
->psm
== psm
) {
1737 int src_match
, dst_match
;
1738 int src_any
, dst_any
;
1741 src_match
= !bacmp(&c
->src
, src
);
1742 dst_match
= !bacmp(&c
->dst
, dst
);
1743 if (src_match
&& dst_match
) {
1745 read_unlock(&chan_list_lock
);
1750 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1751 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1752 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1753 (src_any
&& dst_any
))
1759 l2cap_chan_hold(c1
);
1761 read_unlock(&chan_list_lock
);
1766 static void l2cap_monitor_timeout(struct work_struct
*work
)
1768 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1769 monitor_timer
.work
);
1771 BT_DBG("chan %p", chan
);
1773 l2cap_chan_lock(chan
);
1776 l2cap_chan_unlock(chan
);
1777 l2cap_chan_put(chan
);
1781 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1783 l2cap_chan_unlock(chan
);
1784 l2cap_chan_put(chan
);
1787 static void l2cap_retrans_timeout(struct work_struct
*work
)
1789 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1790 retrans_timer
.work
);
1792 BT_DBG("chan %p", chan
);
1794 l2cap_chan_lock(chan
);
1797 l2cap_chan_unlock(chan
);
1798 l2cap_chan_put(chan
);
1802 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1803 l2cap_chan_unlock(chan
);
1804 l2cap_chan_put(chan
);
1807 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1808 struct sk_buff_head
*skbs
)
1810 struct sk_buff
*skb
;
1811 struct l2cap_ctrl
*control
;
1813 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1815 if (__chan_is_moving(chan
))
1818 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1820 while (!skb_queue_empty(&chan
->tx_q
)) {
1822 skb
= skb_dequeue(&chan
->tx_q
);
1824 bt_cb(skb
)->control
.retries
= 1;
1825 control
= &bt_cb(skb
)->control
;
1827 control
->reqseq
= 0;
1828 control
->txseq
= chan
->next_tx_seq
;
1830 __pack_control(chan
, control
, skb
);
1832 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1833 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1834 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1837 l2cap_do_send(chan
, skb
);
1839 BT_DBG("Sent txseq %u", control
->txseq
);
1841 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1842 chan
->frames_sent
++;
1846 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1848 struct sk_buff
*skb
, *tx_skb
;
1849 struct l2cap_ctrl
*control
;
1852 BT_DBG("chan %p", chan
);
1854 if (chan
->state
!= BT_CONNECTED
)
1857 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1860 if (__chan_is_moving(chan
))
1863 while (chan
->tx_send_head
&&
1864 chan
->unacked_frames
< chan
->remote_tx_win
&&
1865 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1867 skb
= chan
->tx_send_head
;
1869 bt_cb(skb
)->control
.retries
= 1;
1870 control
= &bt_cb(skb
)->control
;
1872 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1875 control
->reqseq
= chan
->buffer_seq
;
1876 chan
->last_acked_seq
= chan
->buffer_seq
;
1877 control
->txseq
= chan
->next_tx_seq
;
1879 __pack_control(chan
, control
, skb
);
1881 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1882 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1883 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1886 /* Clone after data has been modified. Data is assumed to be
1887 read-only (for locking purposes) on cloned sk_buffs.
1889 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1894 __set_retrans_timer(chan
);
1896 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1897 chan
->unacked_frames
++;
1898 chan
->frames_sent
++;
1901 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1902 chan
->tx_send_head
= NULL
;
1904 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1906 l2cap_do_send(chan
, tx_skb
);
1907 BT_DBG("Sent txseq %u", control
->txseq
);
1910 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1911 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1916 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1918 struct l2cap_ctrl control
;
1919 struct sk_buff
*skb
;
1920 struct sk_buff
*tx_skb
;
1923 BT_DBG("chan %p", chan
);
1925 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1928 if (__chan_is_moving(chan
))
1931 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1932 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1934 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1936 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1941 bt_cb(skb
)->control
.retries
++;
1942 control
= bt_cb(skb
)->control
;
1944 if (chan
->max_tx
!= 0 &&
1945 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1946 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1947 l2cap_send_disconn_req(chan
, ECONNRESET
);
1948 l2cap_seq_list_clear(&chan
->retrans_list
);
1952 control
.reqseq
= chan
->buffer_seq
;
1953 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1958 if (skb_cloned(skb
)) {
1959 /* Cloned sk_buffs are read-only, so we need a
1962 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1964 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1968 l2cap_seq_list_clear(&chan
->retrans_list
);
1972 /* Update skb contents */
1973 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1974 put_unaligned_le32(__pack_extended_control(&control
),
1975 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1977 put_unaligned_le16(__pack_enhanced_control(&control
),
1978 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1982 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1983 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
,
1984 tx_skb
->len
- L2CAP_FCS_SIZE
);
1985 put_unaligned_le16(fcs
, skb_tail_pointer(tx_skb
) -
1989 l2cap_do_send(chan
, tx_skb
);
1991 BT_DBG("Resent txseq %d", control
.txseq
);
1993 chan
->last_acked_seq
= chan
->buffer_seq
;
1997 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1998 struct l2cap_ctrl
*control
)
2000 BT_DBG("chan %p, control %p", chan
, control
);
2002 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2003 l2cap_ertm_resend(chan
);
2006 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2007 struct l2cap_ctrl
*control
)
2009 struct sk_buff
*skb
;
2011 BT_DBG("chan %p, control %p", chan
, control
);
2014 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2016 l2cap_seq_list_clear(&chan
->retrans_list
);
2018 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2021 if (chan
->unacked_frames
) {
2022 skb_queue_walk(&chan
->tx_q
, skb
) {
2023 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2024 skb
== chan
->tx_send_head
)
2028 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2029 if (skb
== chan
->tx_send_head
)
2032 l2cap_seq_list_append(&chan
->retrans_list
,
2033 bt_cb(skb
)->control
.txseq
);
2036 l2cap_ertm_resend(chan
);
2040 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2042 struct l2cap_ctrl control
;
2043 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2044 chan
->last_acked_seq
);
2047 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2048 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2050 memset(&control
, 0, sizeof(control
));
2053 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2054 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2055 __clear_ack_timer(chan
);
2056 control
.super
= L2CAP_SUPER_RNR
;
2057 control
.reqseq
= chan
->buffer_seq
;
2058 l2cap_send_sframe(chan
, &control
);
2060 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2061 l2cap_ertm_send(chan
);
2062 /* If any i-frames were sent, they included an ack */
2063 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2067 /* Ack now if the window is 3/4ths full.
2068 * Calculate without mul or div
2070 threshold
= chan
->ack_win
;
2071 threshold
+= threshold
<< 1;
2074 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2077 if (frames_to_ack
>= threshold
) {
2078 __clear_ack_timer(chan
);
2079 control
.super
= L2CAP_SUPER_RR
;
2080 control
.reqseq
= chan
->buffer_seq
;
2081 l2cap_send_sframe(chan
, &control
);
2086 __set_ack_timer(chan
);
2090 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2091 struct msghdr
*msg
, int len
,
2092 int count
, struct sk_buff
*skb
)
2094 struct l2cap_conn
*conn
= chan
->conn
;
2095 struct sk_buff
**frag
;
2098 if (chan
->ops
->memcpy_fromiovec(chan
, skb_put(skb
, count
),
2099 msg
->msg_iov
, count
))
2105 /* Continuation fragments (no L2CAP header) */
2106 frag
= &skb_shinfo(skb
)->frag_list
;
2108 struct sk_buff
*tmp
;
2110 count
= min_t(unsigned int, conn
->mtu
, len
);
2112 tmp
= chan
->ops
->alloc_skb(chan
, 0, count
,
2113 msg
->msg_flags
& MSG_DONTWAIT
);
2115 return PTR_ERR(tmp
);
2119 if (chan
->ops
->memcpy_fromiovec(chan
, skb_put(*frag
, count
),
2120 msg
->msg_iov
, count
))
2126 skb
->len
+= (*frag
)->len
;
2127 skb
->data_len
+= (*frag
)->len
;
2129 frag
= &(*frag
)->next
;
2135 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2136 struct msghdr
*msg
, size_t len
)
2138 struct l2cap_conn
*conn
= chan
->conn
;
2139 struct sk_buff
*skb
;
2140 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2141 struct l2cap_hdr
*lh
;
2143 BT_DBG("chan %p psm 0x%2.2x len %zu", chan
,
2144 __le16_to_cpu(chan
->psm
), len
);
2146 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2148 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2149 msg
->msg_flags
& MSG_DONTWAIT
);
2153 /* Create L2CAP header */
2154 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2155 lh
->cid
= cpu_to_le16(chan
->dcid
);
2156 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2157 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2159 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2160 if (unlikely(err
< 0)) {
2162 return ERR_PTR(err
);
2167 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2168 struct msghdr
*msg
, size_t len
)
2170 struct l2cap_conn
*conn
= chan
->conn
;
2171 struct sk_buff
*skb
;
2173 struct l2cap_hdr
*lh
;
2175 BT_DBG("chan %p len %zu", chan
, len
);
2177 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2179 skb
= chan
->ops
->alloc_skb(chan
, L2CAP_HDR_SIZE
, count
,
2180 msg
->msg_flags
& MSG_DONTWAIT
);
2184 /* Create L2CAP header */
2185 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2186 lh
->cid
= cpu_to_le16(chan
->dcid
);
2187 lh
->len
= cpu_to_le16(len
);
2189 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2190 if (unlikely(err
< 0)) {
2192 return ERR_PTR(err
);
2197 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2198 struct msghdr
*msg
, size_t len
,
2201 struct l2cap_conn
*conn
= chan
->conn
;
2202 struct sk_buff
*skb
;
2203 int err
, count
, hlen
;
2204 struct l2cap_hdr
*lh
;
2206 BT_DBG("chan %p len %zu", chan
, len
);
2209 return ERR_PTR(-ENOTCONN
);
2211 hlen
= __ertm_hdr_size(chan
);
2214 hlen
+= L2CAP_SDULEN_SIZE
;
2216 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2217 hlen
+= L2CAP_FCS_SIZE
;
2219 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2221 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2222 msg
->msg_flags
& MSG_DONTWAIT
);
2226 /* Create L2CAP header */
2227 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2228 lh
->cid
= cpu_to_le16(chan
->dcid
);
2229 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2231 /* Control header is populated later */
2232 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2233 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2235 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2238 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2240 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2241 if (unlikely(err
< 0)) {
2243 return ERR_PTR(err
);
2246 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2247 bt_cb(skb
)->control
.retries
= 0;
2251 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2252 struct sk_buff_head
*seg_queue
,
2253 struct msghdr
*msg
, size_t len
)
2255 struct sk_buff
*skb
;
2260 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2262 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2263 * so fragmented skbs are not used. The HCI layer's handling
2264 * of fragmented skbs is not compatible with ERTM's queueing.
2267 /* PDU size is derived from the HCI MTU */
2268 pdu_len
= chan
->conn
->mtu
;
2270 /* Constrain PDU size for BR/EDR connections */
2272 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2274 /* Adjust for largest possible L2CAP overhead. */
2276 pdu_len
-= L2CAP_FCS_SIZE
;
2278 pdu_len
-= __ertm_hdr_size(chan
);
2280 /* Remote device may have requested smaller PDUs */
2281 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2283 if (len
<= pdu_len
) {
2284 sar
= L2CAP_SAR_UNSEGMENTED
;
2288 sar
= L2CAP_SAR_START
;
2293 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2296 __skb_queue_purge(seg_queue
);
2297 return PTR_ERR(skb
);
2300 bt_cb(skb
)->control
.sar
= sar
;
2301 __skb_queue_tail(seg_queue
, skb
);
2307 if (len
<= pdu_len
) {
2308 sar
= L2CAP_SAR_END
;
2311 sar
= L2CAP_SAR_CONTINUE
;
2318 static struct sk_buff
*l2cap_create_le_flowctl_pdu(struct l2cap_chan
*chan
,
2320 size_t len
, u16 sdulen
)
2322 struct l2cap_conn
*conn
= chan
->conn
;
2323 struct sk_buff
*skb
;
2324 int err
, count
, hlen
;
2325 struct l2cap_hdr
*lh
;
2327 BT_DBG("chan %p len %zu", chan
, len
);
2330 return ERR_PTR(-ENOTCONN
);
2332 hlen
= L2CAP_HDR_SIZE
;
2335 hlen
+= L2CAP_SDULEN_SIZE
;
2337 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2339 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2340 msg
->msg_flags
& MSG_DONTWAIT
);
2344 /* Create L2CAP header */
2345 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2346 lh
->cid
= cpu_to_le16(chan
->dcid
);
2347 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2350 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2352 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2353 if (unlikely(err
< 0)) {
2355 return ERR_PTR(err
);
2361 static int l2cap_segment_le_sdu(struct l2cap_chan
*chan
,
2362 struct sk_buff_head
*seg_queue
,
2363 struct msghdr
*msg
, size_t len
)
2365 struct sk_buff
*skb
;
2369 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2372 pdu_len
= chan
->remote_mps
- L2CAP_SDULEN_SIZE
;
2378 skb
= l2cap_create_le_flowctl_pdu(chan
, msg
, pdu_len
, sdu_len
);
2380 __skb_queue_purge(seg_queue
);
2381 return PTR_ERR(skb
);
2384 __skb_queue_tail(seg_queue
, skb
);
2390 pdu_len
+= L2CAP_SDULEN_SIZE
;
2397 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
2399 struct sk_buff
*skb
;
2401 struct sk_buff_head seg_queue
;
2406 /* Connectionless channel */
2407 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2408 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
2410 return PTR_ERR(skb
);
2412 /* Channel lock is released before requesting new skb and then
2413 * reacquired thus we need to recheck channel state.
2415 if (chan
->state
!= BT_CONNECTED
) {
2420 l2cap_do_send(chan
, skb
);
2424 switch (chan
->mode
) {
2425 case L2CAP_MODE_LE_FLOWCTL
:
2426 /* Check outgoing MTU */
2427 if (len
> chan
->omtu
)
2430 if (!chan
->tx_credits
)
2433 __skb_queue_head_init(&seg_queue
);
2435 err
= l2cap_segment_le_sdu(chan
, &seg_queue
, msg
, len
);
2437 if (chan
->state
!= BT_CONNECTED
) {
2438 __skb_queue_purge(&seg_queue
);
2445 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2447 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
2448 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
2452 if (!chan
->tx_credits
)
2453 chan
->ops
->suspend(chan
);
2459 case L2CAP_MODE_BASIC
:
2460 /* Check outgoing MTU */
2461 if (len
> chan
->omtu
)
2464 /* Create a basic PDU */
2465 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
2467 return PTR_ERR(skb
);
2469 /* Channel lock is released before requesting new skb and then
2470 * reacquired thus we need to recheck channel state.
2472 if (chan
->state
!= BT_CONNECTED
) {
2477 l2cap_do_send(chan
, skb
);
2481 case L2CAP_MODE_ERTM
:
2482 case L2CAP_MODE_STREAMING
:
2483 /* Check outgoing MTU */
2484 if (len
> chan
->omtu
) {
2489 __skb_queue_head_init(&seg_queue
);
2491 /* Do segmentation before calling in to the state machine,
2492 * since it's possible to block while waiting for memory
2495 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2497 /* The channel could have been closed while segmenting,
2498 * check that it is still connected.
2500 if (chan
->state
!= BT_CONNECTED
) {
2501 __skb_queue_purge(&seg_queue
);
2508 if (chan
->mode
== L2CAP_MODE_ERTM
)
2509 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2511 l2cap_streaming_send(chan
, &seg_queue
);
2515 /* If the skbs were not queued for sending, they'll still be in
2516 * seg_queue and need to be purged.
2518 __skb_queue_purge(&seg_queue
);
2522 BT_DBG("bad state %1.1x", chan
->mode
);
2528 EXPORT_SYMBOL_GPL(l2cap_chan_send
);
2530 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2532 struct l2cap_ctrl control
;
2535 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2537 memset(&control
, 0, sizeof(control
));
2539 control
.super
= L2CAP_SUPER_SREJ
;
2541 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2542 seq
= __next_seq(chan
, seq
)) {
2543 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2544 control
.reqseq
= seq
;
2545 l2cap_send_sframe(chan
, &control
);
2546 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2550 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2553 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2555 struct l2cap_ctrl control
;
2557 BT_DBG("chan %p", chan
);
2559 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2562 memset(&control
, 0, sizeof(control
));
2564 control
.super
= L2CAP_SUPER_SREJ
;
2565 control
.reqseq
= chan
->srej_list
.tail
;
2566 l2cap_send_sframe(chan
, &control
);
2569 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2571 struct l2cap_ctrl control
;
2575 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2577 memset(&control
, 0, sizeof(control
));
2579 control
.super
= L2CAP_SUPER_SREJ
;
2581 /* Capture initial list head to allow only one pass through the list. */
2582 initial_head
= chan
->srej_list
.head
;
2585 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2586 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2589 control
.reqseq
= seq
;
2590 l2cap_send_sframe(chan
, &control
);
2591 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2592 } while (chan
->srej_list
.head
!= initial_head
);
2595 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2597 struct sk_buff
*acked_skb
;
2600 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2602 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2605 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2606 chan
->expected_ack_seq
, chan
->unacked_frames
);
2608 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2609 ackseq
= __next_seq(chan
, ackseq
)) {
2611 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2613 skb_unlink(acked_skb
, &chan
->tx_q
);
2614 kfree_skb(acked_skb
);
2615 chan
->unacked_frames
--;
2619 chan
->expected_ack_seq
= reqseq
;
2621 if (chan
->unacked_frames
== 0)
2622 __clear_retrans_timer(chan
);
2624 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2627 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2629 BT_DBG("chan %p", chan
);
2631 chan
->expected_tx_seq
= chan
->buffer_seq
;
2632 l2cap_seq_list_clear(&chan
->srej_list
);
2633 skb_queue_purge(&chan
->srej_q
);
2634 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2637 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2638 struct l2cap_ctrl
*control
,
2639 struct sk_buff_head
*skbs
, u8 event
)
2641 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2645 case L2CAP_EV_DATA_REQUEST
:
2646 if (chan
->tx_send_head
== NULL
)
2647 chan
->tx_send_head
= skb_peek(skbs
);
2649 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2650 l2cap_ertm_send(chan
);
2652 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2653 BT_DBG("Enter LOCAL_BUSY");
2654 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2656 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2657 /* The SREJ_SENT state must be aborted if we are to
2658 * enter the LOCAL_BUSY state.
2660 l2cap_abort_rx_srej_sent(chan
);
2663 l2cap_send_ack(chan
);
2666 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2667 BT_DBG("Exit LOCAL_BUSY");
2668 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2670 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2671 struct l2cap_ctrl local_control
;
2673 memset(&local_control
, 0, sizeof(local_control
));
2674 local_control
.sframe
= 1;
2675 local_control
.super
= L2CAP_SUPER_RR
;
2676 local_control
.poll
= 1;
2677 local_control
.reqseq
= chan
->buffer_seq
;
2678 l2cap_send_sframe(chan
, &local_control
);
2680 chan
->retry_count
= 1;
2681 __set_monitor_timer(chan
);
2682 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2685 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2686 l2cap_process_reqseq(chan
, control
->reqseq
);
2688 case L2CAP_EV_EXPLICIT_POLL
:
2689 l2cap_send_rr_or_rnr(chan
, 1);
2690 chan
->retry_count
= 1;
2691 __set_monitor_timer(chan
);
2692 __clear_ack_timer(chan
);
2693 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2695 case L2CAP_EV_RETRANS_TO
:
2696 l2cap_send_rr_or_rnr(chan
, 1);
2697 chan
->retry_count
= 1;
2698 __set_monitor_timer(chan
);
2699 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2701 case L2CAP_EV_RECV_FBIT
:
2702 /* Nothing to process */
2709 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2710 struct l2cap_ctrl
*control
,
2711 struct sk_buff_head
*skbs
, u8 event
)
2713 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2717 case L2CAP_EV_DATA_REQUEST
:
2718 if (chan
->tx_send_head
== NULL
)
2719 chan
->tx_send_head
= skb_peek(skbs
);
2720 /* Queue data, but don't send. */
2721 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2723 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2724 BT_DBG("Enter LOCAL_BUSY");
2725 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2727 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2728 /* The SREJ_SENT state must be aborted if we are to
2729 * enter the LOCAL_BUSY state.
2731 l2cap_abort_rx_srej_sent(chan
);
2734 l2cap_send_ack(chan
);
2737 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2738 BT_DBG("Exit LOCAL_BUSY");
2739 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2741 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2742 struct l2cap_ctrl local_control
;
2743 memset(&local_control
, 0, sizeof(local_control
));
2744 local_control
.sframe
= 1;
2745 local_control
.super
= L2CAP_SUPER_RR
;
2746 local_control
.poll
= 1;
2747 local_control
.reqseq
= chan
->buffer_seq
;
2748 l2cap_send_sframe(chan
, &local_control
);
2750 chan
->retry_count
= 1;
2751 __set_monitor_timer(chan
);
2752 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2755 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2756 l2cap_process_reqseq(chan
, control
->reqseq
);
2760 case L2CAP_EV_RECV_FBIT
:
2761 if (control
&& control
->final
) {
2762 __clear_monitor_timer(chan
);
2763 if (chan
->unacked_frames
> 0)
2764 __set_retrans_timer(chan
);
2765 chan
->retry_count
= 0;
2766 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2767 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2770 case L2CAP_EV_EXPLICIT_POLL
:
2773 case L2CAP_EV_MONITOR_TO
:
2774 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2775 l2cap_send_rr_or_rnr(chan
, 1);
2776 __set_monitor_timer(chan
);
2777 chan
->retry_count
++;
2779 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2787 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2788 struct sk_buff_head
*skbs
, u8 event
)
2790 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2791 chan
, control
, skbs
, event
, chan
->tx_state
);
2793 switch (chan
->tx_state
) {
2794 case L2CAP_TX_STATE_XMIT
:
2795 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2797 case L2CAP_TX_STATE_WAIT_F
:
2798 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2806 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2807 struct l2cap_ctrl
*control
)
2809 BT_DBG("chan %p, control %p", chan
, control
);
2810 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2813 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2814 struct l2cap_ctrl
*control
)
2816 BT_DBG("chan %p, control %p", chan
, control
);
2817 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2820 /* Copy frame to all raw sockets on that connection */
2821 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2823 struct sk_buff
*nskb
;
2824 struct l2cap_chan
*chan
;
2826 BT_DBG("conn %p", conn
);
2828 mutex_lock(&conn
->chan_lock
);
2830 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2831 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2834 /* Don't send frame to the channel it came from */
2835 if (bt_cb(skb
)->chan
== chan
)
2838 nskb
= skb_clone(skb
, GFP_KERNEL
);
2841 if (chan
->ops
->recv(chan
, nskb
))
2845 mutex_unlock(&conn
->chan_lock
);
2848 /* ---- L2CAP signalling commands ---- */
2849 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2850 u8 ident
, u16 dlen
, void *data
)
2852 struct sk_buff
*skb
, **frag
;
2853 struct l2cap_cmd_hdr
*cmd
;
2854 struct l2cap_hdr
*lh
;
2857 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2858 conn
, code
, ident
, dlen
);
2860 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2863 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2864 count
= min_t(unsigned int, conn
->mtu
, len
);
2866 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2870 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2871 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2873 if (conn
->hcon
->type
== LE_LINK
)
2874 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2876 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2878 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2881 cmd
->len
= cpu_to_le16(dlen
);
2884 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2885 memcpy(skb_put(skb
, count
), data
, count
);
2891 /* Continuation fragments (no L2CAP header) */
2892 frag
= &skb_shinfo(skb
)->frag_list
;
2894 count
= min_t(unsigned int, conn
->mtu
, len
);
2896 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2900 memcpy(skb_put(*frag
, count
), data
, count
);
2905 frag
= &(*frag
)->next
;
2915 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2918 struct l2cap_conf_opt
*opt
= *ptr
;
2921 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2929 *val
= *((u8
*) opt
->val
);
2933 *val
= get_unaligned_le16(opt
->val
);
2937 *val
= get_unaligned_le32(opt
->val
);
2941 *val
= (unsigned long) opt
->val
;
2945 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2949 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2951 struct l2cap_conf_opt
*opt
= *ptr
;
2953 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2960 *((u8
*) opt
->val
) = val
;
2964 put_unaligned_le16(val
, opt
->val
);
2968 put_unaligned_le32(val
, opt
->val
);
2972 memcpy(opt
->val
, (void *) val
, len
);
2976 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2979 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2981 struct l2cap_conf_efs efs
;
2983 switch (chan
->mode
) {
2984 case L2CAP_MODE_ERTM
:
2985 efs
.id
= chan
->local_id
;
2986 efs
.stype
= chan
->local_stype
;
2987 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2988 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2989 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2990 efs
.flush_to
= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
2993 case L2CAP_MODE_STREAMING
:
2995 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2996 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2997 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3006 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3007 (unsigned long) &efs
);
3010 static void l2cap_ack_timeout(struct work_struct
*work
)
3012 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3016 BT_DBG("chan %p", chan
);
3018 l2cap_chan_lock(chan
);
3020 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3021 chan
->last_acked_seq
);
3024 l2cap_send_rr_or_rnr(chan
, 0);
3026 l2cap_chan_unlock(chan
);
3027 l2cap_chan_put(chan
);
3030 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3034 chan
->next_tx_seq
= 0;
3035 chan
->expected_tx_seq
= 0;
3036 chan
->expected_ack_seq
= 0;
3037 chan
->unacked_frames
= 0;
3038 chan
->buffer_seq
= 0;
3039 chan
->frames_sent
= 0;
3040 chan
->last_acked_seq
= 0;
3042 chan
->sdu_last_frag
= NULL
;
3045 skb_queue_head_init(&chan
->tx_q
);
3047 chan
->local_amp_id
= AMP_ID_BREDR
;
3048 chan
->move_id
= AMP_ID_BREDR
;
3049 chan
->move_state
= L2CAP_MOVE_STABLE
;
3050 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3052 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3055 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3056 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3058 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3059 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3060 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3062 skb_queue_head_init(&chan
->srej_q
);
3064 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3068 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3070 l2cap_seq_list_free(&chan
->srej_list
);
3075 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3078 case L2CAP_MODE_STREAMING
:
3079 case L2CAP_MODE_ERTM
:
3080 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3084 return L2CAP_MODE_BASIC
;
3088 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3090 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3093 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3095 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
3098 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3099 struct l2cap_conf_rfc
*rfc
)
3101 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3102 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3104 /* Class 1 devices have must have ERTM timeouts
3105 * exceeding the Link Supervision Timeout. The
3106 * default Link Supervision Timeout for AMP
3107 * controllers is 10 seconds.
3109 * Class 1 devices use 0xffffffff for their
3110 * best-effort flush timeout, so the clamping logic
3111 * will result in a timeout that meets the above
3112 * requirement. ERTM timeouts are 16-bit values, so
3113 * the maximum timeout is 65.535 seconds.
3116 /* Convert timeout to milliseconds and round */
3117 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3119 /* This is the recommended formula for class 2 devices
3120 * that start ERTM timers when packets are sent to the
3123 ertm_to
= 3 * ertm_to
+ 500;
3125 if (ertm_to
> 0xffff)
3128 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3129 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3131 rfc
->retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3132 rfc
->monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3136 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3138 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3139 __l2cap_ews_supported(chan
->conn
)) {
3140 /* use extended control field */
3141 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3142 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3144 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3145 L2CAP_DEFAULT_TX_WINDOW
);
3146 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3148 chan
->ack_win
= chan
->tx_win
;
3151 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3153 struct l2cap_conf_req
*req
= data
;
3154 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3155 void *ptr
= req
->data
;
3158 BT_DBG("chan %p", chan
);
3160 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3163 switch (chan
->mode
) {
3164 case L2CAP_MODE_STREAMING
:
3165 case L2CAP_MODE_ERTM
:
3166 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3169 if (__l2cap_efs_supported(chan
->conn
))
3170 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3174 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3179 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3180 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3182 switch (chan
->mode
) {
3183 case L2CAP_MODE_BASIC
:
3187 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3188 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3191 rfc
.mode
= L2CAP_MODE_BASIC
;
3193 rfc
.max_transmit
= 0;
3194 rfc
.retrans_timeout
= 0;
3195 rfc
.monitor_timeout
= 0;
3196 rfc
.max_pdu_size
= 0;
3198 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3199 (unsigned long) &rfc
);
3202 case L2CAP_MODE_ERTM
:
3203 rfc
.mode
= L2CAP_MODE_ERTM
;
3204 rfc
.max_transmit
= chan
->max_tx
;
3206 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3208 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3209 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3211 rfc
.max_pdu_size
= cpu_to_le16(size
);
3213 l2cap_txwin_setup(chan
);
3215 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3216 L2CAP_DEFAULT_TX_WINDOW
);
3218 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3219 (unsigned long) &rfc
);
3221 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3222 l2cap_add_opt_efs(&ptr
, chan
);
3224 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3225 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3228 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3229 if (chan
->fcs
== L2CAP_FCS_NONE
||
3230 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3231 chan
->fcs
= L2CAP_FCS_NONE
;
3232 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3237 case L2CAP_MODE_STREAMING
:
3238 l2cap_txwin_setup(chan
);
3239 rfc
.mode
= L2CAP_MODE_STREAMING
;
3241 rfc
.max_transmit
= 0;
3242 rfc
.retrans_timeout
= 0;
3243 rfc
.monitor_timeout
= 0;
3245 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3246 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3248 rfc
.max_pdu_size
= cpu_to_le16(size
);
3250 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3251 (unsigned long) &rfc
);
3253 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3254 l2cap_add_opt_efs(&ptr
, chan
);
3256 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3257 if (chan
->fcs
== L2CAP_FCS_NONE
||
3258 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3259 chan
->fcs
= L2CAP_FCS_NONE
;
3260 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3266 req
->dcid
= cpu_to_le16(chan
->dcid
);
3267 req
->flags
= cpu_to_le16(0);
/* Parse the accumulated Configure Request in chan->conf_req and build
 * the response into @data.  Options with an unexpected length are
 * ignored (kept at their defaults).  Returns the response length, or a
 * negative errno when the request must be refused outright.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; others must be understood */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown option types back in the response */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND,
					&chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);
			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3486 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3487 void *data
, u16
*result
)
3489 struct l2cap_conf_req
*req
= data
;
3490 void *ptr
= req
->data
;
3493 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3494 struct l2cap_conf_efs efs
;
3496 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3498 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3499 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3502 case L2CAP_CONF_MTU
:
3503 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3504 *result
= L2CAP_CONF_UNACCEPT
;
3505 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3508 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3511 case L2CAP_CONF_FLUSH_TO
:
3512 chan
->flush_to
= val
;
3513 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3517 case L2CAP_CONF_RFC
:
3518 if (olen
== sizeof(rfc
))
3519 memcpy(&rfc
, (void *)val
, olen
);
3521 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3522 rfc
.mode
!= chan
->mode
)
3523 return -ECONNREFUSED
;
3527 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3528 sizeof(rfc
), (unsigned long) &rfc
);
3531 case L2CAP_CONF_EWS
:
3532 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3533 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3537 case L2CAP_CONF_EFS
:
3538 if (olen
== sizeof(efs
))
3539 memcpy(&efs
, (void *)val
, olen
);
3541 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3542 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3543 efs
.stype
!= chan
->local_stype
)
3544 return -ECONNREFUSED
;
3546 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3547 (unsigned long) &efs
);
3550 case L2CAP_CONF_FCS
:
3551 if (*result
== L2CAP_CONF_PENDING
)
3552 if (val
== L2CAP_FCS_NONE
)
3553 set_bit(CONF_RECV_NO_FCS
,
3559 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3560 return -ECONNREFUSED
;
3562 chan
->mode
= rfc
.mode
;
3564 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3566 case L2CAP_MODE_ERTM
:
3567 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3568 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3569 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3570 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3571 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3574 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3575 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3576 chan
->local_sdu_itime
=
3577 le32_to_cpu(efs
.sdu_itime
);
3578 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3579 chan
->local_flush_to
=
3580 le32_to_cpu(efs
.flush_to
);
3584 case L2CAP_MODE_STREAMING
:
3585 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3589 req
->dcid
= cpu_to_le16(chan
->dcid
);
3590 req
->flags
= cpu_to_le16(0);
3595 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3596 u16 result
, u16 flags
)
3598 struct l2cap_conf_rsp
*rsp
= data
;
3599 void *ptr
= rsp
->data
;
3601 BT_DBG("chan %p", chan
);
3603 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3604 rsp
->result
= cpu_to_le16(result
);
3605 rsp
->flags
= cpu_to_le16(flags
);
3610 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3612 struct l2cap_le_conn_rsp rsp
;
3613 struct l2cap_conn
*conn
= chan
->conn
;
3615 BT_DBG("chan %p", chan
);
3617 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3618 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3619 rsp
.mps
= cpu_to_le16(chan
->mps
);
3620 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
3621 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3623 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
3627 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3629 struct l2cap_conn_rsp rsp
;
3630 struct l2cap_conn
*conn
= chan
->conn
;
3634 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3635 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3636 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3637 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3640 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3642 rsp_code
= L2CAP_CONN_RSP
;
3644 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3646 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3648 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3651 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3652 l2cap_build_conf_req(chan
, buf
), buf
);
3653 chan
->num_conf_req
++;
3656 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3660 /* Use sane default values in case a misbehaving remote device
3661 * did not send an RFC or extended window size option.
3663 u16 txwin_ext
= chan
->ack_win
;
3664 struct l2cap_conf_rfc rfc
= {
3666 .retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3667 .monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3668 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3669 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3672 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3674 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3677 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3678 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3681 case L2CAP_CONF_RFC
:
3682 if (olen
== sizeof(rfc
))
3683 memcpy(&rfc
, (void *)val
, olen
);
3685 case L2CAP_CONF_EWS
:
3692 case L2CAP_MODE_ERTM
:
3693 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3694 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3695 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3696 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3697 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3699 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3702 case L2CAP_MODE_STREAMING
:
3703 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3707 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3708 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3711 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3713 if (cmd_len
< sizeof(*rej
))
3716 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3719 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3720 cmd
->ident
== conn
->info_ident
) {
3721 cancel_delayed_work(&conn
->info_timer
);
3723 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3724 conn
->info_ident
= 0;
3726 l2cap_conn_start(conn
);
3732 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3733 struct l2cap_cmd_hdr
*cmd
,
3734 u8
*data
, u8 rsp_code
, u8 amp_id
)
3736 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3737 struct l2cap_conn_rsp rsp
;
3738 struct l2cap_chan
*chan
= NULL
, *pchan
;
3739 int result
, status
= L2CAP_CS_NO_INFO
;
3741 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3742 __le16 psm
= req
->psm
;
3744 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3746 /* Check if we have socket listening on psm */
3747 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3748 &conn
->hcon
->dst
, ACL_LINK
);
3750 result
= L2CAP_CR_BAD_PSM
;
3754 mutex_lock(&conn
->chan_lock
);
3755 l2cap_chan_lock(pchan
);
3757 /* Check if the ACL is secure enough (if not SDP) */
3758 if (psm
!= cpu_to_le16(L2CAP_PSM_SDP
) &&
3759 !hci_conn_check_link_mode(conn
->hcon
)) {
3760 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3761 result
= L2CAP_CR_SEC_BLOCK
;
3765 result
= L2CAP_CR_NO_MEM
;
3767 /* Check if we already have channel with that dcid */
3768 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3771 chan
= pchan
->ops
->new_connection(pchan
);
3775 /* For certain devices (ex: HID mouse), support for authentication,
3776 * pairing and bonding is optional. For such devices, inorder to avoid
3777 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3778 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3780 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3782 bacpy(&chan
->src
, &conn
->hcon
->src
);
3783 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3784 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
3785 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
3788 chan
->local_amp_id
= amp_id
;
3790 __l2cap_chan_add(conn
, chan
);
3794 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3796 chan
->ident
= cmd
->ident
;
3798 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3799 if (l2cap_chan_check_security(chan
, false)) {
3800 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3801 l2cap_state_change(chan
, BT_CONNECT2
);
3802 result
= L2CAP_CR_PEND
;
3803 status
= L2CAP_CS_AUTHOR_PEND
;
3804 chan
->ops
->defer(chan
);
3806 /* Force pending result for AMP controllers.
3807 * The connection will succeed after the
3808 * physical link is up.
3810 if (amp_id
== AMP_ID_BREDR
) {
3811 l2cap_state_change(chan
, BT_CONFIG
);
3812 result
= L2CAP_CR_SUCCESS
;
3814 l2cap_state_change(chan
, BT_CONNECT2
);
3815 result
= L2CAP_CR_PEND
;
3817 status
= L2CAP_CS_NO_INFO
;
3820 l2cap_state_change(chan
, BT_CONNECT2
);
3821 result
= L2CAP_CR_PEND
;
3822 status
= L2CAP_CS_AUTHEN_PEND
;
3825 l2cap_state_change(chan
, BT_CONNECT2
);
3826 result
= L2CAP_CR_PEND
;
3827 status
= L2CAP_CS_NO_INFO
;
3831 l2cap_chan_unlock(pchan
);
3832 mutex_unlock(&conn
->chan_lock
);
3833 l2cap_chan_put(pchan
);
3836 rsp
.scid
= cpu_to_le16(scid
);
3837 rsp
.dcid
= cpu_to_le16(dcid
);
3838 rsp
.result
= cpu_to_le16(result
);
3839 rsp
.status
= cpu_to_le16(status
);
3840 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3842 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3843 struct l2cap_info_req info
;
3844 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3846 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3847 conn
->info_ident
= l2cap_get_ident(conn
);
3849 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3851 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3852 sizeof(info
), &info
);
3855 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3856 result
== L2CAP_CR_SUCCESS
) {
3858 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3859 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3860 l2cap_build_conf_req(chan
, buf
), buf
);
3861 chan
->num_conf_req
++;
3867 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3868 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3870 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3871 struct hci_conn
*hcon
= conn
->hcon
;
3873 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3877 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3878 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3879 mgmt_device_connected(hdev
, hcon
, 0, NULL
, 0);
3880 hci_dev_unlock(hdev
);
3882 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3886 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3887 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3890 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3891 u16 scid
, dcid
, result
, status
;
3892 struct l2cap_chan
*chan
;
3896 if (cmd_len
< sizeof(*rsp
))
3899 scid
= __le16_to_cpu(rsp
->scid
);
3900 dcid
= __le16_to_cpu(rsp
->dcid
);
3901 result
= __le16_to_cpu(rsp
->result
);
3902 status
= __le16_to_cpu(rsp
->status
);
3904 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3905 dcid
, scid
, result
, status
);
3907 mutex_lock(&conn
->chan_lock
);
3910 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3916 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3925 l2cap_chan_lock(chan
);
3928 case L2CAP_CR_SUCCESS
:
3929 l2cap_state_change(chan
, BT_CONFIG
);
3932 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3934 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3937 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3938 l2cap_build_conf_req(chan
, req
), req
);
3939 chan
->num_conf_req
++;
3943 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3947 l2cap_chan_del(chan
, ECONNREFUSED
);
3951 l2cap_chan_unlock(chan
);
3954 mutex_unlock(&conn
->chan_lock
);
3959 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3961 /* FCS is enabled only in ERTM or streaming mode, if one or both
3964 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3965 chan
->fcs
= L2CAP_FCS_NONE
;
3966 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
3967 chan
->fcs
= L2CAP_FCS_CRC16
;
3970 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3971 u8 ident
, u16 flags
)
3973 struct l2cap_conn
*conn
= chan
->conn
;
3975 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
3978 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3979 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3981 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
3982 l2cap_build_conf_rsp(chan
, data
,
3983 L2CAP_CONF_SUCCESS
, flags
), data
);
3986 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
3989 struct l2cap_cmd_rej_cid rej
;
3991 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3992 rej
.scid
= __cpu_to_le16(scid
);
3993 rej
.dcid
= __cpu_to_le16(dcid
);
3995 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3998 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
3999 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4002 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4005 struct l2cap_chan
*chan
;
4008 if (cmd_len
< sizeof(*req
))
4011 dcid
= __le16_to_cpu(req
->dcid
);
4012 flags
= __le16_to_cpu(req
->flags
);
4014 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4016 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4018 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4022 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4023 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4028 /* Reject if config buffer is too small. */
4029 len
= cmd_len
- sizeof(*req
);
4030 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4031 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4032 l2cap_build_conf_rsp(chan
, rsp
,
4033 L2CAP_CONF_REJECT
, flags
), rsp
);
4038 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4039 chan
->conf_len
+= len
;
4041 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4042 /* Incomplete config. Send empty response. */
4043 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4044 l2cap_build_conf_rsp(chan
, rsp
,
4045 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4049 /* Complete config. */
4050 len
= l2cap_parse_conf_req(chan
, rsp
);
4052 l2cap_send_disconn_req(chan
, ECONNRESET
);
4056 chan
->ident
= cmd
->ident
;
4057 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4058 chan
->num_conf_rsp
++;
4060 /* Reset config buffer. */
4063 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4066 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4067 set_default_fcs(chan
);
4069 if (chan
->mode
== L2CAP_MODE_ERTM
||
4070 chan
->mode
== L2CAP_MODE_STREAMING
)
4071 err
= l2cap_ertm_init(chan
);
4074 l2cap_send_disconn_req(chan
, -err
);
4076 l2cap_chan_ready(chan
);
4081 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4083 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4084 l2cap_build_conf_req(chan
, buf
), buf
);
4085 chan
->num_conf_req
++;
4088 /* Got Conf Rsp PENDING from remote side and assume we sent
4089 Conf Rsp PENDING in the code above */
4090 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4091 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4093 /* check compatibility */
4095 /* Send rsp for BR/EDR channel */
4097 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4099 chan
->ident
= cmd
->ident
;
4103 l2cap_chan_unlock(chan
);
4107 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4108 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4111 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4112 u16 scid
, flags
, result
;
4113 struct l2cap_chan
*chan
;
4114 int len
= cmd_len
- sizeof(*rsp
);
4117 if (cmd_len
< sizeof(*rsp
))
4120 scid
= __le16_to_cpu(rsp
->scid
);
4121 flags
= __le16_to_cpu(rsp
->flags
);
4122 result
= __le16_to_cpu(rsp
->result
);
4124 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4127 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4132 case L2CAP_CONF_SUCCESS
:
4133 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4134 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4137 case L2CAP_CONF_PENDING
:
4138 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4140 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4143 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4146 l2cap_send_disconn_req(chan
, ECONNRESET
);
4150 if (!chan
->hs_hcon
) {
4151 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4154 if (l2cap_check_efs(chan
)) {
4155 amp_create_logical_link(chan
);
4156 chan
->ident
= cmd
->ident
;
4162 case L2CAP_CONF_UNACCEPT
:
4163 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4166 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4167 l2cap_send_disconn_req(chan
, ECONNRESET
);
4171 /* throw out any old stored conf requests */
4172 result
= L2CAP_CONF_SUCCESS
;
4173 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4176 l2cap_send_disconn_req(chan
, ECONNRESET
);
4180 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4181 L2CAP_CONF_REQ
, len
, req
);
4182 chan
->num_conf_req
++;
4183 if (result
!= L2CAP_CONF_SUCCESS
)
4189 l2cap_chan_set_err(chan
, ECONNRESET
);
4191 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4192 l2cap_send_disconn_req(chan
, ECONNRESET
);
4196 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4199 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4201 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4202 set_default_fcs(chan
);
4204 if (chan
->mode
== L2CAP_MODE_ERTM
||
4205 chan
->mode
== L2CAP_MODE_STREAMING
)
4206 err
= l2cap_ertm_init(chan
);
4209 l2cap_send_disconn_req(chan
, -err
);
4211 l2cap_chan_ready(chan
);
4215 l2cap_chan_unlock(chan
);
4219 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4220 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4223 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4224 struct l2cap_disconn_rsp rsp
;
4226 struct l2cap_chan
*chan
;
4228 if (cmd_len
!= sizeof(*req
))
4231 scid
= __le16_to_cpu(req
->scid
);
4232 dcid
= __le16_to_cpu(req
->dcid
);
4234 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4236 mutex_lock(&conn
->chan_lock
);
4238 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4240 mutex_unlock(&conn
->chan_lock
);
4241 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4245 l2cap_chan_lock(chan
);
4247 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4248 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4249 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4251 chan
->ops
->set_shutdown(chan
);
4253 l2cap_chan_hold(chan
);
4254 l2cap_chan_del(chan
, ECONNRESET
);
4256 l2cap_chan_unlock(chan
);
4258 chan
->ops
->close(chan
);
4259 l2cap_chan_put(chan
);
4261 mutex_unlock(&conn
->chan_lock
);
4266 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4267 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4270 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4272 struct l2cap_chan
*chan
;
4274 if (cmd_len
!= sizeof(*rsp
))
4277 scid
= __le16_to_cpu(rsp
->scid
);
4278 dcid
= __le16_to_cpu(rsp
->dcid
);
4280 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4282 mutex_lock(&conn
->chan_lock
);
4284 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4286 mutex_unlock(&conn
->chan_lock
);
4290 l2cap_chan_lock(chan
);
4292 l2cap_chan_hold(chan
);
4293 l2cap_chan_del(chan
, 0);
4295 l2cap_chan_unlock(chan
);
4297 chan
->ops
->close(chan
);
4298 l2cap_chan_put(chan
);
4300 mutex_unlock(&conn
->chan_lock
);
4305 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4306 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4309 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4312 if (cmd_len
!= sizeof(*req
))
4315 type
= __le16_to_cpu(req
->type
);
4317 BT_DBG("type 0x%4.4x", type
);
4319 if (type
== L2CAP_IT_FEAT_MASK
) {
4321 u32 feat_mask
= l2cap_feat_mask
;
4322 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4323 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4324 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4326 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4328 if (conn
->hs_enabled
)
4329 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4330 | L2CAP_FEAT_EXT_WINDOW
;
4332 put_unaligned_le32(feat_mask
, rsp
->data
);
4333 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4335 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4337 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4339 if (conn
->hs_enabled
)
4340 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4342 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4344 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4345 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4346 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4347 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4350 struct l2cap_info_rsp rsp
;
4351 rsp
.type
= cpu_to_le16(type
);
4352 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
4353 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4360 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4361 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4364 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4367 if (cmd_len
< sizeof(*rsp
))
4370 type
= __le16_to_cpu(rsp
->type
);
4371 result
= __le16_to_cpu(rsp
->result
);
4373 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4375 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4376 if (cmd
->ident
!= conn
->info_ident
||
4377 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4380 cancel_delayed_work(&conn
->info_timer
);
4382 if (result
!= L2CAP_IR_SUCCESS
) {
4383 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4384 conn
->info_ident
= 0;
4386 l2cap_conn_start(conn
);
4392 case L2CAP_IT_FEAT_MASK
:
4393 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4395 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4396 struct l2cap_info_req req
;
4397 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4399 conn
->info_ident
= l2cap_get_ident(conn
);
4401 l2cap_send_cmd(conn
, conn
->info_ident
,
4402 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4404 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4405 conn
->info_ident
= 0;
4407 l2cap_conn_start(conn
);
4411 case L2CAP_IT_FIXED_CHAN
:
4412 conn
->fixed_chan_mask
= rsp
->data
[0];
4413 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4414 conn
->info_ident
= 0;
4416 l2cap_conn_start(conn
);
4423 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4424 struct l2cap_cmd_hdr
*cmd
,
4425 u16 cmd_len
, void *data
)
4427 struct l2cap_create_chan_req
*req
= data
;
4428 struct l2cap_create_chan_rsp rsp
;
4429 struct l2cap_chan
*chan
;
4430 struct hci_dev
*hdev
;
4433 if (cmd_len
!= sizeof(*req
))
4436 if (!conn
->hs_enabled
)
4439 psm
= le16_to_cpu(req
->psm
);
4440 scid
= le16_to_cpu(req
->scid
);
4442 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4444 /* For controller id 0 make BR/EDR connection */
4445 if (req
->amp_id
== AMP_ID_BREDR
) {
4446 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4451 /* Validate AMP controller id */
4452 hdev
= hci_dev_get(req
->amp_id
);
4456 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4461 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4464 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4465 struct hci_conn
*hs_hcon
;
4467 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4471 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4476 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4478 mgr
->bredr_chan
= chan
;
4479 chan
->hs_hcon
= hs_hcon
;
4480 chan
->fcs
= L2CAP_FCS_NONE
;
4481 conn
->mtu
= hdev
->block_mtu
;
4490 rsp
.scid
= cpu_to_le16(scid
);
4491 rsp
.result
= cpu_to_le16(L2CAP_CR_BAD_AMP
);
4492 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4494 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4500 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4502 struct l2cap_move_chan_req req
;
4505 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4507 ident
= l2cap_get_ident(chan
->conn
);
4508 chan
->ident
= ident
;
4510 req
.icid
= cpu_to_le16(chan
->scid
);
4511 req
.dest_amp_id
= dest_amp_id
;
4513 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4516 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4519 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4521 struct l2cap_move_chan_rsp rsp
;
4523 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4525 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4526 rsp
.result
= cpu_to_le16(result
);
4528 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4532 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4534 struct l2cap_move_chan_cfm cfm
;
4536 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4538 chan
->ident
= l2cap_get_ident(chan
->conn
);
4540 cfm
.icid
= cpu_to_le16(chan
->scid
);
4541 cfm
.result
= cpu_to_le16(result
);
4543 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4546 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4549 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4551 struct l2cap_move_chan_cfm cfm
;
4553 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4555 cfm
.icid
= cpu_to_le16(icid
);
4556 cfm
.result
= cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4558 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4562 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4565 struct l2cap_move_chan_cfm_rsp rsp
;
4567 BT_DBG("icid 0x%4.4x", icid
);
4569 rsp
.icid
= cpu_to_le16(icid
);
4570 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4573 static void __release_logical_link(struct l2cap_chan
*chan
)
4575 chan
->hs_hchan
= NULL
;
4576 chan
->hs_hcon
= NULL
;
4578 /* Placeholder - release the logical link */
4581 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4583 /* Logical link setup failed */
4584 if (chan
->state
!= BT_CONNECTED
) {
4585 /* Create channel failure, disconnect */
4586 l2cap_send_disconn_req(chan
, ECONNRESET
);
4590 switch (chan
->move_role
) {
4591 case L2CAP_MOVE_ROLE_RESPONDER
:
4592 l2cap_move_done(chan
);
4593 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4595 case L2CAP_MOVE_ROLE_INITIATOR
:
4596 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4597 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4598 /* Remote has only sent pending or
4599 * success responses, clean up
4601 l2cap_move_done(chan
);
4604 /* Other amp move states imply that the move
4605 * has already aborted
4607 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4612 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4613 struct hci_chan
*hchan
)
4615 struct l2cap_conf_rsp rsp
;
4617 chan
->hs_hchan
= hchan
;
4618 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4620 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4622 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4625 set_default_fcs(chan
);
4627 err
= l2cap_ertm_init(chan
);
4629 l2cap_send_disconn_req(chan
, -err
);
4631 l2cap_chan_ready(chan
);
4635 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4636 struct hci_chan
*hchan
)
4638 chan
->hs_hcon
= hchan
->conn
;
4639 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4641 BT_DBG("move_state %d", chan
->move_state
);
4643 switch (chan
->move_state
) {
4644 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4645 /* Move confirm will be sent after a success
4646 * response is received
4648 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4650 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4651 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4652 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4653 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4654 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4655 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4656 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4657 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4658 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4662 /* Move was not in expected state, free the channel */
4663 __release_logical_link(chan
);
4665 chan
->move_state
= L2CAP_MOVE_STABLE
;
4669 /* Call with chan locked */
4670 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4673 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4676 l2cap_logical_fail(chan
);
4677 __release_logical_link(chan
);
4681 if (chan
->state
!= BT_CONNECTED
) {
4682 /* Ignore logical link if channel is on BR/EDR */
4683 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4684 l2cap_logical_finish_create(chan
, hchan
);
4686 l2cap_logical_finish_move(chan
, hchan
);
4690 void l2cap_move_start(struct l2cap_chan
*chan
)
4692 BT_DBG("chan %p", chan
);
4694 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4695 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4697 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4698 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4699 /* Placeholder - start physical link setup */
4701 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4702 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4704 l2cap_move_setup(chan
);
4705 l2cap_send_move_chan_req(chan
, 0);
4709 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4710 u8 local_amp_id
, u8 remote_amp_id
)
4712 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4713 local_amp_id
, remote_amp_id
);
4715 chan
->fcs
= L2CAP_FCS_NONE
;
4717 /* Outgoing channel on AMP */
4718 if (chan
->state
== BT_CONNECT
) {
4719 if (result
== L2CAP_CR_SUCCESS
) {
4720 chan
->local_amp_id
= local_amp_id
;
4721 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4723 /* Revert to BR/EDR connect */
4724 l2cap_send_conn_req(chan
);
4730 /* Incoming channel on AMP */
4731 if (__l2cap_no_conn_pending(chan
)) {
4732 struct l2cap_conn_rsp rsp
;
4734 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4735 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4737 if (result
== L2CAP_CR_SUCCESS
) {
4738 /* Send successful response */
4739 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4740 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4742 /* Send negative response */
4743 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4744 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4747 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4750 if (result
== L2CAP_CR_SUCCESS
) {
4751 l2cap_state_change(chan
, BT_CONFIG
);
4752 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4753 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4755 l2cap_build_conf_req(chan
, buf
), buf
);
4756 chan
->num_conf_req
++;
4761 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4764 l2cap_move_setup(chan
);
4765 chan
->move_id
= local_amp_id
;
4766 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4768 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4771 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4773 struct hci_chan
*hchan
= NULL
;
4775 /* Placeholder - get hci_chan for logical link */
4778 if (hchan
->state
== BT_CONNECTED
) {
4779 /* Logical link is ready to go */
4780 chan
->hs_hcon
= hchan
->conn
;
4781 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4782 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4783 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4785 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4787 /* Wait for logical link to be ready */
4788 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4791 /* Logical link not available */
4792 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4796 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4798 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4800 if (result
== -EINVAL
)
4801 rsp_result
= L2CAP_MR_BAD_ID
;
4803 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4805 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4808 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4809 chan
->move_state
= L2CAP_MOVE_STABLE
;
4811 /* Restart data transmission */
4812 l2cap_ertm_send(chan
);
4815 /* Invoke with locked chan */
4816 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4818 u8 local_amp_id
= chan
->local_amp_id
;
4819 u8 remote_amp_id
= chan
->remote_amp_id
;
4821 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4822 chan
, result
, local_amp_id
, remote_amp_id
);
4824 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4825 l2cap_chan_unlock(chan
);
4829 if (chan
->state
!= BT_CONNECTED
) {
4830 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4831 } else if (result
!= L2CAP_MR_SUCCESS
) {
4832 l2cap_do_move_cancel(chan
, result
);
4834 switch (chan
->move_role
) {
4835 case L2CAP_MOVE_ROLE_INITIATOR
:
4836 l2cap_do_move_initiate(chan
, local_amp_id
,
4839 case L2CAP_MOVE_ROLE_RESPONDER
:
4840 l2cap_do_move_respond(chan
, result
);
4843 l2cap_do_move_cancel(chan
, result
);
4849 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4850 struct l2cap_cmd_hdr
*cmd
,
4851 u16 cmd_len
, void *data
)
4853 struct l2cap_move_chan_req
*req
= data
;
4854 struct l2cap_move_chan_rsp rsp
;
4855 struct l2cap_chan
*chan
;
4857 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4859 if (cmd_len
!= sizeof(*req
))
4862 icid
= le16_to_cpu(req
->icid
);
4864 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4866 if (!conn
->hs_enabled
)
4869 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4871 rsp
.icid
= cpu_to_le16(icid
);
4872 rsp
.result
= cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4873 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4878 chan
->ident
= cmd
->ident
;
4880 if (chan
->scid
< L2CAP_CID_DYN_START
||
4881 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4882 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4883 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4884 result
= L2CAP_MR_NOT_ALLOWED
;
4885 goto send_move_response
;
4888 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4889 result
= L2CAP_MR_SAME_ID
;
4890 goto send_move_response
;
4893 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4894 struct hci_dev
*hdev
;
4895 hdev
= hci_dev_get(req
->dest_amp_id
);
4896 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4897 !test_bit(HCI_UP
, &hdev
->flags
)) {
4901 result
= L2CAP_MR_BAD_ID
;
4902 goto send_move_response
;
4907 /* Detect a move collision. Only send a collision response
4908 * if this side has "lost", otherwise proceed with the move.
4909 * The winner has the larger bd_addr.
4911 if ((__chan_is_moving(chan
) ||
4912 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4913 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4914 result
= L2CAP_MR_COLLISION
;
4915 goto send_move_response
;
4918 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4919 l2cap_move_setup(chan
);
4920 chan
->move_id
= req
->dest_amp_id
;
4923 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4924 /* Moving to BR/EDR */
4925 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4926 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4927 result
= L2CAP_MR_PEND
;
4929 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4930 result
= L2CAP_MR_SUCCESS
;
4933 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4934 /* Placeholder - uncomment when amp functions are available */
4935 /*amp_accept_physical(chan, req->dest_amp_id);*/
4936 result
= L2CAP_MR_PEND
;
4940 l2cap_send_move_chan_rsp(chan
, result
);
4942 l2cap_chan_unlock(chan
);
4947 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4949 struct l2cap_chan
*chan
;
4950 struct hci_chan
*hchan
= NULL
;
4952 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4954 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4958 __clear_chan_timer(chan
);
4959 if (result
== L2CAP_MR_PEND
)
4960 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
4962 switch (chan
->move_state
) {
4963 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4964 /* Move confirm will be sent when logical link
4967 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4969 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
4970 if (result
== L2CAP_MR_PEND
) {
4972 } else if (test_bit(CONN_LOCAL_BUSY
,
4973 &chan
->conn_state
)) {
4974 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4976 /* Logical link is up or moving to BR/EDR,
4979 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4980 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4983 case L2CAP_MOVE_WAIT_RSP
:
4985 if (result
== L2CAP_MR_SUCCESS
) {
4986 /* Remote is ready, send confirm immediately
4987 * after logical link is ready
4989 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4991 /* Both logical link and move success
4992 * are required to confirm
4994 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
4997 /* Placeholder - get hci_chan for logical link */
4999 /* Logical link not available */
5000 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5004 /* If the logical link is not yet connected, do not
5005 * send confirmation.
5007 if (hchan
->state
!= BT_CONNECTED
)
5010 /* Logical link is already ready to go */
5012 chan
->hs_hcon
= hchan
->conn
;
5013 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5015 if (result
== L2CAP_MR_SUCCESS
) {
5016 /* Can confirm now */
5017 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5019 /* Now only need move success
5022 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5025 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5028 /* Any other amp move state means the move failed. */
5029 chan
->move_id
= chan
->local_amp_id
;
5030 l2cap_move_done(chan
);
5031 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5034 l2cap_chan_unlock(chan
);
5037 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5040 struct l2cap_chan
*chan
;
5042 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5044 /* Could not locate channel, icid is best guess */
5045 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5049 __clear_chan_timer(chan
);
5051 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5052 if (result
== L2CAP_MR_COLLISION
) {
5053 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5055 /* Cleanup - cancel move */
5056 chan
->move_id
= chan
->local_amp_id
;
5057 l2cap_move_done(chan
);
5061 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5063 l2cap_chan_unlock(chan
);
5066 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5067 struct l2cap_cmd_hdr
*cmd
,
5068 u16 cmd_len
, void *data
)
5070 struct l2cap_move_chan_rsp
*rsp
= data
;
5073 if (cmd_len
!= sizeof(*rsp
))
5076 icid
= le16_to_cpu(rsp
->icid
);
5077 result
= le16_to_cpu(rsp
->result
);
5079 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5081 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5082 l2cap_move_continue(conn
, icid
, result
);
5084 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5089 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5090 struct l2cap_cmd_hdr
*cmd
,
5091 u16 cmd_len
, void *data
)
5093 struct l2cap_move_chan_cfm
*cfm
= data
;
5094 struct l2cap_chan
*chan
;
5097 if (cmd_len
!= sizeof(*cfm
))
5100 icid
= le16_to_cpu(cfm
->icid
);
5101 result
= le16_to_cpu(cfm
->result
);
5103 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5105 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5107 /* Spec requires a response even if the icid was not found */
5108 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5112 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5113 if (result
== L2CAP_MC_CONFIRMED
) {
5114 chan
->local_amp_id
= chan
->move_id
;
5115 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5116 __release_logical_link(chan
);
5118 chan
->move_id
= chan
->local_amp_id
;
5121 l2cap_move_done(chan
);
5124 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5126 l2cap_chan_unlock(chan
);
5131 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5132 struct l2cap_cmd_hdr
*cmd
,
5133 u16 cmd_len
, void *data
)
5135 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5136 struct l2cap_chan
*chan
;
5139 if (cmd_len
!= sizeof(*rsp
))
5142 icid
= le16_to_cpu(rsp
->icid
);
5144 BT_DBG("icid 0x%4.4x", icid
);
5146 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5150 __clear_chan_timer(chan
);
5152 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5153 chan
->local_amp_id
= chan
->move_id
;
5155 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5156 __release_logical_link(chan
);
5158 l2cap_move_done(chan
);
5161 l2cap_chan_unlock(chan
);
5166 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5167 struct l2cap_cmd_hdr
*cmd
,
5168 u16 cmd_len
, u8
*data
)
5170 struct hci_conn
*hcon
= conn
->hcon
;
5171 struct l2cap_conn_param_update_req
*req
;
5172 struct l2cap_conn_param_update_rsp rsp
;
5173 u16 min
, max
, latency
, to_multiplier
;
5176 if (hcon
->role
!= HCI_ROLE_MASTER
)
5179 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5182 req
= (struct l2cap_conn_param_update_req
*) data
;
5183 min
= __le16_to_cpu(req
->min
);
5184 max
= __le16_to_cpu(req
->max
);
5185 latency
= __le16_to_cpu(req
->latency
);
5186 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5188 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5189 min
, max
, latency
, to_multiplier
);
5191 memset(&rsp
, 0, sizeof(rsp
));
5193 err
= hci_check_conn_params(min
, max
, latency
, to_multiplier
);
5195 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5197 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5199 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5205 store_hint
= hci_le_conn_update(hcon
, min
, max
, latency
,
5207 mgmt_new_conn_param(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
,
5208 store_hint
, min
, max
, latency
,
5216 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5217 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5220 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5221 u16 dcid
, mtu
, mps
, credits
, result
;
5222 struct l2cap_chan
*chan
;
5225 if (cmd_len
< sizeof(*rsp
))
5228 dcid
= __le16_to_cpu(rsp
->dcid
);
5229 mtu
= __le16_to_cpu(rsp
->mtu
);
5230 mps
= __le16_to_cpu(rsp
->mps
);
5231 credits
= __le16_to_cpu(rsp
->credits
);
5232 result
= __le16_to_cpu(rsp
->result
);
5234 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23))
5237 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5238 dcid
, mtu
, mps
, credits
, result
);
5240 mutex_lock(&conn
->chan_lock
);
5242 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5250 l2cap_chan_lock(chan
);
5253 case L2CAP_CR_SUCCESS
:
5257 chan
->remote_mps
= mps
;
5258 chan
->tx_credits
= credits
;
5259 l2cap_chan_ready(chan
);
5263 l2cap_chan_del(chan
, ECONNREFUSED
);
5267 l2cap_chan_unlock(chan
);
5270 mutex_unlock(&conn
->chan_lock
);
5275 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5276 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5281 switch (cmd
->code
) {
5282 case L2CAP_COMMAND_REJ
:
5283 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5286 case L2CAP_CONN_REQ
:
5287 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5290 case L2CAP_CONN_RSP
:
5291 case L2CAP_CREATE_CHAN_RSP
:
5292 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5295 case L2CAP_CONF_REQ
:
5296 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5299 case L2CAP_CONF_RSP
:
5300 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5303 case L2CAP_DISCONN_REQ
:
5304 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5307 case L2CAP_DISCONN_RSP
:
5308 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5311 case L2CAP_ECHO_REQ
:
5312 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5315 case L2CAP_ECHO_RSP
:
5318 case L2CAP_INFO_REQ
:
5319 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5322 case L2CAP_INFO_RSP
:
5323 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5326 case L2CAP_CREATE_CHAN_REQ
:
5327 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5330 case L2CAP_MOVE_CHAN_REQ
:
5331 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5334 case L2CAP_MOVE_CHAN_RSP
:
5335 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5338 case L2CAP_MOVE_CHAN_CFM
:
5339 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5342 case L2CAP_MOVE_CHAN_CFM_RSP
:
5343 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5347 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5355 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5356 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5359 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5360 struct l2cap_le_conn_rsp rsp
;
5361 struct l2cap_chan
*chan
, *pchan
;
5362 u16 dcid
, scid
, credits
, mtu
, mps
;
5366 if (cmd_len
!= sizeof(*req
))
5369 scid
= __le16_to_cpu(req
->scid
);
5370 mtu
= __le16_to_cpu(req
->mtu
);
5371 mps
= __le16_to_cpu(req
->mps
);
5376 if (mtu
< 23 || mps
< 23)
5379 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5382 /* Check if we have socket listening on psm */
5383 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5384 &conn
->hcon
->dst
, LE_LINK
);
5386 result
= L2CAP_CR_BAD_PSM
;
5391 mutex_lock(&conn
->chan_lock
);
5392 l2cap_chan_lock(pchan
);
5394 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
)) {
5395 result
= L2CAP_CR_AUTHENTICATION
;
5397 goto response_unlock
;
5400 /* Check if we already have channel with that dcid */
5401 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5402 result
= L2CAP_CR_NO_MEM
;
5404 goto response_unlock
;
5407 chan
= pchan
->ops
->new_connection(pchan
);
5409 result
= L2CAP_CR_NO_MEM
;
5410 goto response_unlock
;
5413 l2cap_le_flowctl_init(chan
);
5415 bacpy(&chan
->src
, &conn
->hcon
->src
);
5416 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5417 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
5418 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
5422 chan
->remote_mps
= mps
;
5423 chan
->tx_credits
= __le16_to_cpu(req
->credits
);
5425 __l2cap_chan_add(conn
, chan
);
5427 credits
= chan
->rx_credits
;
5429 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5431 chan
->ident
= cmd
->ident
;
5433 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5434 l2cap_state_change(chan
, BT_CONNECT2
);
5435 /* The following result value is actually not defined
5436 * for LE CoC but we use it to let the function know
5437 * that it should bail out after doing its cleanup
5438 * instead of sending a response.
5440 result
= L2CAP_CR_PEND
;
5441 chan
->ops
->defer(chan
);
5443 l2cap_chan_ready(chan
);
5444 result
= L2CAP_CR_SUCCESS
;
5448 l2cap_chan_unlock(pchan
);
5449 mutex_unlock(&conn
->chan_lock
);
5450 l2cap_chan_put(pchan
);
5452 if (result
== L2CAP_CR_PEND
)
5457 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5458 rsp
.mps
= cpu_to_le16(chan
->mps
);
5464 rsp
.dcid
= cpu_to_le16(dcid
);
5465 rsp
.credits
= cpu_to_le16(credits
);
5466 rsp
.result
= cpu_to_le16(result
);
5468 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
5473 static inline int l2cap_le_credits(struct l2cap_conn
*conn
,
5474 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5477 struct l2cap_le_credits
*pkt
;
5478 struct l2cap_chan
*chan
;
5479 u16 cid
, credits
, max_credits
;
5481 if (cmd_len
!= sizeof(*pkt
))
5484 pkt
= (struct l2cap_le_credits
*) data
;
5485 cid
= __le16_to_cpu(pkt
->cid
);
5486 credits
= __le16_to_cpu(pkt
->credits
);
5488 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid
, credits
);
5490 chan
= l2cap_get_chan_by_dcid(conn
, cid
);
5494 max_credits
= LE_FLOWCTL_MAX_CREDITS
- chan
->tx_credits
;
5495 if (credits
> max_credits
) {
5496 BT_ERR("LE credits overflow");
5497 l2cap_send_disconn_req(chan
, ECONNRESET
);
5498 l2cap_chan_unlock(chan
);
5500 /* Return 0 so that we don't trigger an unnecessary
5501 * command reject packet.
5506 chan
->tx_credits
+= credits
;
5508 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
5509 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
5513 if (chan
->tx_credits
)
5514 chan
->ops
->resume(chan
);
5516 l2cap_chan_unlock(chan
);
5521 static inline int l2cap_le_command_rej(struct l2cap_conn
*conn
,
5522 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5525 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
5526 struct l2cap_chan
*chan
;
5528 if (cmd_len
< sizeof(*rej
))
5531 mutex_lock(&conn
->chan_lock
);
5533 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5537 l2cap_chan_lock(chan
);
5538 l2cap_chan_del(chan
, ECONNREFUSED
);
5539 l2cap_chan_unlock(chan
);
5542 mutex_unlock(&conn
->chan_lock
);
5546 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5547 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5552 switch (cmd
->code
) {
5553 case L2CAP_COMMAND_REJ
:
5554 l2cap_le_command_rej(conn
, cmd
, cmd_len
, data
);
5557 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5558 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5561 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5564 case L2CAP_LE_CONN_RSP
:
5565 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5568 case L2CAP_LE_CONN_REQ
:
5569 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5572 case L2CAP_LE_CREDITS
:
5573 err
= l2cap_le_credits(conn
, cmd
, cmd_len
, data
);
5576 case L2CAP_DISCONN_REQ
:
5577 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5580 case L2CAP_DISCONN_RSP
:
5581 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5585 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5593 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5594 struct sk_buff
*skb
)
5596 struct hci_conn
*hcon
= conn
->hcon
;
5597 struct l2cap_cmd_hdr
*cmd
;
5601 if (hcon
->type
!= LE_LINK
)
5604 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5607 cmd
= (void *) skb
->data
;
5608 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5610 len
= le16_to_cpu(cmd
->len
);
5612 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5614 if (len
!= skb
->len
|| !cmd
->ident
) {
5615 BT_DBG("corrupted command");
5619 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5621 struct l2cap_cmd_rej_unk rej
;
5623 BT_ERR("Wrong link type (%d)", err
);
5625 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5626 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5634 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5635 struct sk_buff
*skb
)
5637 struct hci_conn
*hcon
= conn
->hcon
;
5638 u8
*data
= skb
->data
;
5640 struct l2cap_cmd_hdr cmd
;
5643 l2cap_raw_recv(conn
, skb
);
5645 if (hcon
->type
!= ACL_LINK
)
5648 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5650 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5651 data
+= L2CAP_CMD_HDR_SIZE
;
5652 len
-= L2CAP_CMD_HDR_SIZE
;
5654 cmd_len
= le16_to_cpu(cmd
.len
);
5656 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5659 if (cmd_len
> len
|| !cmd
.ident
) {
5660 BT_DBG("corrupted command");
5664 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5666 struct l2cap_cmd_rej_unk rej
;
5668 BT_ERR("Wrong link type (%d)", err
);
5670 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5671 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5683 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5685 u16 our_fcs
, rcv_fcs
;
5688 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5689 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5691 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5693 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5694 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5695 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5696 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5698 if (our_fcs
!= rcv_fcs
)
5704 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5706 struct l2cap_ctrl control
;
5708 BT_DBG("chan %p", chan
);
5710 memset(&control
, 0, sizeof(control
));
5713 control
.reqseq
= chan
->buffer_seq
;
5714 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5716 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5717 control
.super
= L2CAP_SUPER_RNR
;
5718 l2cap_send_sframe(chan
, &control
);
5721 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5722 chan
->unacked_frames
> 0)
5723 __set_retrans_timer(chan
);
5725 /* Send pending iframes */
5726 l2cap_ertm_send(chan
);
5728 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5729 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5730 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5733 control
.super
= L2CAP_SUPER_RR
;
5734 l2cap_send_sframe(chan
, &control
);
5738 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5739 struct sk_buff
**last_frag
)
5741 /* skb->len reflects data in skb as well as all fragments
5742 * skb->data_len reflects only data in fragments
5744 if (!skb_has_frag_list(skb
))
5745 skb_shinfo(skb
)->frag_list
= new_frag
;
5747 new_frag
->next
= NULL
;
5749 (*last_frag
)->next
= new_frag
;
5750 *last_frag
= new_frag
;
5752 skb
->len
+= new_frag
->len
;
5753 skb
->data_len
+= new_frag
->len
;
5754 skb
->truesize
+= new_frag
->truesize
;
5757 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5758 struct l2cap_ctrl
*control
)
5762 switch (control
->sar
) {
5763 case L2CAP_SAR_UNSEGMENTED
:
5767 err
= chan
->ops
->recv(chan
, skb
);
5770 case L2CAP_SAR_START
:
5774 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5775 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5777 if (chan
->sdu_len
> chan
->imtu
) {
5782 if (skb
->len
>= chan
->sdu_len
)
5786 chan
->sdu_last_frag
= skb
;
5792 case L2CAP_SAR_CONTINUE
:
5796 append_skb_frag(chan
->sdu
, skb
,
5797 &chan
->sdu_last_frag
);
5800 if (chan
->sdu
->len
>= chan
->sdu_len
)
5810 append_skb_frag(chan
->sdu
, skb
,
5811 &chan
->sdu_last_frag
);
5814 if (chan
->sdu
->len
!= chan
->sdu_len
)
5817 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5820 /* Reassembly complete */
5822 chan
->sdu_last_frag
= NULL
;
5830 kfree_skb(chan
->sdu
);
5832 chan
->sdu_last_frag
= NULL
;
/* Re-segment outgoing data after a channel move changes the MTU.
 * Currently a stub that always succeeds.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5845 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5849 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5852 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5853 l2cap_tx(chan
, NULL
, NULL
, event
);
5856 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5859 /* Pass sequential frames to l2cap_reassemble_sdu()
5860 * until a gap is encountered.
5863 BT_DBG("chan %p", chan
);
5865 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5866 struct sk_buff
*skb
;
5867 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5868 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5870 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5875 skb_unlink(skb
, &chan
->srej_q
);
5876 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5877 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
5882 if (skb_queue_empty(&chan
->srej_q
)) {
5883 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5884 l2cap_send_ack(chan
);
5890 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5891 struct l2cap_ctrl
*control
)
5893 struct sk_buff
*skb
;
5895 BT_DBG("chan %p, control %p", chan
, control
);
5897 if (control
->reqseq
== chan
->next_tx_seq
) {
5898 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5899 l2cap_send_disconn_req(chan
, ECONNRESET
);
5903 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5906 BT_DBG("Seq %d not available for retransmission",
5911 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5912 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5913 l2cap_send_disconn_req(chan
, ECONNRESET
);
5917 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5919 if (control
->poll
) {
5920 l2cap_pass_to_tx(chan
, control
);
5922 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5923 l2cap_retransmit(chan
, control
);
5924 l2cap_ertm_send(chan
);
5926 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5927 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5928 chan
->srej_save_reqseq
= control
->reqseq
;
5931 l2cap_pass_to_tx_fbit(chan
, control
);
5933 if (control
->final
) {
5934 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5935 !test_and_clear_bit(CONN_SREJ_ACT
,
5937 l2cap_retransmit(chan
, control
);
5939 l2cap_retransmit(chan
, control
);
5940 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5941 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5942 chan
->srej_save_reqseq
= control
->reqseq
;
5948 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5949 struct l2cap_ctrl
*control
)
5951 struct sk_buff
*skb
;
5953 BT_DBG("chan %p, control %p", chan
, control
);
5955 if (control
->reqseq
== chan
->next_tx_seq
) {
5956 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5957 l2cap_send_disconn_req(chan
, ECONNRESET
);
5961 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5963 if (chan
->max_tx
&& skb
&&
5964 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5965 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5966 l2cap_send_disconn_req(chan
, ECONNRESET
);
5970 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5972 l2cap_pass_to_tx(chan
, control
);
5974 if (control
->final
) {
5975 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
5976 l2cap_retransmit_all(chan
, control
);
5978 l2cap_retransmit_all(chan
, control
);
5979 l2cap_ertm_send(chan
);
5980 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
5981 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
5985 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
5987 BT_DBG("chan %p, txseq %d", chan
, txseq
);
5989 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
5990 chan
->expected_tx_seq
);
5992 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
5993 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5995 /* See notes below regarding "double poll" and
5998 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5999 BT_DBG("Invalid/Ignore - after SREJ");
6000 return L2CAP_TXSEQ_INVALID_IGNORE
;
6002 BT_DBG("Invalid - in window after SREJ sent");
6003 return L2CAP_TXSEQ_INVALID
;
6007 if (chan
->srej_list
.head
== txseq
) {
6008 BT_DBG("Expected SREJ");
6009 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6012 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6013 BT_DBG("Duplicate SREJ - txseq already stored");
6014 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6017 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6018 BT_DBG("Unexpected SREJ - not requested");
6019 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6023 if (chan
->expected_tx_seq
== txseq
) {
6024 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6026 BT_DBG("Invalid - txseq outside tx window");
6027 return L2CAP_TXSEQ_INVALID
;
6030 return L2CAP_TXSEQ_EXPECTED
;
6034 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6035 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6036 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6037 return L2CAP_TXSEQ_DUPLICATE
;
6040 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6041 /* A source of invalid packets is a "double poll" condition,
6042 * where delays cause us to send multiple poll packets. If
6043 * the remote stack receives and processes both polls,
6044 * sequence numbers can wrap around in such a way that a
6045 * resent frame has a sequence number that looks like new data
6046 * with a sequence gap. This would trigger an erroneous SREJ
6049 * Fortunately, this is impossible with a tx window that's
6050 * less than half of the maximum sequence number, which allows
6051 * invalid frames to be safely ignored.
6053 * With tx window sizes greater than half of the tx window
6054 * maximum, the frame is invalid and cannot be ignored. This
6055 * causes a disconnect.
6058 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6059 BT_DBG("Invalid/Ignore - txseq outside tx window");
6060 return L2CAP_TXSEQ_INVALID_IGNORE
;
6062 BT_DBG("Invalid - txseq outside tx window");
6063 return L2CAP_TXSEQ_INVALID
;
6066 BT_DBG("Unexpected - txseq indicates missing frames");
6067 return L2CAP_TXSEQ_UNEXPECTED
;
6071 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6072 struct l2cap_ctrl
*control
,
6073 struct sk_buff
*skb
, u8 event
)
6076 bool skb_in_use
= false;
6078 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6082 case L2CAP_EV_RECV_IFRAME
:
6083 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6084 case L2CAP_TXSEQ_EXPECTED
:
6085 l2cap_pass_to_tx(chan
, control
);
6087 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6088 BT_DBG("Busy, discarding expected seq %d",
6093 chan
->expected_tx_seq
= __next_seq(chan
,
6096 chan
->buffer_seq
= chan
->expected_tx_seq
;
6099 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6103 if (control
->final
) {
6104 if (!test_and_clear_bit(CONN_REJ_ACT
,
6105 &chan
->conn_state
)) {
6107 l2cap_retransmit_all(chan
, control
);
6108 l2cap_ertm_send(chan
);
6112 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6113 l2cap_send_ack(chan
);
6115 case L2CAP_TXSEQ_UNEXPECTED
:
6116 l2cap_pass_to_tx(chan
, control
);
6118 /* Can't issue SREJ frames in the local busy state.
6119 * Drop this frame, it will be seen as missing
6120 * when local busy is exited.
6122 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6123 BT_DBG("Busy, discarding unexpected seq %d",
6128 /* There was a gap in the sequence, so an SREJ
6129 * must be sent for each missing frame. The
6130 * current frame is stored for later use.
6132 skb_queue_tail(&chan
->srej_q
, skb
);
6134 BT_DBG("Queued %p (queue len %d)", skb
,
6135 skb_queue_len(&chan
->srej_q
));
6137 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6138 l2cap_seq_list_clear(&chan
->srej_list
);
6139 l2cap_send_srej(chan
, control
->txseq
);
6141 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6143 case L2CAP_TXSEQ_DUPLICATE
:
6144 l2cap_pass_to_tx(chan
, control
);
6146 case L2CAP_TXSEQ_INVALID_IGNORE
:
6148 case L2CAP_TXSEQ_INVALID
:
6150 l2cap_send_disconn_req(chan
, ECONNRESET
);
6154 case L2CAP_EV_RECV_RR
:
6155 l2cap_pass_to_tx(chan
, control
);
6156 if (control
->final
) {
6157 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6159 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6160 !__chan_is_moving(chan
)) {
6162 l2cap_retransmit_all(chan
, control
);
6165 l2cap_ertm_send(chan
);
6166 } else if (control
->poll
) {
6167 l2cap_send_i_or_rr_or_rnr(chan
);
6169 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6170 &chan
->conn_state
) &&
6171 chan
->unacked_frames
)
6172 __set_retrans_timer(chan
);
6174 l2cap_ertm_send(chan
);
6177 case L2CAP_EV_RECV_RNR
:
6178 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6179 l2cap_pass_to_tx(chan
, control
);
6180 if (control
&& control
->poll
) {
6181 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6182 l2cap_send_rr_or_rnr(chan
, 0);
6184 __clear_retrans_timer(chan
);
6185 l2cap_seq_list_clear(&chan
->retrans_list
);
6187 case L2CAP_EV_RECV_REJ
:
6188 l2cap_handle_rej(chan
, control
);
6190 case L2CAP_EV_RECV_SREJ
:
6191 l2cap_handle_srej(chan
, control
);
6197 if (skb
&& !skb_in_use
) {
6198 BT_DBG("Freeing %p", skb
);
6205 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6206 struct l2cap_ctrl
*control
,
6207 struct sk_buff
*skb
, u8 event
)
6210 u16 txseq
= control
->txseq
;
6211 bool skb_in_use
= false;
6213 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6217 case L2CAP_EV_RECV_IFRAME
:
6218 switch (l2cap_classify_txseq(chan
, txseq
)) {
6219 case L2CAP_TXSEQ_EXPECTED
:
6220 /* Keep frame for reassembly later */
6221 l2cap_pass_to_tx(chan
, control
);
6222 skb_queue_tail(&chan
->srej_q
, skb
);
6224 BT_DBG("Queued %p (queue len %d)", skb
,
6225 skb_queue_len(&chan
->srej_q
));
6227 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6229 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6230 l2cap_seq_list_pop(&chan
->srej_list
);
6232 l2cap_pass_to_tx(chan
, control
);
6233 skb_queue_tail(&chan
->srej_q
, skb
);
6235 BT_DBG("Queued %p (queue len %d)", skb
,
6236 skb_queue_len(&chan
->srej_q
));
6238 err
= l2cap_rx_queued_iframes(chan
);
6243 case L2CAP_TXSEQ_UNEXPECTED
:
6244 /* Got a frame that can't be reassembled yet.
6245 * Save it for later, and send SREJs to cover
6246 * the missing frames.
6248 skb_queue_tail(&chan
->srej_q
, skb
);
6250 BT_DBG("Queued %p (queue len %d)", skb
,
6251 skb_queue_len(&chan
->srej_q
));
6253 l2cap_pass_to_tx(chan
, control
);
6254 l2cap_send_srej(chan
, control
->txseq
);
6256 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6257 /* This frame was requested with an SREJ, but
6258 * some expected retransmitted frames are
6259 * missing. Request retransmission of missing
6262 skb_queue_tail(&chan
->srej_q
, skb
);
6264 BT_DBG("Queued %p (queue len %d)", skb
,
6265 skb_queue_len(&chan
->srej_q
));
6267 l2cap_pass_to_tx(chan
, control
);
6268 l2cap_send_srej_list(chan
, control
->txseq
);
6270 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6271 /* We've already queued this frame. Drop this copy. */
6272 l2cap_pass_to_tx(chan
, control
);
6274 case L2CAP_TXSEQ_DUPLICATE
:
6275 /* Expecting a later sequence number, so this frame
6276 * was already received. Ignore it completely.
6279 case L2CAP_TXSEQ_INVALID_IGNORE
:
6281 case L2CAP_TXSEQ_INVALID
:
6283 l2cap_send_disconn_req(chan
, ECONNRESET
);
6287 case L2CAP_EV_RECV_RR
:
6288 l2cap_pass_to_tx(chan
, control
);
6289 if (control
->final
) {
6290 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6292 if (!test_and_clear_bit(CONN_REJ_ACT
,
6293 &chan
->conn_state
)) {
6295 l2cap_retransmit_all(chan
, control
);
6298 l2cap_ertm_send(chan
);
6299 } else if (control
->poll
) {
6300 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6301 &chan
->conn_state
) &&
6302 chan
->unacked_frames
) {
6303 __set_retrans_timer(chan
);
6306 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6307 l2cap_send_srej_tail(chan
);
6309 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6310 &chan
->conn_state
) &&
6311 chan
->unacked_frames
)
6312 __set_retrans_timer(chan
);
6314 l2cap_send_ack(chan
);
6317 case L2CAP_EV_RECV_RNR
:
6318 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6319 l2cap_pass_to_tx(chan
, control
);
6320 if (control
->poll
) {
6321 l2cap_send_srej_tail(chan
);
6323 struct l2cap_ctrl rr_control
;
6324 memset(&rr_control
, 0, sizeof(rr_control
));
6325 rr_control
.sframe
= 1;
6326 rr_control
.super
= L2CAP_SUPER_RR
;
6327 rr_control
.reqseq
= chan
->buffer_seq
;
6328 l2cap_send_sframe(chan
, &rr_control
);
6332 case L2CAP_EV_RECV_REJ
:
6333 l2cap_handle_rej(chan
, control
);
6335 case L2CAP_EV_RECV_SREJ
:
6336 l2cap_handle_srej(chan
, control
);
6340 if (skb
&& !skb_in_use
) {
6341 BT_DBG("Freeing %p", skb
);
6348 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6350 BT_DBG("chan %p", chan
);
6352 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6355 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6357 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6359 return l2cap_resegment(chan
);
6362 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6363 struct l2cap_ctrl
*control
,
6364 struct sk_buff
*skb
, u8 event
)
6368 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6374 l2cap_process_reqseq(chan
, control
->reqseq
);
6376 if (!skb_queue_empty(&chan
->tx_q
))
6377 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6379 chan
->tx_send_head
= NULL
;
6381 /* Rewind next_tx_seq to the point expected
6384 chan
->next_tx_seq
= control
->reqseq
;
6385 chan
->unacked_frames
= 0;
6387 err
= l2cap_finish_move(chan
);
6391 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6392 l2cap_send_i_or_rr_or_rnr(chan
);
6394 if (event
== L2CAP_EV_RECV_IFRAME
)
6397 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6400 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6401 struct l2cap_ctrl
*control
,
6402 struct sk_buff
*skb
, u8 event
)
6406 if (!control
->final
)
6409 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6411 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6412 l2cap_process_reqseq(chan
, control
->reqseq
);
6414 if (!skb_queue_empty(&chan
->tx_q
))
6415 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6417 chan
->tx_send_head
= NULL
;
6419 /* Rewind next_tx_seq to the point expected
6422 chan
->next_tx_seq
= control
->reqseq
;
6423 chan
->unacked_frames
= 0;
6426 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6428 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6430 err
= l2cap_resegment(chan
);
6433 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6438 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6440 /* Make sure reqseq is for a packet that has been sent but not acked */
6443 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6444 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6447 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6448 struct sk_buff
*skb
, u8 event
)
6452 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6453 control
, skb
, event
, chan
->rx_state
);
6455 if (__valid_reqseq(chan
, control
->reqseq
)) {
6456 switch (chan
->rx_state
) {
6457 case L2CAP_RX_STATE_RECV
:
6458 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6460 case L2CAP_RX_STATE_SREJ_SENT
:
6461 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6464 case L2CAP_RX_STATE_WAIT_P
:
6465 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6467 case L2CAP_RX_STATE_WAIT_F
:
6468 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6475 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6476 control
->reqseq
, chan
->next_tx_seq
,
6477 chan
->expected_ack_seq
);
6478 l2cap_send_disconn_req(chan
, ECONNRESET
);
6484 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6485 struct sk_buff
*skb
)
6489 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6492 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6493 L2CAP_TXSEQ_EXPECTED
) {
6494 l2cap_pass_to_tx(chan
, control
);
6496 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6497 __next_seq(chan
, chan
->buffer_seq
));
6499 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6501 l2cap_reassemble_sdu(chan
, skb
, control
);
6504 kfree_skb(chan
->sdu
);
6507 chan
->sdu_last_frag
= NULL
;
6511 BT_DBG("Freeing %p", skb
);
6516 chan
->last_acked_seq
= control
->txseq
;
6517 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6522 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6524 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6528 __unpack_control(chan
, skb
);
6533 * We can just drop the corrupted I-frame here.
6534 * Receiver will miss it and start proper recovery
6535 * procedures and ask for retransmission.
6537 if (l2cap_check_fcs(chan
, skb
))
6540 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6541 len
-= L2CAP_SDULEN_SIZE
;
6543 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6544 len
-= L2CAP_FCS_SIZE
;
6546 if (len
> chan
->mps
) {
6547 l2cap_send_disconn_req(chan
, ECONNRESET
);
6551 if (!control
->sframe
) {
6554 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6555 control
->sar
, control
->reqseq
, control
->final
,
6558 /* Validate F-bit - F=0 always valid, F=1 only
6559 * valid in TX WAIT_F
6561 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6564 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6565 event
= L2CAP_EV_RECV_IFRAME
;
6566 err
= l2cap_rx(chan
, control
, skb
, event
);
6568 err
= l2cap_stream_rx(chan
, control
, skb
);
6572 l2cap_send_disconn_req(chan
, ECONNRESET
);
6574 const u8 rx_func_to_event
[4] = {
6575 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6576 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6579 /* Only I-frames are expected in streaming mode */
6580 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6583 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6584 control
->reqseq
, control
->final
, control
->poll
,
6588 BT_ERR("Trailing bytes: %d in sframe", len
);
6589 l2cap_send_disconn_req(chan
, ECONNRESET
);
6593 /* Validate F and P bits */
6594 if (control
->final
&& (control
->poll
||
6595 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6598 event
= rx_func_to_event
[control
->super
];
6599 if (l2cap_rx(chan
, control
, skb
, event
))
6600 l2cap_send_disconn_req(chan
, ECONNRESET
);
6610 static void l2cap_chan_le_send_credits(struct l2cap_chan
*chan
)
6612 struct l2cap_conn
*conn
= chan
->conn
;
6613 struct l2cap_le_credits pkt
;
6616 /* We return more credits to the sender only after the amount of
6617 * credits falls below half of the initial amount.
6619 if (chan
->rx_credits
>= (le_max_credits
+ 1) / 2)
6622 return_credits
= le_max_credits
- chan
->rx_credits
;
6624 BT_DBG("chan %p returning %u credits to sender", chan
, return_credits
);
6626 chan
->rx_credits
+= return_credits
;
6628 pkt
.cid
= cpu_to_le16(chan
->scid
);
6629 pkt
.credits
= cpu_to_le16(return_credits
);
6631 chan
->ident
= l2cap_get_ident(conn
);
6633 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CREDITS
, sizeof(pkt
), &pkt
);
6636 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6640 if (!chan
->rx_credits
) {
6641 BT_ERR("No credits to receive LE L2CAP data");
6642 l2cap_send_disconn_req(chan
, ECONNRESET
);
6646 if (chan
->imtu
< skb
->len
) {
6647 BT_ERR("Too big LE L2CAP PDU");
6652 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6654 l2cap_chan_le_send_credits(chan
);
6661 sdu_len
= get_unaligned_le16(skb
->data
);
6662 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6664 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6665 sdu_len
, skb
->len
, chan
->imtu
);
6667 if (sdu_len
> chan
->imtu
) {
6668 BT_ERR("Too big LE L2CAP SDU length received");
6673 if (skb
->len
> sdu_len
) {
6674 BT_ERR("Too much LE L2CAP data received");
6679 if (skb
->len
== sdu_len
)
6680 return chan
->ops
->recv(chan
, skb
);
6683 chan
->sdu_len
= sdu_len
;
6684 chan
->sdu_last_frag
= skb
;
6689 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6690 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6692 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6693 BT_ERR("Too much LE L2CAP data received");
6698 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6701 if (chan
->sdu
->len
== chan
->sdu_len
) {
6702 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6705 chan
->sdu_last_frag
= NULL
;
6713 kfree_skb(chan
->sdu
);
6715 chan
->sdu_last_frag
= NULL
;
6719 /* We can't return an error here since we took care of the skb
6720 * freeing internally. An error return would cause the caller to
6721 * do a double-free of the skb.
6726 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6727 struct sk_buff
*skb
)
6729 struct l2cap_chan
*chan
;
6731 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6733 if (cid
== L2CAP_CID_A2MP
) {
6734 chan
= a2mp_channel_create(conn
, skb
);
6740 l2cap_chan_lock(chan
);
6742 BT_DBG("unknown cid 0x%4.4x", cid
);
6743 /* Drop packet and return */
6749 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6751 if (chan
->state
!= BT_CONNECTED
)
6754 switch (chan
->mode
) {
6755 case L2CAP_MODE_LE_FLOWCTL
:
6756 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6761 case L2CAP_MODE_BASIC
:
6762 /* If socket recv buffers overflows we drop data here
6763 * which is *bad* because L2CAP has to be reliable.
6764 * But we don't have any other choice. L2CAP doesn't
6765 * provide flow control mechanism. */
6767 if (chan
->imtu
< skb
->len
) {
6768 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6772 if (!chan
->ops
->recv(chan
, skb
))
6776 case L2CAP_MODE_ERTM
:
6777 case L2CAP_MODE_STREAMING
:
6778 l2cap_data_rcv(chan
, skb
);
6782 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6790 l2cap_chan_unlock(chan
);
6793 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6794 struct sk_buff
*skb
)
6796 struct hci_conn
*hcon
= conn
->hcon
;
6797 struct l2cap_chan
*chan
;
6799 if (hcon
->type
!= ACL_LINK
)
6802 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
6807 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6809 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6812 if (chan
->imtu
< skb
->len
)
6815 /* Store remote BD_ADDR and PSM for msg_name */
6816 bacpy(&bt_cb(skb
)->bdaddr
, &hcon
->dst
);
6817 bt_cb(skb
)->psm
= psm
;
6819 if (!chan
->ops
->recv(chan
, skb
)) {
6820 l2cap_chan_put(chan
);
6825 l2cap_chan_put(chan
);
6830 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6832 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6833 struct hci_conn
*hcon
= conn
->hcon
;
6837 if (hcon
->state
!= BT_CONNECTED
) {
6838 BT_DBG("queueing pending rx skb");
6839 skb_queue_tail(&conn
->pending_rx
, skb
);
6843 skb_pull(skb
, L2CAP_HDR_SIZE
);
6844 cid
= __le16_to_cpu(lh
->cid
);
6845 len
= __le16_to_cpu(lh
->len
);
6847 if (len
!= skb
->len
) {
6852 /* Since we can't actively block incoming LE connections we must
6853 * at least ensure that we ignore incoming data from them.
6855 if (hcon
->type
== LE_LINK
&&
6856 hci_bdaddr_list_lookup(&hcon
->hdev
->blacklist
, &hcon
->dst
,
6857 bdaddr_type(hcon
, hcon
->dst_type
))) {
6862 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6865 case L2CAP_CID_SIGNALING
:
6866 l2cap_sig_channel(conn
, skb
);
6869 case L2CAP_CID_CONN_LESS
:
6870 psm
= get_unaligned((__le16
*) skb
->data
);
6871 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6872 l2cap_conless_channel(conn
, psm
, skb
);
6875 case L2CAP_CID_LE_SIGNALING
:
6876 l2cap_le_sig_channel(conn
, skb
);
6880 l2cap_data_channel(conn
, cid
, skb
);
6885 static void process_pending_rx(struct work_struct
*work
)
6887 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
6889 struct sk_buff
*skb
;
6893 while ((skb
= skb_dequeue(&conn
->pending_rx
)))
6894 l2cap_recv_frame(conn
, skb
);
6897 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
6899 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6900 struct hci_chan
*hchan
;
6905 hchan
= hci_chan_create(hcon
);
6909 conn
= kzalloc(sizeof(*conn
), GFP_KERNEL
);
6911 hci_chan_del(hchan
);
6915 kref_init(&conn
->ref
);
6916 hcon
->l2cap_data
= conn
;
6917 conn
->hcon
= hci_conn_get(hcon
);
6918 conn
->hchan
= hchan
;
6920 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
6922 switch (hcon
->type
) {
6924 if (hcon
->hdev
->le_mtu
) {
6925 conn
->mtu
= hcon
->hdev
->le_mtu
;
6930 conn
->mtu
= hcon
->hdev
->acl_mtu
;
6934 conn
->feat_mask
= 0;
6936 if (hcon
->type
== ACL_LINK
)
6937 conn
->hs_enabled
= test_bit(HCI_HS_ENABLED
,
6938 &hcon
->hdev
->dev_flags
);
6940 mutex_init(&conn
->ident_lock
);
6941 mutex_init(&conn
->chan_lock
);
6943 INIT_LIST_HEAD(&conn
->chan_l
);
6944 INIT_LIST_HEAD(&conn
->users
);
6946 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
6948 skb_queue_head_init(&conn
->pending_rx
);
6949 INIT_WORK(&conn
->pending_rx_work
, process_pending_rx
);
6950 INIT_WORK(&conn
->id_addr_update_work
, l2cap_conn_update_id_addr
);
6952 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
6957 static bool is_valid_psm(u16 psm
, u8 dst_type
) {
6961 if (bdaddr_type_is_le(dst_type
))
6962 return (psm
<= 0x00ff);
6964 /* PSM must be odd and lsb of upper byte must be 0 */
6965 return ((psm
& 0x0101) == 0x0001);
6968 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
6969 bdaddr_t
*dst
, u8 dst_type
)
6971 struct l2cap_conn
*conn
;
6972 struct hci_conn
*hcon
;
6973 struct hci_dev
*hdev
;
6976 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
6977 dst_type
, __le16_to_cpu(psm
));
6979 hdev
= hci_get_route(dst
, &chan
->src
);
6981 return -EHOSTUNREACH
;
6985 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
6986 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
6991 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !psm
) {
6996 if (chan
->chan_type
== L2CAP_CHAN_FIXED
&& !cid
) {
7001 switch (chan
->mode
) {
7002 case L2CAP_MODE_BASIC
:
7004 case L2CAP_MODE_LE_FLOWCTL
:
7005 l2cap_le_flowctl_init(chan
);
7007 case L2CAP_MODE_ERTM
:
7008 case L2CAP_MODE_STREAMING
:
7017 switch (chan
->state
) {
7021 /* Already connecting */
7026 /* Already connected */
7040 /* Set destination address and psm */
7041 bacpy(&chan
->dst
, dst
);
7042 chan
->dst_type
= dst_type
;
7047 if (bdaddr_type_is_le(dst_type
)) {
7050 /* Convert from L2CAP channel address type to HCI address type
7052 if (dst_type
== BDADDR_LE_PUBLIC
)
7053 dst_type
= ADDR_LE_DEV_PUBLIC
;
7055 dst_type
= ADDR_LE_DEV_RANDOM
;
7057 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
7058 role
= HCI_ROLE_SLAVE
;
7060 role
= HCI_ROLE_MASTER
;
7062 hcon
= hci_connect_le(hdev
, dst
, dst_type
, chan
->sec_level
,
7063 HCI_LE_CONN_TIMEOUT
, role
);
7065 u8 auth_type
= l2cap_get_auth_type(chan
);
7066 hcon
= hci_connect_acl(hdev
, dst
, chan
->sec_level
, auth_type
);
7070 err
= PTR_ERR(hcon
);
7074 conn
= l2cap_conn_add(hcon
);
7076 hci_conn_drop(hcon
);
7081 mutex_lock(&conn
->chan_lock
);
7082 l2cap_chan_lock(chan
);
7084 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
7085 hci_conn_drop(hcon
);
7090 /* Update source addr of the socket */
7091 bacpy(&chan
->src
, &hcon
->src
);
7092 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
7094 __l2cap_chan_add(conn
, chan
);
7096 /* l2cap_chan_add takes its own ref so we can drop this one */
7097 hci_conn_drop(hcon
);
7099 l2cap_state_change(chan
, BT_CONNECT
);
7100 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
7102 /* Release chan->sport so that it can be reused by other
7103 * sockets (as it's only used for listening sockets).
7105 write_lock(&chan_list_lock
);
7107 write_unlock(&chan_list_lock
);
7109 if (hcon
->state
== BT_CONNECTED
) {
7110 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
7111 __clear_chan_timer(chan
);
7112 if (l2cap_chan_check_security(chan
, true))
7113 l2cap_state_change(chan
, BT_CONNECTED
);
7115 l2cap_do_start(chan
);
7121 l2cap_chan_unlock(chan
);
7122 mutex_unlock(&conn
->chan_lock
);
7124 hci_dev_unlock(hdev
);
7128 EXPORT_SYMBOL_GPL(l2cap_chan_connect
);
7130 /* ---- L2CAP interface with lower layer (HCI) ---- */
7132 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
7134 int exact
= 0, lm1
= 0, lm2
= 0;
7135 struct l2cap_chan
*c
;
7137 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
7139 /* Find listening sockets and check their link_mode */
7140 read_lock(&chan_list_lock
);
7141 list_for_each_entry(c
, &chan_list
, global_l
) {
7142 if (c
->state
!= BT_LISTEN
)
7145 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
7146 lm1
|= HCI_LM_ACCEPT
;
7147 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7148 lm1
|= HCI_LM_MASTER
;
7150 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
7151 lm2
|= HCI_LM_ACCEPT
;
7152 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7153 lm2
|= HCI_LM_MASTER
;
7156 read_unlock(&chan_list_lock
);
7158 return exact
? lm1
: lm2
;
7161 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7162 * from an existing channel in the list or from the beginning of the
7163 * global list (by passing NULL as first parameter).
7165 static struct l2cap_chan
*l2cap_global_fixed_chan(struct l2cap_chan
*c
,
7166 bdaddr_t
*src
, u8 link_type
)
7168 read_lock(&chan_list_lock
);
7171 c
= list_next_entry(c
, global_l
);
7173 c
= list_entry(chan_list
.next
, typeof(*c
), global_l
);
7175 list_for_each_entry_from(c
, &chan_list
, global_l
) {
7176 if (c
->chan_type
!= L2CAP_CHAN_FIXED
)
7178 if (c
->state
!= BT_LISTEN
)
7180 if (bacmp(&c
->src
, src
) && bacmp(&c
->src
, BDADDR_ANY
))
7182 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
7184 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
7188 read_unlock(&chan_list_lock
);
7192 read_unlock(&chan_list_lock
);
7197 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7199 struct hci_dev
*hdev
= hcon
->hdev
;
7200 struct l2cap_conn
*conn
;
7201 struct l2cap_chan
*pchan
;
7204 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7207 l2cap_conn_del(hcon
, bt_to_errno(status
));
7211 conn
= l2cap_conn_add(hcon
);
7215 dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
7217 /* If device is blocked, do not create channels for it */
7218 if (hci_bdaddr_list_lookup(&hdev
->blacklist
, &hcon
->dst
, dst_type
))
7221 /* Find fixed channels and notify them of the new connection. We
7222 * use multiple individual lookups, continuing each time where
7223 * we left off, because the list lock would prevent calling the
7224 * potentially sleeping l2cap_chan_lock() function.
7226 pchan
= l2cap_global_fixed_chan(NULL
, &hdev
->bdaddr
, hcon
->type
);
7228 struct l2cap_chan
*chan
, *next
;
7230 /* Client fixed channels should override server ones */
7231 if (__l2cap_get_chan_by_dcid(conn
, pchan
->scid
))
7234 l2cap_chan_lock(pchan
);
7235 chan
= pchan
->ops
->new_connection(pchan
);
7237 bacpy(&chan
->src
, &hcon
->src
);
7238 bacpy(&chan
->dst
, &hcon
->dst
);
7239 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
7240 chan
->dst_type
= dst_type
;
7242 __l2cap_chan_add(conn
, chan
);
7245 l2cap_chan_unlock(pchan
);
7247 next
= l2cap_global_fixed_chan(pchan
, &hdev
->bdaddr
,
7249 l2cap_chan_put(pchan
);
7253 l2cap_conn_ready(conn
);
7256 int l2cap_disconn_ind(struct hci_conn
*hcon
)
7258 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7260 BT_DBG("hcon %p", hcon
);
7263 return HCI_ERROR_REMOTE_USER_TERM
;
7264 return conn
->disc_reason
;
7267 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
7269 BT_DBG("hcon %p reason %d", hcon
, reason
);
7271 l2cap_conn_del(hcon
, bt_to_errno(reason
));
7274 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
7276 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
7279 if (encrypt
== 0x00) {
7280 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
7281 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
7282 } else if (chan
->sec_level
== BT_SECURITY_HIGH
||
7283 chan
->sec_level
== BT_SECURITY_FIPS
)
7284 l2cap_chan_close(chan
, ECONNREFUSED
);
7286 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
7287 __clear_chan_timer(chan
);
7291 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
7293 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7294 struct l2cap_chan
*chan
;
7299 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
7301 mutex_lock(&conn
->chan_lock
);
7303 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
7304 l2cap_chan_lock(chan
);
7306 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
7307 state_to_string(chan
->state
));
7309 if (chan
->scid
== L2CAP_CID_A2MP
) {
7310 l2cap_chan_unlock(chan
);
7314 if (!status
&& encrypt
)
7315 chan
->sec_level
= hcon
->sec_level
;
7317 if (!__l2cap_no_conn_pending(chan
)) {
7318 l2cap_chan_unlock(chan
);
7322 if (!status
&& (chan
->state
== BT_CONNECTED
||
7323 chan
->state
== BT_CONFIG
)) {
7324 chan
->ops
->resume(chan
);
7325 l2cap_check_encryption(chan
, encrypt
);
7326 l2cap_chan_unlock(chan
);
7330 if (chan
->state
== BT_CONNECT
) {
7332 l2cap_start_connection(chan
);
7334 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7335 } else if (chan
->state
== BT_CONNECT2
) {
7336 struct l2cap_conn_rsp rsp
;
7340 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
7341 res
= L2CAP_CR_PEND
;
7342 stat
= L2CAP_CS_AUTHOR_PEND
;
7343 chan
->ops
->defer(chan
);
7345 l2cap_state_change(chan
, BT_CONFIG
);
7346 res
= L2CAP_CR_SUCCESS
;
7347 stat
= L2CAP_CS_NO_INFO
;
7350 l2cap_state_change(chan
, BT_DISCONN
);
7351 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7352 res
= L2CAP_CR_SEC_BLOCK
;
7353 stat
= L2CAP_CS_NO_INFO
;
7356 rsp
.scid
= cpu_to_le16(chan
->dcid
);
7357 rsp
.dcid
= cpu_to_le16(chan
->scid
);
7358 rsp
.result
= cpu_to_le16(res
);
7359 rsp
.status
= cpu_to_le16(stat
);
7360 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
7363 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
7364 res
== L2CAP_CR_SUCCESS
) {
7366 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
7367 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
7369 l2cap_build_conf_req(chan
, buf
),
7371 chan
->num_conf_req
++;
7375 l2cap_chan_unlock(chan
);
7378 mutex_unlock(&conn
->chan_lock
);
7383 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
7385 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7386 struct l2cap_hdr
*hdr
;
7389 /* For AMP controller do not create l2cap conn */
7390 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
7394 conn
= l2cap_conn_add(hcon
);
7399 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
7403 case ACL_START_NO_FLUSH
:
7406 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
7407 kfree_skb(conn
->rx_skb
);
7408 conn
->rx_skb
= NULL
;
7410 l2cap_conn_unreliable(conn
, ECOMM
);
7413 /* Start fragment always begin with Basic L2CAP header */
7414 if (skb
->len
< L2CAP_HDR_SIZE
) {
7415 BT_ERR("Frame is too short (len %d)", skb
->len
);
7416 l2cap_conn_unreliable(conn
, ECOMM
);
7420 hdr
= (struct l2cap_hdr
*) skb
->data
;
7421 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7423 if (len
== skb
->len
) {
7424 /* Complete frame received */
7425 l2cap_recv_frame(conn
, skb
);
7429 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7431 if (skb
->len
> len
) {
7432 BT_ERR("Frame is too long (len %d, expected len %d)",
7434 l2cap_conn_unreliable(conn
, ECOMM
);
7438 /* Allocate skb for the complete frame (with header) */
7439 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7443 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7445 conn
->rx_len
= len
- skb
->len
;
7449 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7451 if (!conn
->rx_len
) {
7452 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7453 l2cap_conn_unreliable(conn
, ECOMM
);
7457 if (skb
->len
> conn
->rx_len
) {
7458 BT_ERR("Fragment is too long (len %d, expected %d)",
7459 skb
->len
, conn
->rx_len
);
7460 kfree_skb(conn
->rx_skb
);
7461 conn
->rx_skb
= NULL
;
7463 l2cap_conn_unreliable(conn
, ECOMM
);
7467 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7469 conn
->rx_len
-= skb
->len
;
7471 if (!conn
->rx_len
) {
7472 /* Complete frame received. l2cap_recv_frame
7473 * takes ownership of the skb so set the global
7474 * rx_skb pointer to NULL first.
7476 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7477 conn
->rx_skb
= NULL
;
7478 l2cap_recv_frame(conn
, rx_skb
);
7488 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
7490 struct l2cap_chan
*c
;
7492 read_lock(&chan_list_lock
);
7494 list_for_each_entry(c
, &chan_list
, global_l
) {
7495 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7497 c
->state
, __le16_to_cpu(c
->psm
),
7498 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
7499 c
->sec_level
, c
->mode
);
7502 read_unlock(&chan_list_lock
);
7507 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
7509 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
7512 static const struct file_operations l2cap_debugfs_fops
= {
7513 .open
= l2cap_debugfs_open
,
7515 .llseek
= seq_lseek
,
7516 .release
= single_release
,
/* Handle of the "l2cap" debugfs file, removed again in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
7521 int __init
l2cap_init(void)
7525 err
= l2cap_init_sockets();
7529 if (IS_ERR_OR_NULL(bt_debugfs
))
7532 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
7533 NULL
, &l2cap_debugfs_fops
);
7535 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs
,
7537 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs
,
7543 void l2cap_exit(void)
7545 debugfs_remove(l2cap_debugfs
);
7546 l2cap_cleanup_sockets();
7549 module_param(disable_ertm
, bool, 0644);
7550 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");