2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
47 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
| L2CAP_FC_CONNLESS
, };
49 static LIST_HEAD(chan_list
);
50 static DEFINE_RWLOCK(chan_list_lock
);
52 static u16 le_max_credits
= L2CAP_LE_MAX_CREDITS
;
53 static u16 le_default_mps
= L2CAP_LE_DEFAULT_MPS
;
55 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
56 u8 code
, u8 ident
, u16 dlen
, void *data
);
57 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
59 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
60 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
62 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
63 struct sk_buff_head
*skbs
, u8 event
);
65 static inline __u8
bdaddr_type(struct hci_conn
*hcon
, __u8 type
)
67 if (hcon
->type
== LE_LINK
) {
68 if (type
== ADDR_LE_DEV_PUBLIC
)
69 return BDADDR_LE_PUBLIC
;
71 return BDADDR_LE_RANDOM
;
77 /* ---- L2CAP channels ---- */
79 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
84 list_for_each_entry(c
, &conn
->chan_l
, list
) {
91 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
96 list_for_each_entry(c
, &conn
->chan_l
, list
) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
108 struct l2cap_chan
*c
;
110 mutex_lock(&conn
->chan_lock
);
111 c
= __l2cap_get_chan_by_scid(conn
, cid
);
114 mutex_unlock(&conn
->chan_lock
);
119 /* Find channel with given DCID.
120 * Returns locked channel.
122 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
125 struct l2cap_chan
*c
;
127 mutex_lock(&conn
->chan_lock
);
128 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
131 mutex_unlock(&conn
->chan_lock
);
136 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
139 struct l2cap_chan
*c
;
141 list_for_each_entry(c
, &conn
->chan_l
, list
) {
142 if (c
->ident
== ident
)
148 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
151 struct l2cap_chan
*c
;
153 mutex_lock(&conn
->chan_lock
);
154 c
= __l2cap_get_chan_by_ident(conn
, ident
);
157 mutex_unlock(&conn
->chan_lock
);
162 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
164 struct l2cap_chan
*c
;
166 list_for_each_entry(c
, &chan_list
, global_l
) {
167 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
173 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
177 write_lock(&chan_list_lock
);
179 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
192 for (p
= 0x1001; p
< 0x1100; p
+= 2)
193 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
194 chan
->psm
= cpu_to_le16(p
);
195 chan
->sport
= cpu_to_le16(p
);
202 write_unlock(&chan_list_lock
);
206 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
208 write_lock(&chan_list_lock
);
212 write_unlock(&chan_list_lock
);
217 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
221 if (conn
->hcon
->type
== LE_LINK
)
222 dyn_end
= L2CAP_CID_LE_DYN_END
;
224 dyn_end
= L2CAP_CID_DYN_END
;
226 for (cid
= L2CAP_CID_DYN_START
; cid
< dyn_end
; cid
++) {
227 if (!__l2cap_get_chan_by_scid(conn
, cid
))
234 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
236 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
237 state_to_string(state
));
240 chan
->ops
->state_change(chan
, state
, 0);
243 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
247 chan
->ops
->state_change(chan
, chan
->state
, err
);
250 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
252 chan
->ops
->state_change(chan
, chan
->state
, err
);
255 static void __set_retrans_timer(struct l2cap_chan
*chan
)
257 if (!delayed_work_pending(&chan
->monitor_timer
) &&
258 chan
->retrans_timeout
) {
259 l2cap_set_timer(chan
, &chan
->retrans_timer
,
260 msecs_to_jiffies(chan
->retrans_timeout
));
264 static void __set_monitor_timer(struct l2cap_chan
*chan
)
266 __clear_retrans_timer(chan
);
267 if (chan
->monitor_timeout
) {
268 l2cap_set_timer(chan
, &chan
->monitor_timer
,
269 msecs_to_jiffies(chan
->monitor_timeout
));
273 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
278 skb_queue_walk(head
, skb
) {
279 if (bt_cb(skb
)->control
.txseq
== seq
)
286 /* ---- L2CAP sequence number lists ---- */
288 /* For ERTM, ordered lists of sequence numbers must be tracked for
289 * SREJ requests that are received and for frames that are to be
290 * retransmitted. These seq_list functions implement a singly-linked
291 * list in an array, where membership in the list can also be checked
292 * in constant time. Items can also be added to the tail of the list
293 * and removed from the head in constant time, without further memory
297 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
299 size_t alloc_size
, i
;
301 /* Allocated size is a power of 2 to map sequence numbers
302 * (which may be up to 14 bits) in to a smaller array that is
303 * sized for the negotiated ERTM transmit windows.
305 alloc_size
= roundup_pow_of_two(size
);
307 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
311 seq_list
->mask
= alloc_size
- 1;
312 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
313 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
314 for (i
= 0; i
< alloc_size
; i
++)
315 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
320 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
322 kfree(seq_list
->list
);
325 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
328 /* Constant-time check for list membership */
329 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
332 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
334 u16 mask
= seq_list
->mask
;
336 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
337 /* In case someone tries to pop the head of an empty list */
338 return L2CAP_SEQ_LIST_CLEAR
;
339 } else if (seq_list
->head
== seq
) {
340 /* Head can be removed in constant time */
341 seq_list
->head
= seq_list
->list
[seq
& mask
];
342 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
344 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
345 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
346 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
349 /* Walk the list to find the sequence number */
350 u16 prev
= seq_list
->head
;
351 while (seq_list
->list
[prev
& mask
] != seq
) {
352 prev
= seq_list
->list
[prev
& mask
];
353 if (prev
== L2CAP_SEQ_LIST_TAIL
)
354 return L2CAP_SEQ_LIST_CLEAR
;
357 /* Unlink the number from the list and clear it */
358 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
359 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
360 if (seq_list
->tail
== seq
)
361 seq_list
->tail
= prev
;
366 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
368 /* Remove the head in constant time */
369 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
372 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
376 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
379 for (i
= 0; i
<= seq_list
->mask
; i
++)
380 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
382 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
383 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
386 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
388 u16 mask
= seq_list
->mask
;
390 /* All appends happen in constant time */
392 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
395 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
396 seq_list
->head
= seq
;
398 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
400 seq_list
->tail
= seq
;
401 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
404 static void l2cap_chan_timeout(struct work_struct
*work
)
406 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
408 struct l2cap_conn
*conn
= chan
->conn
;
411 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
413 mutex_lock(&conn
->chan_lock
);
414 l2cap_chan_lock(chan
);
416 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
417 reason
= ECONNREFUSED
;
418 else if (chan
->state
== BT_CONNECT
&&
419 chan
->sec_level
!= BT_SECURITY_SDP
)
420 reason
= ECONNREFUSED
;
424 l2cap_chan_close(chan
, reason
);
426 l2cap_chan_unlock(chan
);
428 chan
->ops
->close(chan
);
429 mutex_unlock(&conn
->chan_lock
);
431 l2cap_chan_put(chan
);
434 struct l2cap_chan
*l2cap_chan_create(void)
436 struct l2cap_chan
*chan
;
438 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
442 mutex_init(&chan
->lock
);
444 write_lock(&chan_list_lock
);
445 list_add(&chan
->global_l
, &chan_list
);
446 write_unlock(&chan_list_lock
);
448 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
450 chan
->state
= BT_OPEN
;
452 kref_init(&chan
->kref
);
454 /* This flag is cleared in l2cap_chan_ready() */
455 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
457 BT_DBG("chan %p", chan
);
462 static void l2cap_chan_destroy(struct kref
*kref
)
464 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
466 BT_DBG("chan %p", chan
);
468 write_lock(&chan_list_lock
);
469 list_del(&chan
->global_l
);
470 write_unlock(&chan_list_lock
);
475 void l2cap_chan_hold(struct l2cap_chan
*c
)
477 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
482 void l2cap_chan_put(struct l2cap_chan
*c
)
484 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
486 kref_put(&c
->kref
, l2cap_chan_destroy
);
489 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
491 chan
->fcs
= L2CAP_FCS_CRC16
;
492 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
493 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
494 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
495 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
496 chan
->sec_level
= BT_SECURITY_LOW
;
498 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
501 void l2cap_le_flowctl_init(struct l2cap_chan
*chan
)
503 chan
->imtu
= L2CAP_DEFAULT_MTU
;
504 chan
->omtu
= L2CAP_LE_MIN_MTU
;
505 chan
->mode
= L2CAP_MODE_LE_FLOWCTL
;
506 chan
->tx_credits
= 0;
507 chan
->rx_credits
= le_max_credits
;
509 if (chan
->imtu
< L2CAP_LE_DEFAULT_MPS
)
510 chan
->mps
= chan
->imtu
;
512 chan
->mps
= L2CAP_LE_DEFAULT_MPS
;
515 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
517 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
518 __le16_to_cpu(chan
->psm
), chan
->dcid
);
520 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
524 switch (chan
->chan_type
) {
525 case L2CAP_CHAN_CONN_ORIENTED
:
526 if (conn
->hcon
->type
== LE_LINK
) {
527 if (chan
->dcid
== L2CAP_CID_ATT
) {
528 chan
->omtu
= L2CAP_DEFAULT_MTU
;
529 chan
->scid
= L2CAP_CID_ATT
;
531 chan
->scid
= l2cap_alloc_cid(conn
);
534 /* Alloc CID for connection-oriented socket */
535 chan
->scid
= l2cap_alloc_cid(conn
);
536 chan
->omtu
= L2CAP_DEFAULT_MTU
;
540 case L2CAP_CHAN_CONN_LESS
:
541 /* Connectionless socket */
542 chan
->scid
= L2CAP_CID_CONN_LESS
;
543 chan
->dcid
= L2CAP_CID_CONN_LESS
;
544 chan
->omtu
= L2CAP_DEFAULT_MTU
;
547 case L2CAP_CHAN_CONN_FIX_A2MP
:
548 chan
->scid
= L2CAP_CID_A2MP
;
549 chan
->dcid
= L2CAP_CID_A2MP
;
550 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
551 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
555 /* Raw socket can send/recv signalling messages only */
556 chan
->scid
= L2CAP_CID_SIGNALING
;
557 chan
->dcid
= L2CAP_CID_SIGNALING
;
558 chan
->omtu
= L2CAP_DEFAULT_MTU
;
561 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
562 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
563 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
564 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
565 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
566 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
568 l2cap_chan_hold(chan
);
570 hci_conn_hold(conn
->hcon
);
572 list_add(&chan
->list
, &conn
->chan_l
);
575 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
577 mutex_lock(&conn
->chan_lock
);
578 __l2cap_chan_add(conn
, chan
);
579 mutex_unlock(&conn
->chan_lock
);
582 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
584 struct l2cap_conn
*conn
= chan
->conn
;
586 __clear_chan_timer(chan
);
588 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
591 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
592 /* Delete from channel list */
593 list_del(&chan
->list
);
595 l2cap_chan_put(chan
);
599 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
600 hci_conn_drop(conn
->hcon
);
602 if (mgr
&& mgr
->bredr_chan
== chan
)
603 mgr
->bredr_chan
= NULL
;
606 if (chan
->hs_hchan
) {
607 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
609 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
610 amp_disconnect_logical_link(hs_hchan
);
613 chan
->ops
->teardown(chan
, err
);
615 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
619 case L2CAP_MODE_BASIC
:
622 case L2CAP_MODE_LE_FLOWCTL
:
623 skb_queue_purge(&chan
->tx_q
);
626 case L2CAP_MODE_ERTM
:
627 __clear_retrans_timer(chan
);
628 __clear_monitor_timer(chan
);
629 __clear_ack_timer(chan
);
631 skb_queue_purge(&chan
->srej_q
);
633 l2cap_seq_list_free(&chan
->srej_list
);
634 l2cap_seq_list_free(&chan
->retrans_list
);
638 case L2CAP_MODE_STREAMING
:
639 skb_queue_purge(&chan
->tx_q
);
646 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
648 struct l2cap_conn
*conn
= chan
->conn
;
649 struct l2cap_le_conn_rsp rsp
;
652 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
653 result
= L2CAP_CR_AUTHORIZATION
;
655 result
= L2CAP_CR_BAD_PSM
;
657 l2cap_state_change(chan
, BT_DISCONN
);
659 rsp
.dcid
= cpu_to_le16(chan
->scid
);
660 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
661 rsp
.mps
= cpu_to_le16(chan
->mps
);
662 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
663 rsp
.result
= cpu_to_le16(result
);
665 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
669 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
671 struct l2cap_conn
*conn
= chan
->conn
;
672 struct l2cap_conn_rsp rsp
;
675 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
676 result
= L2CAP_CR_SEC_BLOCK
;
678 result
= L2CAP_CR_BAD_PSM
;
680 l2cap_state_change(chan
, BT_DISCONN
);
682 rsp
.scid
= cpu_to_le16(chan
->dcid
);
683 rsp
.dcid
= cpu_to_le16(chan
->scid
);
684 rsp
.result
= cpu_to_le16(result
);
685 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
687 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
690 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
692 struct l2cap_conn
*conn
= chan
->conn
;
694 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
696 switch (chan
->state
) {
698 chan
->ops
->teardown(chan
, 0);
703 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
704 * check for chan->psm.
706 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& chan
->psm
) {
707 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
708 l2cap_send_disconn_req(chan
, reason
);
710 l2cap_chan_del(chan
, reason
);
714 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
715 if (conn
->hcon
->type
== ACL_LINK
)
716 l2cap_chan_connect_reject(chan
);
717 else if (conn
->hcon
->type
== LE_LINK
)
718 l2cap_chan_le_connect_reject(chan
);
721 l2cap_chan_del(chan
, reason
);
726 l2cap_chan_del(chan
, reason
);
730 chan
->ops
->teardown(chan
, 0);
735 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
737 switch (chan
->chan_type
) {
739 switch (chan
->sec_level
) {
740 case BT_SECURITY_HIGH
:
741 return HCI_AT_DEDICATED_BONDING_MITM
;
742 case BT_SECURITY_MEDIUM
:
743 return HCI_AT_DEDICATED_BONDING
;
745 return HCI_AT_NO_BONDING
;
748 case L2CAP_CHAN_CONN_LESS
:
749 if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_3DSP
)) {
750 if (chan
->sec_level
== BT_SECURITY_LOW
)
751 chan
->sec_level
= BT_SECURITY_SDP
;
753 if (chan
->sec_level
== BT_SECURITY_HIGH
)
754 return HCI_AT_NO_BONDING_MITM
;
756 return HCI_AT_NO_BONDING
;
758 case L2CAP_CHAN_CONN_ORIENTED
:
759 if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
760 if (chan
->sec_level
== BT_SECURITY_LOW
)
761 chan
->sec_level
= BT_SECURITY_SDP
;
763 if (chan
->sec_level
== BT_SECURITY_HIGH
)
764 return HCI_AT_NO_BONDING_MITM
;
766 return HCI_AT_NO_BONDING
;
770 switch (chan
->sec_level
) {
771 case BT_SECURITY_HIGH
:
772 return HCI_AT_GENERAL_BONDING_MITM
;
773 case BT_SECURITY_MEDIUM
:
774 return HCI_AT_GENERAL_BONDING
;
776 return HCI_AT_NO_BONDING
;
782 /* Service level security */
783 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
785 struct l2cap_conn
*conn
= chan
->conn
;
788 if (conn
->hcon
->type
== LE_LINK
)
789 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
791 auth_type
= l2cap_get_auth_type(chan
);
793 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
796 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
800 /* Get next available identificator.
801 * 1 - 128 are used by kernel.
802 * 129 - 199 are reserved.
803 * 200 - 254 are used by utilities like l2ping, etc.
806 spin_lock(&conn
->lock
);
808 if (++conn
->tx_ident
> 128)
813 spin_unlock(&conn
->lock
);
818 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
821 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
824 BT_DBG("code 0x%2.2x", code
);
829 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
830 flags
= ACL_START_NO_FLUSH
;
834 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
835 skb
->priority
= HCI_PRIO_MAX
;
837 hci_send_acl(conn
->hchan
, skb
, flags
);
840 static bool __chan_is_moving(struct l2cap_chan
*chan
)
842 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
843 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
846 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
848 struct hci_conn
*hcon
= chan
->conn
->hcon
;
851 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
854 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
856 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
863 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
864 lmp_no_flush_capable(hcon
->hdev
))
865 flags
= ACL_START_NO_FLUSH
;
869 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
870 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
873 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
875 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
876 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
878 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
881 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
882 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
889 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
890 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
897 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
899 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
900 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
902 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
905 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
906 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
913 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
914 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
921 static inline void __unpack_control(struct l2cap_chan
*chan
,
924 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
925 __unpack_extended_control(get_unaligned_le32(skb
->data
),
926 &bt_cb(skb
)->control
);
927 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
929 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
930 &bt_cb(skb
)->control
);
931 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
935 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
939 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
940 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
942 if (control
->sframe
) {
943 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
944 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
945 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
947 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
948 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
954 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
958 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
959 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
961 if (control
->sframe
) {
962 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
963 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
964 packed
|= L2CAP_CTRL_FRAME_TYPE
;
966 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
967 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
973 static inline void __pack_control(struct l2cap_chan
*chan
,
974 struct l2cap_ctrl
*control
,
977 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
978 put_unaligned_le32(__pack_extended_control(control
),
979 skb
->data
+ L2CAP_HDR_SIZE
);
981 put_unaligned_le16(__pack_enhanced_control(control
),
982 skb
->data
+ L2CAP_HDR_SIZE
);
986 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
988 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
989 return L2CAP_EXT_HDR_SIZE
;
991 return L2CAP_ENH_HDR_SIZE
;
994 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
998 struct l2cap_hdr
*lh
;
999 int hlen
= __ertm_hdr_size(chan
);
1001 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1002 hlen
+= L2CAP_FCS_SIZE
;
1004 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
1007 return ERR_PTR(-ENOMEM
);
1009 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1010 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
1011 lh
->cid
= cpu_to_le16(chan
->dcid
);
1013 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1014 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1016 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1018 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1019 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1020 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1023 skb
->priority
= HCI_PRIO_MAX
;
1027 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1028 struct l2cap_ctrl
*control
)
1030 struct sk_buff
*skb
;
1033 BT_DBG("chan %p, control %p", chan
, control
);
1035 if (!control
->sframe
)
1038 if (__chan_is_moving(chan
))
1041 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1045 if (control
->super
== L2CAP_SUPER_RR
)
1046 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1047 else if (control
->super
== L2CAP_SUPER_RNR
)
1048 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1050 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1051 chan
->last_acked_seq
= control
->reqseq
;
1052 __clear_ack_timer(chan
);
1055 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1056 control
->final
, control
->poll
, control
->super
);
1058 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1059 control_field
= __pack_extended_control(control
);
1061 control_field
= __pack_enhanced_control(control
);
1063 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1065 l2cap_do_send(chan
, skb
);
1068 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1070 struct l2cap_ctrl control
;
1072 BT_DBG("chan %p, poll %d", chan
, poll
);
1074 memset(&control
, 0, sizeof(control
));
1076 control
.poll
= poll
;
1078 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1079 control
.super
= L2CAP_SUPER_RNR
;
1081 control
.super
= L2CAP_SUPER_RR
;
1083 control
.reqseq
= chan
->buffer_seq
;
1084 l2cap_send_sframe(chan
, &control
);
1087 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1089 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1092 static bool __amp_capable(struct l2cap_chan
*chan
)
1094 struct l2cap_conn
*conn
= chan
->conn
;
1095 struct hci_dev
*hdev
;
1096 bool amp_available
= false;
1098 if (!conn
->hs_enabled
)
1101 if (!(conn
->fixed_chan_mask
& L2CAP_FC_A2MP
))
1104 read_lock(&hci_dev_list_lock
);
1105 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1106 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1107 test_bit(HCI_UP
, &hdev
->flags
)) {
1108 amp_available
= true;
1112 read_unlock(&hci_dev_list_lock
);
1114 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1115 return amp_available
;
1120 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1122 /* Check EFS parameters */
1126 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1128 struct l2cap_conn
*conn
= chan
->conn
;
1129 struct l2cap_conn_req req
;
1131 req
.scid
= cpu_to_le16(chan
->scid
);
1132 req
.psm
= chan
->psm
;
1134 chan
->ident
= l2cap_get_ident(conn
);
1136 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1138 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1141 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1143 struct l2cap_create_chan_req req
;
1144 req
.scid
= cpu_to_le16(chan
->scid
);
1145 req
.psm
= chan
->psm
;
1146 req
.amp_id
= amp_id
;
1148 chan
->ident
= l2cap_get_ident(chan
->conn
);
1150 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1154 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1156 struct sk_buff
*skb
;
1158 BT_DBG("chan %p", chan
);
1160 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1163 __clear_retrans_timer(chan
);
1164 __clear_monitor_timer(chan
);
1165 __clear_ack_timer(chan
);
1167 chan
->retry_count
= 0;
1168 skb_queue_walk(&chan
->tx_q
, skb
) {
1169 if (bt_cb(skb
)->control
.retries
)
1170 bt_cb(skb
)->control
.retries
= 1;
1175 chan
->expected_tx_seq
= chan
->buffer_seq
;
1177 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1178 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1179 l2cap_seq_list_clear(&chan
->retrans_list
);
1180 l2cap_seq_list_clear(&chan
->srej_list
);
1181 skb_queue_purge(&chan
->srej_q
);
1183 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1184 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1186 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1189 static void l2cap_move_done(struct l2cap_chan
*chan
)
1191 u8 move_role
= chan
->move_role
;
1192 BT_DBG("chan %p", chan
);
1194 chan
->move_state
= L2CAP_MOVE_STABLE
;
1195 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1197 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1200 switch (move_role
) {
1201 case L2CAP_MOVE_ROLE_INITIATOR
:
1202 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1203 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1205 case L2CAP_MOVE_ROLE_RESPONDER
:
1206 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1211 static void l2cap_le_flowctl_start(struct l2cap_chan
*chan
)
1214 chan
->sdu_last_frag
= NULL
;
1217 if (chan
->imtu
< L2CAP_LE_DEFAULT_MPS
)
1218 chan
->mps
= chan
->imtu
;
1220 chan
->mps
= le_default_mps
;
1222 skb_queue_head_init(&chan
->tx_q
);
1224 if (!chan
->tx_credits
)
1225 chan
->ops
->suspend(chan
);
1228 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1230 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1231 chan
->conf_state
= 0;
1232 __clear_chan_timer(chan
);
1234 if (chan
->mode
== L2CAP_MODE_LE_FLOWCTL
)
1235 l2cap_le_flowctl_start(chan
);
1237 chan
->state
= BT_CONNECTED
;
1239 chan
->ops
->ready(chan
);
1242 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1244 struct l2cap_conn
*conn
= chan
->conn
;
1245 struct l2cap_le_conn_req req
;
1247 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
))
1250 req
.psm
= chan
->psm
;
1251 req
.scid
= cpu_to_le16(chan
->scid
);
1252 req
.mtu
= cpu_to_le16(chan
->imtu
);
1253 req
.mps
= cpu_to_le16(chan
->mps
);
1254 req
.credits
= cpu_to_le16(chan
->rx_credits
);
1256 chan
->ident
= l2cap_get_ident(conn
);
1258 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1262 static void l2cap_le_start(struct l2cap_chan
*chan
)
1264 struct l2cap_conn
*conn
= chan
->conn
;
1266 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1270 l2cap_chan_ready(chan
);
1274 if (chan
->state
== BT_CONNECT
)
1275 l2cap_le_connect(chan
);
1278 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1280 if (__amp_capable(chan
)) {
1281 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1282 a2mp_discover_amp(chan
);
1283 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1284 l2cap_le_start(chan
);
1286 l2cap_send_conn_req(chan
);
1290 static void l2cap_do_start(struct l2cap_chan
*chan
)
1292 struct l2cap_conn
*conn
= chan
->conn
;
1294 if (conn
->hcon
->type
== LE_LINK
) {
1295 l2cap_le_start(chan
);
1299 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1300 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1303 if (l2cap_chan_check_security(chan
) &&
1304 __l2cap_no_conn_pending(chan
)) {
1305 l2cap_start_connection(chan
);
1308 struct l2cap_info_req req
;
1309 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1311 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1312 conn
->info_ident
= l2cap_get_ident(conn
);
1314 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1316 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1321 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1323 u32 local_feat_mask
= l2cap_feat_mask
;
1325 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1328 case L2CAP_MODE_ERTM
:
1329 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1330 case L2CAP_MODE_STREAMING
:
1331 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1337 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1339 struct l2cap_conn
*conn
= chan
->conn
;
1340 struct l2cap_disconn_req req
;
1345 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1346 __clear_retrans_timer(chan
);
1347 __clear_monitor_timer(chan
);
1348 __clear_ack_timer(chan
);
1351 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1352 l2cap_state_change(chan
, BT_DISCONN
);
1356 req
.dcid
= cpu_to_le16(chan
->dcid
);
1357 req
.scid
= cpu_to_le16(chan
->scid
);
1358 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1361 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1364 /* ---- L2CAP connections ---- */
1365 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1367 struct l2cap_chan
*chan
, *tmp
;
1369 BT_DBG("conn %p", conn
);
1371 mutex_lock(&conn
->chan_lock
);
1373 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1374 l2cap_chan_lock(chan
);
1376 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1377 l2cap_chan_unlock(chan
);
1381 if (chan
->state
== BT_CONNECT
) {
1382 if (!l2cap_chan_check_security(chan
) ||
1383 !__l2cap_no_conn_pending(chan
)) {
1384 l2cap_chan_unlock(chan
);
1388 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1389 && test_bit(CONF_STATE2_DEVICE
,
1390 &chan
->conf_state
)) {
1391 l2cap_chan_close(chan
, ECONNRESET
);
1392 l2cap_chan_unlock(chan
);
1396 l2cap_start_connection(chan
);
1398 } else if (chan
->state
== BT_CONNECT2
) {
1399 struct l2cap_conn_rsp rsp
;
1401 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1402 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1404 if (l2cap_chan_check_security(chan
)) {
1405 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1406 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1407 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1408 chan
->ops
->defer(chan
);
1411 l2cap_state_change(chan
, BT_CONFIG
);
1412 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1413 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1416 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1417 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1420 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1423 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1424 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1425 l2cap_chan_unlock(chan
);
1429 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1430 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1431 l2cap_build_conf_req(chan
, buf
), buf
);
1432 chan
->num_conf_req
++;
1435 l2cap_chan_unlock(chan
);
1438 mutex_unlock(&conn
->chan_lock
);
1441 /* Find socket with cid and source/destination bdaddr.
1442 * Returns closest match, locked.
1444 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1448 struct l2cap_chan
*c
, *c1
= NULL
;
1450 read_lock(&chan_list_lock
);
1452 list_for_each_entry(c
, &chan_list
, global_l
) {
1453 if (state
&& c
->state
!= state
)
1456 if (c
->scid
== cid
) {
1457 int src_match
, dst_match
;
1458 int src_any
, dst_any
;
1461 src_match
= !bacmp(&c
->src
, src
);
1462 dst_match
= !bacmp(&c
->dst
, dst
);
1463 if (src_match
&& dst_match
) {
1464 read_unlock(&chan_list_lock
);
1469 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1470 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1471 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1472 (src_any
&& dst_any
))
1477 read_unlock(&chan_list_lock
);
1482 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1484 struct hci_conn
*hcon
= conn
->hcon
;
1485 struct l2cap_chan
*chan
, *pchan
;
1490 /* Check if we have socket listening on cid */
1491 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_ATT
,
1492 &hcon
->src
, &hcon
->dst
);
1496 /* Client ATT sockets should override the server one */
1497 if (__l2cap_get_chan_by_dcid(conn
, L2CAP_CID_ATT
))
1500 dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
1502 /* If device is blocked, do not create a channel for it */
1503 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
, dst_type
))
1506 l2cap_chan_lock(pchan
);
1508 chan
= pchan
->ops
->new_connection(pchan
);
1512 chan
->dcid
= L2CAP_CID_ATT
;
1514 bacpy(&chan
->src
, &hcon
->src
);
1515 bacpy(&chan
->dst
, &hcon
->dst
);
1516 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1517 chan
->dst_type
= dst_type
;
1519 __l2cap_chan_add(conn
, chan
);
1522 l2cap_chan_unlock(pchan
);
1525 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1527 struct l2cap_chan
*chan
;
1528 struct hci_conn
*hcon
= conn
->hcon
;
1530 BT_DBG("conn %p", conn
);
1532 /* For outgoing pairing which doesn't necessarily have an
1533 * associated socket (e.g. mgmt_pair_device).
1535 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1536 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1538 mutex_lock(&conn
->chan_lock
);
1540 if (hcon
->type
== LE_LINK
)
1541 l2cap_le_conn_ready(conn
);
1543 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1545 l2cap_chan_lock(chan
);
1547 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1548 l2cap_chan_unlock(chan
);
1552 if (hcon
->type
== LE_LINK
) {
1553 l2cap_le_start(chan
);
1554 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1555 l2cap_chan_ready(chan
);
1557 } else if (chan
->state
== BT_CONNECT
) {
1558 l2cap_do_start(chan
);
1561 l2cap_chan_unlock(chan
);
1564 mutex_unlock(&conn
->chan_lock
);
1567 /* Notify sockets that we cannot guarantee reliability anymore */
1568 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1570 struct l2cap_chan
*chan
;
1572 BT_DBG("conn %p", conn
);
1574 mutex_lock(&conn
->chan_lock
);
1576 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1577 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1578 l2cap_chan_set_err(chan
, err
);
1581 mutex_unlock(&conn
->chan_lock
);
1584 static void l2cap_info_timeout(struct work_struct
*work
)
1586 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1589 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1590 conn
->info_ident
= 0;
1592 l2cap_conn_start(conn
);
1597 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1598 * callback is called during registration. The ->remove callback is called
1599 * during unregistration.
1600 * An l2cap_user object can either be explicitly unregistered or when the
1601 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1602 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1603 * External modules must own a reference to the l2cap_conn object if they intend
1604 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1605 * any time if they don't.
1608 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1610 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1613 /* We need to check whether l2cap_conn is registered. If it is not, we
1614 * must not register the l2cap_user. l2cap_conn_del() unregisters
1615 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1616 * relies on the parent hci_conn object to be locked. This itself relies
1617 * on the hci_dev object to be locked. So we must lock the hci device
1622 if (user
->list
.next
|| user
->list
.prev
) {
1627 /* conn->hchan is NULL after l2cap_conn_del() was called */
1633 ret
= user
->probe(conn
, user
);
1637 list_add(&user
->list
, &conn
->users
);
1641 hci_dev_unlock(hdev
);
1644 EXPORT_SYMBOL(l2cap_register_user
);
1646 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1648 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1652 if (!user
->list
.next
|| !user
->list
.prev
)
1655 list_del(&user
->list
);
1656 user
->list
.next
= NULL
;
1657 user
->list
.prev
= NULL
;
1658 user
->remove(conn
, user
);
1661 hci_dev_unlock(hdev
);
1663 EXPORT_SYMBOL(l2cap_unregister_user
);
1665 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1667 struct l2cap_user
*user
;
1669 while (!list_empty(&conn
->users
)) {
1670 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1671 list_del(&user
->list
);
1672 user
->list
.next
= NULL
;
1673 user
->list
.prev
= NULL
;
1674 user
->remove(conn
, user
);
1678 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1680 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1681 struct l2cap_chan
*chan
, *l
;
1686 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1688 kfree_skb(conn
->rx_skb
);
1690 l2cap_unregister_all_users(conn
);
1692 mutex_lock(&conn
->chan_lock
);
1695 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1696 l2cap_chan_hold(chan
);
1697 l2cap_chan_lock(chan
);
1699 l2cap_chan_del(chan
, err
);
1701 l2cap_chan_unlock(chan
);
1703 chan
->ops
->close(chan
);
1704 l2cap_chan_put(chan
);
1707 mutex_unlock(&conn
->chan_lock
);
1709 hci_chan_del(conn
->hchan
);
1711 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1712 cancel_delayed_work_sync(&conn
->info_timer
);
1714 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1715 cancel_delayed_work_sync(&conn
->security_timer
);
1716 smp_chan_destroy(conn
);
1719 hcon
->l2cap_data
= NULL
;
1721 l2cap_conn_put(conn
);
1724 static void security_timeout(struct work_struct
*work
)
1726 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1727 security_timer
.work
);
1729 BT_DBG("conn %p", conn
);
1731 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1732 smp_chan_destroy(conn
);
1733 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1737 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
1739 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1740 struct hci_chan
*hchan
;
1745 hchan
= hci_chan_create(hcon
);
1749 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1751 hci_chan_del(hchan
);
1755 kref_init(&conn
->ref
);
1756 hcon
->l2cap_data
= conn
;
1758 hci_conn_get(conn
->hcon
);
1759 conn
->hchan
= hchan
;
1761 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1763 switch (hcon
->type
) {
1765 if (hcon
->hdev
->le_mtu
) {
1766 conn
->mtu
= hcon
->hdev
->le_mtu
;
1771 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1775 conn
->feat_mask
= 0;
1777 if (hcon
->type
== ACL_LINK
)
1778 conn
->hs_enabled
= test_bit(HCI_HS_ENABLED
,
1779 &hcon
->hdev
->dev_flags
);
1781 spin_lock_init(&conn
->lock
);
1782 mutex_init(&conn
->chan_lock
);
1784 INIT_LIST_HEAD(&conn
->chan_l
);
1785 INIT_LIST_HEAD(&conn
->users
);
1787 if (hcon
->type
== LE_LINK
)
1788 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1790 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1792 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1797 static void l2cap_conn_free(struct kref
*ref
)
1799 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1801 hci_conn_put(conn
->hcon
);
1805 void l2cap_conn_get(struct l2cap_conn
*conn
)
1807 kref_get(&conn
->ref
);
1809 EXPORT_SYMBOL(l2cap_conn_get
);
1811 void l2cap_conn_put(struct l2cap_conn
*conn
)
1813 kref_put(&conn
->ref
, l2cap_conn_free
);
1815 EXPORT_SYMBOL(l2cap_conn_put
);
1817 /* ---- Socket interface ---- */
1819 /* Find socket with psm and source / destination bdaddr.
1820 * Returns closest match.
1822 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1827 struct l2cap_chan
*c
, *c1
= NULL
;
1829 read_lock(&chan_list_lock
);
1831 list_for_each_entry(c
, &chan_list
, global_l
) {
1832 if (state
&& c
->state
!= state
)
1835 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1838 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1841 if (c
->psm
== psm
) {
1842 int src_match
, dst_match
;
1843 int src_any
, dst_any
;
1846 src_match
= !bacmp(&c
->src
, src
);
1847 dst_match
= !bacmp(&c
->dst
, dst
);
1848 if (src_match
&& dst_match
) {
1849 read_unlock(&chan_list_lock
);
1854 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1855 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1856 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1857 (src_any
&& dst_any
))
1862 read_unlock(&chan_list_lock
);
1867 static bool is_valid_psm(u16 psm
, u8 dst_type
)
1872 if (bdaddr_type_is_le(dst_type
))
1873 return (psm
< 0x00ff);
1875 /* PSM must be odd and lsb of upper byte must be 0 */
1876 return ((psm
& 0x0101) == 0x0001);
1879 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1880 bdaddr_t
*dst
, u8 dst_type
)
1882 struct l2cap_conn
*conn
;
1883 struct hci_conn
*hcon
;
1884 struct hci_dev
*hdev
;
1888 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
1889 dst_type
, __le16_to_cpu(psm
));
1891 hdev
= hci_get_route(dst
, &chan
->src
);
1893 return -EHOSTUNREACH
;
1897 l2cap_chan_lock(chan
);
1899 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
1900 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1905 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1910 switch (chan
->mode
) {
1911 case L2CAP_MODE_BASIC
:
1912 case L2CAP_MODE_LE_FLOWCTL
:
1914 case L2CAP_MODE_ERTM
:
1915 case L2CAP_MODE_STREAMING
:
1924 switch (chan
->state
) {
1928 /* Already connecting */
1933 /* Already connected */
1947 /* Set destination address and psm */
1948 bacpy(&chan
->dst
, dst
);
1949 chan
->dst_type
= dst_type
;
1954 auth_type
= l2cap_get_auth_type(chan
);
1956 if (bdaddr_type_is_le(dst_type
))
1957 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1958 chan
->sec_level
, auth_type
);
1960 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1961 chan
->sec_level
, auth_type
);
1964 err
= PTR_ERR(hcon
);
1968 conn
= l2cap_conn_add(hcon
);
1970 hci_conn_drop(hcon
);
1975 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
1976 hci_conn_drop(hcon
);
1981 /* Update source addr of the socket */
1982 bacpy(&chan
->src
, &hcon
->src
);
1983 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1985 l2cap_chan_unlock(chan
);
1986 l2cap_chan_add(conn
, chan
);
1987 l2cap_chan_lock(chan
);
1989 /* l2cap_chan_add takes its own ref so we can drop this one */
1990 hci_conn_drop(hcon
);
1992 l2cap_state_change(chan
, BT_CONNECT
);
1993 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
1995 if (hcon
->state
== BT_CONNECTED
) {
1996 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1997 __clear_chan_timer(chan
);
1998 if (l2cap_chan_check_security(chan
))
1999 l2cap_state_change(chan
, BT_CONNECTED
);
2001 l2cap_do_start(chan
);
2007 l2cap_chan_unlock(chan
);
2008 hci_dev_unlock(hdev
);
2013 static void l2cap_monitor_timeout(struct work_struct
*work
)
2015 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2016 monitor_timer
.work
);
2018 BT_DBG("chan %p", chan
);
2020 l2cap_chan_lock(chan
);
2023 l2cap_chan_unlock(chan
);
2024 l2cap_chan_put(chan
);
2028 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
2030 l2cap_chan_unlock(chan
);
2031 l2cap_chan_put(chan
);
2034 static void l2cap_retrans_timeout(struct work_struct
*work
)
2036 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2037 retrans_timer
.work
);
2039 BT_DBG("chan %p", chan
);
2041 l2cap_chan_lock(chan
);
2044 l2cap_chan_unlock(chan
);
2045 l2cap_chan_put(chan
);
2049 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
2050 l2cap_chan_unlock(chan
);
2051 l2cap_chan_put(chan
);
2054 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
2055 struct sk_buff_head
*skbs
)
2057 struct sk_buff
*skb
;
2058 struct l2cap_ctrl
*control
;
2060 BT_DBG("chan %p, skbs %p", chan
, skbs
);
2062 if (__chan_is_moving(chan
))
2065 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2067 while (!skb_queue_empty(&chan
->tx_q
)) {
2069 skb
= skb_dequeue(&chan
->tx_q
);
2071 bt_cb(skb
)->control
.retries
= 1;
2072 control
= &bt_cb(skb
)->control
;
2074 control
->reqseq
= 0;
2075 control
->txseq
= chan
->next_tx_seq
;
2077 __pack_control(chan
, control
, skb
);
2079 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2080 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
2081 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
2084 l2cap_do_send(chan
, skb
);
2086 BT_DBG("Sent txseq %u", control
->txseq
);
2088 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
2089 chan
->frames_sent
++;
2093 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
2095 struct sk_buff
*skb
, *tx_skb
;
2096 struct l2cap_ctrl
*control
;
2099 BT_DBG("chan %p", chan
);
2101 if (chan
->state
!= BT_CONNECTED
)
2104 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2107 if (__chan_is_moving(chan
))
2110 while (chan
->tx_send_head
&&
2111 chan
->unacked_frames
< chan
->remote_tx_win
&&
2112 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
2114 skb
= chan
->tx_send_head
;
2116 bt_cb(skb
)->control
.retries
= 1;
2117 control
= &bt_cb(skb
)->control
;
2119 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2122 control
->reqseq
= chan
->buffer_seq
;
2123 chan
->last_acked_seq
= chan
->buffer_seq
;
2124 control
->txseq
= chan
->next_tx_seq
;
2126 __pack_control(chan
, control
, skb
);
2128 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2129 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
2130 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
2133 /* Clone after data has been modified. Data is assumed to be
2134 read-only (for locking purposes) on cloned sk_buffs.
2136 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2141 __set_retrans_timer(chan
);
2143 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
2144 chan
->unacked_frames
++;
2145 chan
->frames_sent
++;
2148 if (skb_queue_is_last(&chan
->tx_q
, skb
))
2149 chan
->tx_send_head
= NULL
;
2151 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
2153 l2cap_do_send(chan
, tx_skb
);
2154 BT_DBG("Sent txseq %u", control
->txseq
);
2157 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
2158 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
2163 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
2165 struct l2cap_ctrl control
;
2166 struct sk_buff
*skb
;
2167 struct sk_buff
*tx_skb
;
2170 BT_DBG("chan %p", chan
);
2172 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2175 if (__chan_is_moving(chan
))
2178 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
2179 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
2181 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
2183 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2188 bt_cb(skb
)->control
.retries
++;
2189 control
= bt_cb(skb
)->control
;
2191 if (chan
->max_tx
!= 0 &&
2192 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
2193 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
2194 l2cap_send_disconn_req(chan
, ECONNRESET
);
2195 l2cap_seq_list_clear(&chan
->retrans_list
);
2199 control
.reqseq
= chan
->buffer_seq
;
2200 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2205 if (skb_cloned(skb
)) {
2206 /* Cloned sk_buffs are read-only, so we need a
2209 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
2211 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2215 l2cap_seq_list_clear(&chan
->retrans_list
);
2219 /* Update skb contents */
2220 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
2221 put_unaligned_le32(__pack_extended_control(&control
),
2222 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2224 put_unaligned_le16(__pack_enhanced_control(&control
),
2225 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2228 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2229 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
2230 put_unaligned_le16(fcs
, skb_put(tx_skb
,
2234 l2cap_do_send(chan
, tx_skb
);
2236 BT_DBG("Resent txseq %d", control
.txseq
);
2238 chan
->last_acked_seq
= chan
->buffer_seq
;
2242 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2243 struct l2cap_ctrl
*control
)
2245 BT_DBG("chan %p, control %p", chan
, control
);
2247 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2248 l2cap_ertm_resend(chan
);
2251 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2252 struct l2cap_ctrl
*control
)
2254 struct sk_buff
*skb
;
2256 BT_DBG("chan %p, control %p", chan
, control
);
2259 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2261 l2cap_seq_list_clear(&chan
->retrans_list
);
2263 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2266 if (chan
->unacked_frames
) {
2267 skb_queue_walk(&chan
->tx_q
, skb
) {
2268 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2269 skb
== chan
->tx_send_head
)
2273 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2274 if (skb
== chan
->tx_send_head
)
2277 l2cap_seq_list_append(&chan
->retrans_list
,
2278 bt_cb(skb
)->control
.txseq
);
2281 l2cap_ertm_resend(chan
);
2285 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2287 struct l2cap_ctrl control
;
2288 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2289 chan
->last_acked_seq
);
2292 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2293 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2295 memset(&control
, 0, sizeof(control
));
2298 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2299 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2300 __clear_ack_timer(chan
);
2301 control
.super
= L2CAP_SUPER_RNR
;
2302 control
.reqseq
= chan
->buffer_seq
;
2303 l2cap_send_sframe(chan
, &control
);
2305 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2306 l2cap_ertm_send(chan
);
2307 /* If any i-frames were sent, they included an ack */
2308 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2312 /* Ack now if the window is 3/4ths full.
2313 * Calculate without mul or div
2315 threshold
= chan
->ack_win
;
2316 threshold
+= threshold
<< 1;
2319 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2322 if (frames_to_ack
>= threshold
) {
2323 __clear_ack_timer(chan
);
2324 control
.super
= L2CAP_SUPER_RR
;
2325 control
.reqseq
= chan
->buffer_seq
;
2326 l2cap_send_sframe(chan
, &control
);
2331 __set_ack_timer(chan
);
2335 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2336 struct msghdr
*msg
, int len
,
2337 int count
, struct sk_buff
*skb
)
2339 struct l2cap_conn
*conn
= chan
->conn
;
2340 struct sk_buff
**frag
;
2343 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
2349 /* Continuation fragments (no L2CAP header) */
2350 frag
= &skb_shinfo(skb
)->frag_list
;
2352 struct sk_buff
*tmp
;
2354 count
= min_t(unsigned int, conn
->mtu
, len
);
2356 tmp
= chan
->ops
->alloc_skb(chan
, count
,
2357 msg
->msg_flags
& MSG_DONTWAIT
);
2359 return PTR_ERR(tmp
);
2363 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
2366 (*frag
)->priority
= skb
->priority
;
2371 skb
->len
+= (*frag
)->len
;
2372 skb
->data_len
+= (*frag
)->len
;
2374 frag
= &(*frag
)->next
;
2380 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2381 struct msghdr
*msg
, size_t len
,
2384 struct l2cap_conn
*conn
= chan
->conn
;
2385 struct sk_buff
*skb
;
2386 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2387 struct l2cap_hdr
*lh
;
2389 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan
,
2390 __le16_to_cpu(chan
->psm
), len
, priority
);
2392 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2394 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2395 msg
->msg_flags
& MSG_DONTWAIT
);
2399 skb
->priority
= priority
;
2401 /* Create L2CAP header */
2402 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2403 lh
->cid
= cpu_to_le16(chan
->dcid
);
2404 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2405 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2407 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2408 if (unlikely(err
< 0)) {
2410 return ERR_PTR(err
);
2415 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2416 struct msghdr
*msg
, size_t len
,
2419 struct l2cap_conn
*conn
= chan
->conn
;
2420 struct sk_buff
*skb
;
2422 struct l2cap_hdr
*lh
;
2424 BT_DBG("chan %p len %zu", chan
, len
);
2426 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2428 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2429 msg
->msg_flags
& MSG_DONTWAIT
);
2433 skb
->priority
= priority
;
2435 /* Create L2CAP header */
2436 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2437 lh
->cid
= cpu_to_le16(chan
->dcid
);
2438 lh
->len
= cpu_to_le16(len
);
2440 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2441 if (unlikely(err
< 0)) {
2443 return ERR_PTR(err
);
2448 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2449 struct msghdr
*msg
, size_t len
,
2452 struct l2cap_conn
*conn
= chan
->conn
;
2453 struct sk_buff
*skb
;
2454 int err
, count
, hlen
;
2455 struct l2cap_hdr
*lh
;
2457 BT_DBG("chan %p len %zu", chan
, len
);
2460 return ERR_PTR(-ENOTCONN
);
2462 hlen
= __ertm_hdr_size(chan
);
2465 hlen
+= L2CAP_SDULEN_SIZE
;
2467 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2468 hlen
+= L2CAP_FCS_SIZE
;
2470 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2472 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2473 msg
->msg_flags
& MSG_DONTWAIT
);
2477 /* Create L2CAP header */
2478 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2479 lh
->cid
= cpu_to_le16(chan
->dcid
);
2480 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2482 /* Control header is populated later */
2483 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2484 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2486 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2489 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2491 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2492 if (unlikely(err
< 0)) {
2494 return ERR_PTR(err
);
2497 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2498 bt_cb(skb
)->control
.retries
= 0;
2502 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2503 struct sk_buff_head
*seg_queue
,
2504 struct msghdr
*msg
, size_t len
)
2506 struct sk_buff
*skb
;
2511 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2513 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2514 * so fragmented skbs are not used. The HCI layer's handling
2515 * of fragmented skbs is not compatible with ERTM's queueing.
2518 /* PDU size is derived from the HCI MTU */
2519 pdu_len
= chan
->conn
->mtu
;
2521 /* Constrain PDU size for BR/EDR connections */
2523 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2525 /* Adjust for largest possible L2CAP overhead. */
2527 pdu_len
-= L2CAP_FCS_SIZE
;
2529 pdu_len
-= __ertm_hdr_size(chan
);
2531 /* Remote device may have requested smaller PDUs */
2532 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2534 if (len
<= pdu_len
) {
2535 sar
= L2CAP_SAR_UNSEGMENTED
;
2539 sar
= L2CAP_SAR_START
;
2541 pdu_len
-= L2CAP_SDULEN_SIZE
;
2545 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2548 __skb_queue_purge(seg_queue
);
2549 return PTR_ERR(skb
);
2552 bt_cb(skb
)->control
.sar
= sar
;
2553 __skb_queue_tail(seg_queue
, skb
);
2558 pdu_len
+= L2CAP_SDULEN_SIZE
;
2561 if (len
<= pdu_len
) {
2562 sar
= L2CAP_SAR_END
;
2565 sar
= L2CAP_SAR_CONTINUE
;
2572 static struct sk_buff
*l2cap_create_le_flowctl_pdu(struct l2cap_chan
*chan
,
2574 size_t len
, u16 sdulen
)
2576 struct l2cap_conn
*conn
= chan
->conn
;
2577 struct sk_buff
*skb
;
2578 int err
, count
, hlen
;
2579 struct l2cap_hdr
*lh
;
2581 BT_DBG("chan %p len %zu", chan
, len
);
2584 return ERR_PTR(-ENOTCONN
);
2586 hlen
= L2CAP_HDR_SIZE
;
2589 hlen
+= L2CAP_SDULEN_SIZE
;
2591 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2593 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2594 msg
->msg_flags
& MSG_DONTWAIT
);
2598 /* Create L2CAP header */
2599 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2600 lh
->cid
= cpu_to_le16(chan
->dcid
);
2601 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2604 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2606 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2607 if (unlikely(err
< 0)) {
2609 return ERR_PTR(err
);
2615 static int l2cap_segment_le_sdu(struct l2cap_chan
*chan
,
2616 struct sk_buff_head
*seg_queue
,
2617 struct msghdr
*msg
, size_t len
)
2619 struct sk_buff
*skb
;
2623 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2625 pdu_len
= chan
->conn
->mtu
- L2CAP_HDR_SIZE
;
2627 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2630 pdu_len
-= L2CAP_SDULEN_SIZE
;
2636 skb
= l2cap_create_le_flowctl_pdu(chan
, msg
, pdu_len
, sdu_len
);
2638 __skb_queue_purge(seg_queue
);
2639 return PTR_ERR(skb
);
2642 __skb_queue_tail(seg_queue
, skb
);
2648 pdu_len
+= L2CAP_SDULEN_SIZE
;
2655 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2658 struct sk_buff
*skb
;
2660 struct sk_buff_head seg_queue
;
2665 /* Connectionless channel */
2666 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2667 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2669 return PTR_ERR(skb
);
2671 l2cap_do_send(chan
, skb
);
2675 switch (chan
->mode
) {
2676 case L2CAP_MODE_LE_FLOWCTL
:
2677 /* Check outgoing MTU */
2678 if (len
> chan
->omtu
)
2681 if (!chan
->tx_credits
)
2684 __skb_queue_head_init(&seg_queue
);
2686 err
= l2cap_segment_le_sdu(chan
, &seg_queue
, msg
, len
);
2688 if (chan
->state
!= BT_CONNECTED
) {
2689 __skb_queue_purge(&seg_queue
);
2696 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2698 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
2699 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
2703 if (!chan
->tx_credits
)
2704 chan
->ops
->suspend(chan
);
2710 case L2CAP_MODE_BASIC
:
2711 /* Check outgoing MTU */
2712 if (len
> chan
->omtu
)
2715 /* Create a basic PDU */
2716 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2718 return PTR_ERR(skb
);
2720 l2cap_do_send(chan
, skb
);
2724 case L2CAP_MODE_ERTM
:
2725 case L2CAP_MODE_STREAMING
:
2726 /* Check outgoing MTU */
2727 if (len
> chan
->omtu
) {
2732 __skb_queue_head_init(&seg_queue
);
2734 /* Do segmentation before calling in to the state machine,
2735 * since it's possible to block while waiting for memory
2738 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2740 /* The channel could have been closed while segmenting,
2741 * check that it is still connected.
2743 if (chan
->state
!= BT_CONNECTED
) {
2744 __skb_queue_purge(&seg_queue
);
2751 if (chan
->mode
== L2CAP_MODE_ERTM
)
2752 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2754 l2cap_streaming_send(chan
, &seg_queue
);
2758 /* If the skbs were not queued for sending, they'll still be in
2759 * seg_queue and need to be purged.
2761 __skb_queue_purge(&seg_queue
);
2765 BT_DBG("bad state %1.1x", chan
->mode
);
2772 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2774 struct l2cap_ctrl control
;
2777 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2779 memset(&control
, 0, sizeof(control
));
2781 control
.super
= L2CAP_SUPER_SREJ
;
2783 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2784 seq
= __next_seq(chan
, seq
)) {
2785 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2786 control
.reqseq
= seq
;
2787 l2cap_send_sframe(chan
, &control
);
2788 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2792 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2795 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2797 struct l2cap_ctrl control
;
2799 BT_DBG("chan %p", chan
);
2801 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2804 memset(&control
, 0, sizeof(control
));
2806 control
.super
= L2CAP_SUPER_SREJ
;
2807 control
.reqseq
= chan
->srej_list
.tail
;
2808 l2cap_send_sframe(chan
, &control
);
2811 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2813 struct l2cap_ctrl control
;
2817 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2819 memset(&control
, 0, sizeof(control
));
2821 control
.super
= L2CAP_SUPER_SREJ
;
2823 /* Capture initial list head to allow only one pass through the list. */
2824 initial_head
= chan
->srej_list
.head
;
2827 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2828 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2831 control
.reqseq
= seq
;
2832 l2cap_send_sframe(chan
, &control
);
2833 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2834 } while (chan
->srej_list
.head
!= initial_head
);
2837 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2839 struct sk_buff
*acked_skb
;
2842 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2844 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2847 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2848 chan
->expected_ack_seq
, chan
->unacked_frames
);
2850 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2851 ackseq
= __next_seq(chan
, ackseq
)) {
2853 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2855 skb_unlink(acked_skb
, &chan
->tx_q
);
2856 kfree_skb(acked_skb
);
2857 chan
->unacked_frames
--;
2861 chan
->expected_ack_seq
= reqseq
;
2863 if (chan
->unacked_frames
== 0)
2864 __clear_retrans_timer(chan
);
2866 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2869 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2871 BT_DBG("chan %p", chan
);
2873 chan
->expected_tx_seq
= chan
->buffer_seq
;
2874 l2cap_seq_list_clear(&chan
->srej_list
);
2875 skb_queue_purge(&chan
->srej_q
);
2876 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2879 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2880 struct l2cap_ctrl
*control
,
2881 struct sk_buff_head
*skbs
, u8 event
)
2883 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2887 case L2CAP_EV_DATA_REQUEST
:
2888 if (chan
->tx_send_head
== NULL
)
2889 chan
->tx_send_head
= skb_peek(skbs
);
2891 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2892 l2cap_ertm_send(chan
);
2894 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2895 BT_DBG("Enter LOCAL_BUSY");
2896 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2898 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2899 /* The SREJ_SENT state must be aborted if we are to
2900 * enter the LOCAL_BUSY state.
2902 l2cap_abort_rx_srej_sent(chan
);
2905 l2cap_send_ack(chan
);
2908 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2909 BT_DBG("Exit LOCAL_BUSY");
2910 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2912 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2913 struct l2cap_ctrl local_control
;
2915 memset(&local_control
, 0, sizeof(local_control
));
2916 local_control
.sframe
= 1;
2917 local_control
.super
= L2CAP_SUPER_RR
;
2918 local_control
.poll
= 1;
2919 local_control
.reqseq
= chan
->buffer_seq
;
2920 l2cap_send_sframe(chan
, &local_control
);
2922 chan
->retry_count
= 1;
2923 __set_monitor_timer(chan
);
2924 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2927 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2928 l2cap_process_reqseq(chan
, control
->reqseq
);
2930 case L2CAP_EV_EXPLICIT_POLL
:
2931 l2cap_send_rr_or_rnr(chan
, 1);
2932 chan
->retry_count
= 1;
2933 __set_monitor_timer(chan
);
2934 __clear_ack_timer(chan
);
2935 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2937 case L2CAP_EV_RETRANS_TO
:
2938 l2cap_send_rr_or_rnr(chan
, 1);
2939 chan
->retry_count
= 1;
2940 __set_monitor_timer(chan
);
2941 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2943 case L2CAP_EV_RECV_FBIT
:
2944 /* Nothing to process */
2951 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2952 struct l2cap_ctrl
*control
,
2953 struct sk_buff_head
*skbs
, u8 event
)
2955 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2959 case L2CAP_EV_DATA_REQUEST
:
2960 if (chan
->tx_send_head
== NULL
)
2961 chan
->tx_send_head
= skb_peek(skbs
);
2962 /* Queue data, but don't send. */
2963 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2965 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2966 BT_DBG("Enter LOCAL_BUSY");
2967 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2969 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2970 /* The SREJ_SENT state must be aborted if we are to
2971 * enter the LOCAL_BUSY state.
2973 l2cap_abort_rx_srej_sent(chan
);
2976 l2cap_send_ack(chan
);
2979 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2980 BT_DBG("Exit LOCAL_BUSY");
2981 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2983 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2984 struct l2cap_ctrl local_control
;
2985 memset(&local_control
, 0, sizeof(local_control
));
2986 local_control
.sframe
= 1;
2987 local_control
.super
= L2CAP_SUPER_RR
;
2988 local_control
.poll
= 1;
2989 local_control
.reqseq
= chan
->buffer_seq
;
2990 l2cap_send_sframe(chan
, &local_control
);
2992 chan
->retry_count
= 1;
2993 __set_monitor_timer(chan
);
2994 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2997 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2998 l2cap_process_reqseq(chan
, control
->reqseq
);
3002 case L2CAP_EV_RECV_FBIT
:
3003 if (control
&& control
->final
) {
3004 __clear_monitor_timer(chan
);
3005 if (chan
->unacked_frames
> 0)
3006 __set_retrans_timer(chan
);
3007 chan
->retry_count
= 0;
3008 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3009 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
3012 case L2CAP_EV_EXPLICIT_POLL
:
3015 case L2CAP_EV_MONITOR_TO
:
3016 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
3017 l2cap_send_rr_or_rnr(chan
, 1);
3018 __set_monitor_timer(chan
);
3019 chan
->retry_count
++;
3021 l2cap_send_disconn_req(chan
, ECONNABORTED
);
3029 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
3030 struct sk_buff_head
*skbs
, u8 event
)
3032 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3033 chan
, control
, skbs
, event
, chan
->tx_state
);
3035 switch (chan
->tx_state
) {
3036 case L2CAP_TX_STATE_XMIT
:
3037 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
3039 case L2CAP_TX_STATE_WAIT_F
:
3040 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
3048 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
3049 struct l2cap_ctrl
*control
)
3051 BT_DBG("chan %p, control %p", chan
, control
);
3052 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
3055 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
3056 struct l2cap_ctrl
*control
)
3058 BT_DBG("chan %p, control %p", chan
, control
);
3059 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
3062 /* Copy frame to all raw sockets on that connection */
3063 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3065 struct sk_buff
*nskb
;
3066 struct l2cap_chan
*chan
;
3068 BT_DBG("conn %p", conn
);
3070 mutex_lock(&conn
->chan_lock
);
3072 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
3073 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
3076 /* Don't send frame to the channel it came from */
3077 if (bt_cb(skb
)->chan
== chan
)
3080 nskb
= skb_clone(skb
, GFP_KERNEL
);
3083 if (chan
->ops
->recv(chan
, nskb
))
3087 mutex_unlock(&conn
->chan_lock
);
3090 /* ---- L2CAP signalling commands ---- */
3091 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
3092 u8 ident
, u16 dlen
, void *data
)
3094 struct sk_buff
*skb
, **frag
;
3095 struct l2cap_cmd_hdr
*cmd
;
3096 struct l2cap_hdr
*lh
;
3099 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3100 conn
, code
, ident
, dlen
);
3102 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
3105 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
3106 count
= min_t(unsigned int, conn
->mtu
, len
);
3108 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
3112 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
3113 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
3115 if (conn
->hcon
->type
== LE_LINK
)
3116 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
3118 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
3120 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
3123 cmd
->len
= cpu_to_le16(dlen
);
3126 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
3127 memcpy(skb_put(skb
, count
), data
, count
);
3133 /* Continuation fragments (no L2CAP header) */
3134 frag
= &skb_shinfo(skb
)->frag_list
;
3136 count
= min_t(unsigned int, conn
->mtu
, len
);
3138 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
3142 memcpy(skb_put(*frag
, count
), data
, count
);
3147 frag
= &(*frag
)->next
;
3157 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
3160 struct l2cap_conf_opt
*opt
= *ptr
;
3163 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
3171 *val
= *((u8
*) opt
->val
);
3175 *val
= get_unaligned_le16(opt
->val
);
3179 *val
= get_unaligned_le32(opt
->val
);
3183 *val
= (unsigned long) opt
->val
;
3187 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
3191 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
3193 struct l2cap_conf_opt
*opt
= *ptr
;
3195 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
3202 *((u8
*) opt
->val
) = val
;
3206 put_unaligned_le16(val
, opt
->val
);
3210 put_unaligned_le32(val
, opt
->val
);
3214 memcpy(opt
->val
, (void *) val
, len
);
3218 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
3221 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
3223 struct l2cap_conf_efs efs
;
3225 switch (chan
->mode
) {
3226 case L2CAP_MODE_ERTM
:
3227 efs
.id
= chan
->local_id
;
3228 efs
.stype
= chan
->local_stype
;
3229 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3230 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3231 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
3232 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
3235 case L2CAP_MODE_STREAMING
:
3237 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
3238 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3239 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3248 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3249 (unsigned long) &efs
);
3252 static void l2cap_ack_timeout(struct work_struct
*work
)
3254 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3258 BT_DBG("chan %p", chan
);
3260 l2cap_chan_lock(chan
);
3262 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3263 chan
->last_acked_seq
);
3266 l2cap_send_rr_or_rnr(chan
, 0);
3268 l2cap_chan_unlock(chan
);
3269 l2cap_chan_put(chan
);
3272 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3276 chan
->next_tx_seq
= 0;
3277 chan
->expected_tx_seq
= 0;
3278 chan
->expected_ack_seq
= 0;
3279 chan
->unacked_frames
= 0;
3280 chan
->buffer_seq
= 0;
3281 chan
->frames_sent
= 0;
3282 chan
->last_acked_seq
= 0;
3284 chan
->sdu_last_frag
= NULL
;
3287 skb_queue_head_init(&chan
->tx_q
);
3289 chan
->local_amp_id
= AMP_ID_BREDR
;
3290 chan
->move_id
= AMP_ID_BREDR
;
3291 chan
->move_state
= L2CAP_MOVE_STABLE
;
3292 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3294 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3297 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3298 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3300 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3301 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3302 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3304 skb_queue_head_init(&chan
->srej_q
);
3306 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3310 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3312 l2cap_seq_list_free(&chan
->srej_list
);
3317 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3320 case L2CAP_MODE_STREAMING
:
3321 case L2CAP_MODE_ERTM
:
3322 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3326 return L2CAP_MODE_BASIC
;
3330 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3332 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3335 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3337 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
3340 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3341 struct l2cap_conf_rfc
*rfc
)
3343 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3344 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3346 /* Class 1 devices have must have ERTM timeouts
3347 * exceeding the Link Supervision Timeout. The
3348 * default Link Supervision Timeout for AMP
3349 * controllers is 10 seconds.
3351 * Class 1 devices use 0xffffffff for their
3352 * best-effort flush timeout, so the clamping logic
3353 * will result in a timeout that meets the above
3354 * requirement. ERTM timeouts are 16-bit values, so
3355 * the maximum timeout is 65.535 seconds.
3358 /* Convert timeout to milliseconds and round */
3359 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3361 /* This is the recommended formula for class 2 devices
3362 * that start ERTM timers when packets are sent to the
3365 ertm_to
= 3 * ertm_to
+ 500;
3367 if (ertm_to
> 0xffff)
3370 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3371 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3373 rfc
->retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3374 rfc
->monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3378 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3380 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3381 __l2cap_ews_supported(chan
->conn
)) {
3382 /* use extended control field */
3383 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3384 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3386 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3387 L2CAP_DEFAULT_TX_WINDOW
);
3388 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3390 chan
->ack_win
= chan
->tx_win
;
3393 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3395 struct l2cap_conf_req
*req
= data
;
3396 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3397 void *ptr
= req
->data
;
3400 BT_DBG("chan %p", chan
);
3402 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3405 switch (chan
->mode
) {
3406 case L2CAP_MODE_STREAMING
:
3407 case L2CAP_MODE_ERTM
:
3408 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3411 if (__l2cap_efs_supported(chan
->conn
))
3412 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3416 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3421 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3422 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3424 switch (chan
->mode
) {
3425 case L2CAP_MODE_BASIC
:
3426 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3427 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3430 rfc
.mode
= L2CAP_MODE_BASIC
;
3432 rfc
.max_transmit
= 0;
3433 rfc
.retrans_timeout
= 0;
3434 rfc
.monitor_timeout
= 0;
3435 rfc
.max_pdu_size
= 0;
3437 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3438 (unsigned long) &rfc
);
3441 case L2CAP_MODE_ERTM
:
3442 rfc
.mode
= L2CAP_MODE_ERTM
;
3443 rfc
.max_transmit
= chan
->max_tx
;
3445 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3447 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3448 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3450 rfc
.max_pdu_size
= cpu_to_le16(size
);
3452 l2cap_txwin_setup(chan
);
3454 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3455 L2CAP_DEFAULT_TX_WINDOW
);
3457 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3458 (unsigned long) &rfc
);
3460 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3461 l2cap_add_opt_efs(&ptr
, chan
);
3463 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3464 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3467 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3468 if (chan
->fcs
== L2CAP_FCS_NONE
||
3469 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3470 chan
->fcs
= L2CAP_FCS_NONE
;
3471 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3476 case L2CAP_MODE_STREAMING
:
3477 l2cap_txwin_setup(chan
);
3478 rfc
.mode
= L2CAP_MODE_STREAMING
;
3480 rfc
.max_transmit
= 0;
3481 rfc
.retrans_timeout
= 0;
3482 rfc
.monitor_timeout
= 0;
3484 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3485 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3487 rfc
.max_pdu_size
= cpu_to_le16(size
);
3489 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3490 (unsigned long) &rfc
);
3492 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3493 l2cap_add_opt_efs(&ptr
, chan
);
3495 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3496 if (chan
->fcs
== L2CAP_FCS_NONE
||
3497 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3498 chan
->fcs
= L2CAP_FCS_NONE
;
3499 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3505 req
->dcid
= cpu_to_le16(chan
->dcid
);
3506 req
->flags
= __constant_cpu_to_le16(0);
/* Parse the remote's buffered Configuration Request (chan->conf_req) and
 * build the Configuration Response into @data.  Applies accepted options
 * to the channel, rejects unknown non-hint options, and negotiates the
 * mode.  Returns the response length or -ECONNREFUSED when negotiation
 * cannot proceed.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint option: echo its type back. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3725 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3726 void *data
, u16
*result
)
3728 struct l2cap_conf_req
*req
= data
;
3729 void *ptr
= req
->data
;
3732 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3733 struct l2cap_conf_efs efs
;
3735 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3737 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3738 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3741 case L2CAP_CONF_MTU
:
3742 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3743 *result
= L2CAP_CONF_UNACCEPT
;
3744 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3747 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3750 case L2CAP_CONF_FLUSH_TO
:
3751 chan
->flush_to
= val
;
3752 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3756 case L2CAP_CONF_RFC
:
3757 if (olen
== sizeof(rfc
))
3758 memcpy(&rfc
, (void *)val
, olen
);
3760 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3761 rfc
.mode
!= chan
->mode
)
3762 return -ECONNREFUSED
;
3766 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3767 sizeof(rfc
), (unsigned long) &rfc
);
3770 case L2CAP_CONF_EWS
:
3771 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3772 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3776 case L2CAP_CONF_EFS
:
3777 if (olen
== sizeof(efs
))
3778 memcpy(&efs
, (void *)val
, olen
);
3780 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3781 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3782 efs
.stype
!= chan
->local_stype
)
3783 return -ECONNREFUSED
;
3785 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3786 (unsigned long) &efs
);
3789 case L2CAP_CONF_FCS
:
3790 if (*result
== L2CAP_CONF_PENDING
)
3791 if (val
== L2CAP_FCS_NONE
)
3792 set_bit(CONF_RECV_NO_FCS
,
3798 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3799 return -ECONNREFUSED
;
3801 chan
->mode
= rfc
.mode
;
3803 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3805 case L2CAP_MODE_ERTM
:
3806 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3807 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3808 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3809 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3810 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3813 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3814 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3815 chan
->local_sdu_itime
=
3816 le32_to_cpu(efs
.sdu_itime
);
3817 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3818 chan
->local_flush_to
=
3819 le32_to_cpu(efs
.flush_to
);
3823 case L2CAP_MODE_STREAMING
:
3824 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3828 req
->dcid
= cpu_to_le16(chan
->dcid
);
3829 req
->flags
= __constant_cpu_to_le16(0);
3834 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3835 u16 result
, u16 flags
)
3837 struct l2cap_conf_rsp
*rsp
= data
;
3838 void *ptr
= rsp
->data
;
3840 BT_DBG("chan %p", chan
);
3842 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3843 rsp
->result
= cpu_to_le16(result
);
3844 rsp
->flags
= cpu_to_le16(flags
);
3849 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3851 struct l2cap_le_conn_rsp rsp
;
3852 struct l2cap_conn
*conn
= chan
->conn
;
3854 BT_DBG("chan %p", chan
);
3856 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3857 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3858 rsp
.mps
= cpu_to_le16(chan
->mps
);
3859 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
3860 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3862 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
3866 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3868 struct l2cap_conn_rsp rsp
;
3869 struct l2cap_conn
*conn
= chan
->conn
;
3873 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3874 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3875 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3876 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3879 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3881 rsp_code
= L2CAP_CONN_RSP
;
3883 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3885 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3887 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3890 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3891 l2cap_build_conf_req(chan
, buf
), buf
);
3892 chan
->num_conf_req
++;
3895 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3899 /* Use sane default values in case a misbehaving remote device
3900 * did not send an RFC or extended window size option.
3902 u16 txwin_ext
= chan
->ack_win
;
3903 struct l2cap_conf_rfc rfc
= {
3905 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3906 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3907 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3908 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3911 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3913 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3916 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3917 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3920 case L2CAP_CONF_RFC
:
3921 if (olen
== sizeof(rfc
))
3922 memcpy(&rfc
, (void *)val
, olen
);
3924 case L2CAP_CONF_EWS
:
3931 case L2CAP_MODE_ERTM
:
3932 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3933 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3934 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3935 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3936 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3938 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3941 case L2CAP_MODE_STREAMING
:
3942 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3946 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3947 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3950 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3952 if (cmd_len
< sizeof(*rej
))
3955 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3958 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3959 cmd
->ident
== conn
->info_ident
) {
3960 cancel_delayed_work(&conn
->info_timer
);
3962 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3963 conn
->info_ident
= 0;
3965 l2cap_conn_start(conn
);
3971 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3972 struct l2cap_cmd_hdr
*cmd
,
3973 u8
*data
, u8 rsp_code
, u8 amp_id
)
3975 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3976 struct l2cap_conn_rsp rsp
;
3977 struct l2cap_chan
*chan
= NULL
, *pchan
;
3978 int result
, status
= L2CAP_CS_NO_INFO
;
3980 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3981 __le16 psm
= req
->psm
;
3983 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3985 /* Check if we have socket listening on psm */
3986 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3987 &conn
->hcon
->dst
, ACL_LINK
);
3989 result
= L2CAP_CR_BAD_PSM
;
3993 mutex_lock(&conn
->chan_lock
);
3994 l2cap_chan_lock(pchan
);
3996 /* Check if the ACL is secure enough (if not SDP) */
3997 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3998 !hci_conn_check_link_mode(conn
->hcon
)) {
3999 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
4000 result
= L2CAP_CR_SEC_BLOCK
;
4004 result
= L2CAP_CR_NO_MEM
;
4006 /* Check if we already have channel with that dcid */
4007 if (__l2cap_get_chan_by_dcid(conn
, scid
))
4010 chan
= pchan
->ops
->new_connection(pchan
);
4014 /* For certain devices (ex: HID mouse), support for authentication,
4015 * pairing and bonding is optional. For such devices, inorder to avoid
4016 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4017 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4019 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
4021 bacpy(&chan
->src
, &conn
->hcon
->src
);
4022 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
4023 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
4024 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
4027 chan
->local_amp_id
= amp_id
;
4029 __l2cap_chan_add(conn
, chan
);
4033 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
4035 chan
->ident
= cmd
->ident
;
4037 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
4038 if (l2cap_chan_check_security(chan
)) {
4039 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
4040 l2cap_state_change(chan
, BT_CONNECT2
);
4041 result
= L2CAP_CR_PEND
;
4042 status
= L2CAP_CS_AUTHOR_PEND
;
4043 chan
->ops
->defer(chan
);
4045 /* Force pending result for AMP controllers.
4046 * The connection will succeed after the
4047 * physical link is up.
4049 if (amp_id
== AMP_ID_BREDR
) {
4050 l2cap_state_change(chan
, BT_CONFIG
);
4051 result
= L2CAP_CR_SUCCESS
;
4053 l2cap_state_change(chan
, BT_CONNECT2
);
4054 result
= L2CAP_CR_PEND
;
4056 status
= L2CAP_CS_NO_INFO
;
4059 l2cap_state_change(chan
, BT_CONNECT2
);
4060 result
= L2CAP_CR_PEND
;
4061 status
= L2CAP_CS_AUTHEN_PEND
;
4064 l2cap_state_change(chan
, BT_CONNECT2
);
4065 result
= L2CAP_CR_PEND
;
4066 status
= L2CAP_CS_NO_INFO
;
4070 l2cap_chan_unlock(pchan
);
4071 mutex_unlock(&conn
->chan_lock
);
4074 rsp
.scid
= cpu_to_le16(scid
);
4075 rsp
.dcid
= cpu_to_le16(dcid
);
4076 rsp
.result
= cpu_to_le16(result
);
4077 rsp
.status
= cpu_to_le16(status
);
4078 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
4080 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
4081 struct l2cap_info_req info
;
4082 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4084 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
4085 conn
->info_ident
= l2cap_get_ident(conn
);
4087 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
4089 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
4090 sizeof(info
), &info
);
4093 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
4094 result
== L2CAP_CR_SUCCESS
) {
4096 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4097 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4098 l2cap_build_conf_req(chan
, buf
), buf
);
4099 chan
->num_conf_req
++;
4105 static int l2cap_connect_req(struct l2cap_conn
*conn
,
4106 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4108 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
4109 struct hci_conn
*hcon
= conn
->hcon
;
4111 if (cmd_len
< sizeof(struct l2cap_conn_req
))
4115 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
4116 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
4117 mgmt_device_connected(hdev
, &hcon
->dst
, hcon
->type
,
4118 hcon
->dst_type
, 0, NULL
, 0,
4120 hci_dev_unlock(hdev
);
4122 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
4126 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
4127 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4130 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
4131 u16 scid
, dcid
, result
, status
;
4132 struct l2cap_chan
*chan
;
4136 if (cmd_len
< sizeof(*rsp
))
4139 scid
= __le16_to_cpu(rsp
->scid
);
4140 dcid
= __le16_to_cpu(rsp
->dcid
);
4141 result
= __le16_to_cpu(rsp
->result
);
4142 status
= __le16_to_cpu(rsp
->status
);
4144 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4145 dcid
, scid
, result
, status
);
4147 mutex_lock(&conn
->chan_lock
);
4150 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4156 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
4165 l2cap_chan_lock(chan
);
4168 case L2CAP_CR_SUCCESS
:
4169 l2cap_state_change(chan
, BT_CONFIG
);
4172 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4174 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
4177 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4178 l2cap_build_conf_req(chan
, req
), req
);
4179 chan
->num_conf_req
++;
4183 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4187 l2cap_chan_del(chan
, ECONNREFUSED
);
4191 l2cap_chan_unlock(chan
);
4194 mutex_unlock(&conn
->chan_lock
);
4199 static inline void set_default_fcs(struct l2cap_chan
*chan
)
4201 /* FCS is enabled only in ERTM or streaming mode, if one or both
4204 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
4205 chan
->fcs
= L2CAP_FCS_NONE
;
4206 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
4207 chan
->fcs
= L2CAP_FCS_CRC16
;
4210 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
4211 u8 ident
, u16 flags
)
4213 struct l2cap_conn
*conn
= chan
->conn
;
4215 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
4218 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
4219 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
4221 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
4222 l2cap_build_conf_rsp(chan
, data
,
4223 L2CAP_CONF_SUCCESS
, flags
), data
);
4226 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
4229 struct l2cap_cmd_rej_cid rej
;
4231 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
4232 rej
.scid
= __cpu_to_le16(scid
);
4233 rej
.dcid
= __cpu_to_le16(dcid
);
4235 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4238 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4239 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4242 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4245 struct l2cap_chan
*chan
;
4248 if (cmd_len
< sizeof(*req
))
4251 dcid
= __le16_to_cpu(req
->dcid
);
4252 flags
= __le16_to_cpu(req
->flags
);
4254 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4256 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4258 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4262 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4263 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4268 /* Reject if config buffer is too small. */
4269 len
= cmd_len
- sizeof(*req
);
4270 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4271 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4272 l2cap_build_conf_rsp(chan
, rsp
,
4273 L2CAP_CONF_REJECT
, flags
), rsp
);
4278 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4279 chan
->conf_len
+= len
;
4281 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4282 /* Incomplete config. Send empty response. */
4283 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4284 l2cap_build_conf_rsp(chan
, rsp
,
4285 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4289 /* Complete config. */
4290 len
= l2cap_parse_conf_req(chan
, rsp
);
4292 l2cap_send_disconn_req(chan
, ECONNRESET
);
4296 chan
->ident
= cmd
->ident
;
4297 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4298 chan
->num_conf_rsp
++;
4300 /* Reset config buffer. */
4303 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4306 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4307 set_default_fcs(chan
);
4309 if (chan
->mode
== L2CAP_MODE_ERTM
||
4310 chan
->mode
== L2CAP_MODE_STREAMING
)
4311 err
= l2cap_ertm_init(chan
);
4314 l2cap_send_disconn_req(chan
, -err
);
4316 l2cap_chan_ready(chan
);
4321 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4323 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4324 l2cap_build_conf_req(chan
, buf
), buf
);
4325 chan
->num_conf_req
++;
4328 /* Got Conf Rsp PENDING from remote side and asume we sent
4329 Conf Rsp PENDING in the code above */
4330 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4331 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4333 /* check compatibility */
4335 /* Send rsp for BR/EDR channel */
4337 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4339 chan
->ident
= cmd
->ident
;
4343 l2cap_chan_unlock(chan
);
4347 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4348 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4351 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4352 u16 scid
, flags
, result
;
4353 struct l2cap_chan
*chan
;
4354 int len
= cmd_len
- sizeof(*rsp
);
4357 if (cmd_len
< sizeof(*rsp
))
4360 scid
= __le16_to_cpu(rsp
->scid
);
4361 flags
= __le16_to_cpu(rsp
->flags
);
4362 result
= __le16_to_cpu(rsp
->result
);
4364 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4367 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4372 case L2CAP_CONF_SUCCESS
:
4373 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4374 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4377 case L2CAP_CONF_PENDING
:
4378 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4380 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4383 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4386 l2cap_send_disconn_req(chan
, ECONNRESET
);
4390 if (!chan
->hs_hcon
) {
4391 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4394 if (l2cap_check_efs(chan
)) {
4395 amp_create_logical_link(chan
);
4396 chan
->ident
= cmd
->ident
;
4402 case L2CAP_CONF_UNACCEPT
:
4403 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4406 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4407 l2cap_send_disconn_req(chan
, ECONNRESET
);
4411 /* throw out any old stored conf requests */
4412 result
= L2CAP_CONF_SUCCESS
;
4413 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4416 l2cap_send_disconn_req(chan
, ECONNRESET
);
4420 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4421 L2CAP_CONF_REQ
, len
, req
);
4422 chan
->num_conf_req
++;
4423 if (result
!= L2CAP_CONF_SUCCESS
)
4429 l2cap_chan_set_err(chan
, ECONNRESET
);
4431 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4432 l2cap_send_disconn_req(chan
, ECONNRESET
);
4436 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4439 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4441 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4442 set_default_fcs(chan
);
4444 if (chan
->mode
== L2CAP_MODE_ERTM
||
4445 chan
->mode
== L2CAP_MODE_STREAMING
)
4446 err
= l2cap_ertm_init(chan
);
4449 l2cap_send_disconn_req(chan
, -err
);
4451 l2cap_chan_ready(chan
);
4455 l2cap_chan_unlock(chan
);
4459 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4460 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4463 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4464 struct l2cap_disconn_rsp rsp
;
4466 struct l2cap_chan
*chan
;
4468 if (cmd_len
!= sizeof(*req
))
4471 scid
= __le16_to_cpu(req
->scid
);
4472 dcid
= __le16_to_cpu(req
->dcid
);
4474 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4476 mutex_lock(&conn
->chan_lock
);
4478 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4480 mutex_unlock(&conn
->chan_lock
);
4481 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4485 l2cap_chan_lock(chan
);
4487 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4488 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4489 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4491 chan
->ops
->set_shutdown(chan
);
4493 l2cap_chan_hold(chan
);
4494 l2cap_chan_del(chan
, ECONNRESET
);
4496 l2cap_chan_unlock(chan
);
4498 chan
->ops
->close(chan
);
4499 l2cap_chan_put(chan
);
4501 mutex_unlock(&conn
->chan_lock
);
4506 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4507 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4510 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4512 struct l2cap_chan
*chan
;
4514 if (cmd_len
!= sizeof(*rsp
))
4517 scid
= __le16_to_cpu(rsp
->scid
);
4518 dcid
= __le16_to_cpu(rsp
->dcid
);
4520 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4522 mutex_lock(&conn
->chan_lock
);
4524 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4526 mutex_unlock(&conn
->chan_lock
);
4530 l2cap_chan_lock(chan
);
4532 l2cap_chan_hold(chan
);
4533 l2cap_chan_del(chan
, 0);
4535 l2cap_chan_unlock(chan
);
4537 chan
->ops
->close(chan
);
4538 l2cap_chan_put(chan
);
4540 mutex_unlock(&conn
->chan_lock
);
4545 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4546 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4549 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4552 if (cmd_len
!= sizeof(*req
))
4555 type
= __le16_to_cpu(req
->type
);
4557 BT_DBG("type 0x%4.4x", type
);
4559 if (type
== L2CAP_IT_FEAT_MASK
) {
4561 u32 feat_mask
= l2cap_feat_mask
;
4562 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4563 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4564 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4566 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4568 if (conn
->hs_enabled
)
4569 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4570 | L2CAP_FEAT_EXT_WINDOW
;
4572 put_unaligned_le32(feat_mask
, rsp
->data
);
4573 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4575 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4577 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4579 if (conn
->hs_enabled
)
4580 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4582 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4584 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4585 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4586 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4587 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4590 struct l2cap_info_rsp rsp
;
4591 rsp
.type
= cpu_to_le16(type
);
4592 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
4593 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4600 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4601 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4604 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4607 if (cmd_len
< sizeof(*rsp
))
4610 type
= __le16_to_cpu(rsp
->type
);
4611 result
= __le16_to_cpu(rsp
->result
);
4613 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4615 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4616 if (cmd
->ident
!= conn
->info_ident
||
4617 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4620 cancel_delayed_work(&conn
->info_timer
);
4622 if (result
!= L2CAP_IR_SUCCESS
) {
4623 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4624 conn
->info_ident
= 0;
4626 l2cap_conn_start(conn
);
4632 case L2CAP_IT_FEAT_MASK
:
4633 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4635 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4636 struct l2cap_info_req req
;
4637 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4639 conn
->info_ident
= l2cap_get_ident(conn
);
4641 l2cap_send_cmd(conn
, conn
->info_ident
,
4642 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4644 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4645 conn
->info_ident
= 0;
4647 l2cap_conn_start(conn
);
4651 case L2CAP_IT_FIXED_CHAN
:
4652 conn
->fixed_chan_mask
= rsp
->data
[0];
4653 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4654 conn
->info_ident
= 0;
4656 l2cap_conn_start(conn
);
4663 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4664 struct l2cap_cmd_hdr
*cmd
,
4665 u16 cmd_len
, void *data
)
4667 struct l2cap_create_chan_req
*req
= data
;
4668 struct l2cap_create_chan_rsp rsp
;
4669 struct l2cap_chan
*chan
;
4670 struct hci_dev
*hdev
;
4673 if (cmd_len
!= sizeof(*req
))
4676 if (!conn
->hs_enabled
)
4679 psm
= le16_to_cpu(req
->psm
);
4680 scid
= le16_to_cpu(req
->scid
);
4682 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4684 /* For controller id 0 make BR/EDR connection */
4685 if (req
->amp_id
== AMP_ID_BREDR
) {
4686 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4691 /* Validate AMP controller id */
4692 hdev
= hci_dev_get(req
->amp_id
);
4696 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4701 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4704 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4705 struct hci_conn
*hs_hcon
;
4707 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4711 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4716 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4718 mgr
->bredr_chan
= chan
;
4719 chan
->hs_hcon
= hs_hcon
;
4720 chan
->fcs
= L2CAP_FCS_NONE
;
4721 conn
->mtu
= hdev
->block_mtu
;
4730 rsp
.scid
= cpu_to_le16(scid
);
4731 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_BAD_AMP
);
4732 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4734 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4740 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4742 struct l2cap_move_chan_req req
;
4745 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4747 ident
= l2cap_get_ident(chan
->conn
);
4748 chan
->ident
= ident
;
4750 req
.icid
= cpu_to_le16(chan
->scid
);
4751 req
.dest_amp_id
= dest_amp_id
;
4753 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4756 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4759 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4761 struct l2cap_move_chan_rsp rsp
;
4763 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4765 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4766 rsp
.result
= cpu_to_le16(result
);
4768 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4772 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4774 struct l2cap_move_chan_cfm cfm
;
4776 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4778 chan
->ident
= l2cap_get_ident(chan
->conn
);
4780 cfm
.icid
= cpu_to_le16(chan
->scid
);
4781 cfm
.result
= cpu_to_le16(result
);
4783 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4786 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4789 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4791 struct l2cap_move_chan_cfm cfm
;
4793 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4795 cfm
.icid
= cpu_to_le16(icid
);
4796 cfm
.result
= __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4798 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4802 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4805 struct l2cap_move_chan_cfm_rsp rsp
;
4807 BT_DBG("icid 0x%4.4x", icid
);
4809 rsp
.icid
= cpu_to_le16(icid
);
4810 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4813 static void __release_logical_link(struct l2cap_chan
*chan
)
4815 chan
->hs_hchan
= NULL
;
4816 chan
->hs_hcon
= NULL
;
4818 /* Placeholder - release the logical link */
4821 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4823 /* Logical link setup failed */
4824 if (chan
->state
!= BT_CONNECTED
) {
4825 /* Create channel failure, disconnect */
4826 l2cap_send_disconn_req(chan
, ECONNRESET
);
4830 switch (chan
->move_role
) {
4831 case L2CAP_MOVE_ROLE_RESPONDER
:
4832 l2cap_move_done(chan
);
4833 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4835 case L2CAP_MOVE_ROLE_INITIATOR
:
4836 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4837 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4838 /* Remote has only sent pending or
4839 * success responses, clean up
4841 l2cap_move_done(chan
);
4844 /* Other amp move states imply that the move
4845 * has already aborted
4847 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4852 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4853 struct hci_chan
*hchan
)
4855 struct l2cap_conf_rsp rsp
;
4857 chan
->hs_hchan
= hchan
;
4858 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4860 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4862 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4865 set_default_fcs(chan
);
4867 err
= l2cap_ertm_init(chan
);
4869 l2cap_send_disconn_req(chan
, -err
);
4871 l2cap_chan_ready(chan
);
4875 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4876 struct hci_chan
*hchan
)
4878 chan
->hs_hcon
= hchan
->conn
;
4879 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4881 BT_DBG("move_state %d", chan
->move_state
);
4883 switch (chan
->move_state
) {
4884 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4885 /* Move confirm will be sent after a success
4886 * response is received
4888 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4890 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4891 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4892 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4893 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4894 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4895 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4896 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4897 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4898 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4902 /* Move was not in expected state, free the channel */
4903 __release_logical_link(chan
);
4905 chan
->move_state
= L2CAP_MOVE_STABLE
;
4909 /* Call with chan locked */
4910 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4913 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4916 l2cap_logical_fail(chan
);
4917 __release_logical_link(chan
);
4921 if (chan
->state
!= BT_CONNECTED
) {
4922 /* Ignore logical link if channel is on BR/EDR */
4923 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4924 l2cap_logical_finish_create(chan
, hchan
);
4926 l2cap_logical_finish_move(chan
, hchan
);
4930 void l2cap_move_start(struct l2cap_chan
*chan
)
4932 BT_DBG("chan %p", chan
);
4934 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4935 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4937 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4938 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4939 /* Placeholder - start physical link setup */
4941 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4942 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4944 l2cap_move_setup(chan
);
4945 l2cap_send_move_chan_req(chan
, 0);
4949 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4950 u8 local_amp_id
, u8 remote_amp_id
)
4952 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4953 local_amp_id
, remote_amp_id
);
4955 chan
->fcs
= L2CAP_FCS_NONE
;
4957 /* Outgoing channel on AMP */
4958 if (chan
->state
== BT_CONNECT
) {
4959 if (result
== L2CAP_CR_SUCCESS
) {
4960 chan
->local_amp_id
= local_amp_id
;
4961 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4963 /* Revert to BR/EDR connect */
4964 l2cap_send_conn_req(chan
);
4970 /* Incoming channel on AMP */
4971 if (__l2cap_no_conn_pending(chan
)) {
4972 struct l2cap_conn_rsp rsp
;
4974 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4975 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4977 if (result
== L2CAP_CR_SUCCESS
) {
4978 /* Send successful response */
4979 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
4980 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4982 /* Send negative response */
4983 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4984 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4987 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4990 if (result
== L2CAP_CR_SUCCESS
) {
4991 l2cap_state_change(chan
, BT_CONFIG
);
4992 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4993 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4995 l2cap_build_conf_req(chan
, buf
), buf
);
4996 chan
->num_conf_req
++;
5001 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
5004 l2cap_move_setup(chan
);
5005 chan
->move_id
= local_amp_id
;
5006 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
5008 l2cap_send_move_chan_req(chan
, remote_amp_id
);
5011 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
5013 struct hci_chan
*hchan
= NULL
;
5015 /* Placeholder - get hci_chan for logical link */
5018 if (hchan
->state
== BT_CONNECTED
) {
5019 /* Logical link is ready to go */
5020 chan
->hs_hcon
= hchan
->conn
;
5021 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5022 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
5023 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
5025 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5027 /* Wait for logical link to be ready */
5028 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5031 /* Logical link not available */
5032 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
5036 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
5038 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
5040 if (result
== -EINVAL
)
5041 rsp_result
= L2CAP_MR_BAD_ID
;
5043 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
5045 l2cap_send_move_chan_rsp(chan
, rsp_result
);
5048 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
5049 chan
->move_state
= L2CAP_MOVE_STABLE
;
5051 /* Restart data transmission */
5052 l2cap_ertm_send(chan
);
5055 /* Invoke with locked chan */
5056 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
5058 u8 local_amp_id
= chan
->local_amp_id
;
5059 u8 remote_amp_id
= chan
->remote_amp_id
;
5061 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5062 chan
, result
, local_amp_id
, remote_amp_id
);
5064 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
5065 l2cap_chan_unlock(chan
);
5069 if (chan
->state
!= BT_CONNECTED
) {
5070 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
5071 } else if (result
!= L2CAP_MR_SUCCESS
) {
5072 l2cap_do_move_cancel(chan
, result
);
5074 switch (chan
->move_role
) {
5075 case L2CAP_MOVE_ROLE_INITIATOR
:
5076 l2cap_do_move_initiate(chan
, local_amp_id
,
5079 case L2CAP_MOVE_ROLE_RESPONDER
:
5080 l2cap_do_move_respond(chan
, result
);
5083 l2cap_do_move_cancel(chan
, result
);
5089 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
5090 struct l2cap_cmd_hdr
*cmd
,
5091 u16 cmd_len
, void *data
)
5093 struct l2cap_move_chan_req
*req
= data
;
5094 struct l2cap_move_chan_rsp rsp
;
5095 struct l2cap_chan
*chan
;
5097 u16 result
= L2CAP_MR_NOT_ALLOWED
;
5099 if (cmd_len
!= sizeof(*req
))
5102 icid
= le16_to_cpu(req
->icid
);
5104 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
5106 if (!conn
->hs_enabled
)
5109 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5111 rsp
.icid
= cpu_to_le16(icid
);
5112 rsp
.result
= __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
5113 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
5118 chan
->ident
= cmd
->ident
;
5120 if (chan
->scid
< L2CAP_CID_DYN_START
||
5121 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
5122 (chan
->mode
!= L2CAP_MODE_ERTM
&&
5123 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
5124 result
= L2CAP_MR_NOT_ALLOWED
;
5125 goto send_move_response
;
5128 if (chan
->local_amp_id
== req
->dest_amp_id
) {
5129 result
= L2CAP_MR_SAME_ID
;
5130 goto send_move_response
;
5133 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
5134 struct hci_dev
*hdev
;
5135 hdev
= hci_dev_get(req
->dest_amp_id
);
5136 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
5137 !test_bit(HCI_UP
, &hdev
->flags
)) {
5141 result
= L2CAP_MR_BAD_ID
;
5142 goto send_move_response
;
5147 /* Detect a move collision. Only send a collision response
5148 * if this side has "lost", otherwise proceed with the move.
5149 * The winner has the larger bd_addr.
5151 if ((__chan_is_moving(chan
) ||
5152 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
5153 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
5154 result
= L2CAP_MR_COLLISION
;
5155 goto send_move_response
;
5158 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5159 l2cap_move_setup(chan
);
5160 chan
->move_id
= req
->dest_amp_id
;
5163 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
5164 /* Moving to BR/EDR */
5165 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5166 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
5167 result
= L2CAP_MR_PEND
;
5169 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
5170 result
= L2CAP_MR_SUCCESS
;
5173 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
5174 /* Placeholder - uncomment when amp functions are available */
5175 /*amp_accept_physical(chan, req->dest_amp_id);*/
5176 result
= L2CAP_MR_PEND
;
5180 l2cap_send_move_chan_rsp(chan
, result
);
5182 l2cap_chan_unlock(chan
);
5187 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
5189 struct l2cap_chan
*chan
;
5190 struct hci_chan
*hchan
= NULL
;
5192 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5194 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5198 __clear_chan_timer(chan
);
5199 if (result
== L2CAP_MR_PEND
)
5200 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
5202 switch (chan
->move_state
) {
5203 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
5204 /* Move confirm will be sent when logical link
5207 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5209 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
5210 if (result
== L2CAP_MR_PEND
) {
5212 } else if (test_bit(CONN_LOCAL_BUSY
,
5213 &chan
->conn_state
)) {
5214 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
5216 /* Logical link is up or moving to BR/EDR,
5219 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
5220 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5223 case L2CAP_MOVE_WAIT_RSP
:
5225 if (result
== L2CAP_MR_SUCCESS
) {
5226 /* Remote is ready, send confirm immediately
5227 * after logical link is ready
5229 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5231 /* Both logical link and move success
5232 * are required to confirm
5234 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5237 /* Placeholder - get hci_chan for logical link */
5239 /* Logical link not available */
5240 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5244 /* If the logical link is not yet connected, do not
5245 * send confirmation.
5247 if (hchan
->state
!= BT_CONNECTED
)
5250 /* Logical link is already ready to go */
5252 chan
->hs_hcon
= hchan
->conn
;
5253 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5255 if (result
== L2CAP_MR_SUCCESS
) {
5256 /* Can confirm now */
5257 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5259 /* Now only need move success
5262 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5265 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5268 /* Any other amp move state means the move failed. */
5269 chan
->move_id
= chan
->local_amp_id
;
5270 l2cap_move_done(chan
);
5271 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5274 l2cap_chan_unlock(chan
);
5277 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5280 struct l2cap_chan
*chan
;
5282 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5284 /* Could not locate channel, icid is best guess */
5285 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5289 __clear_chan_timer(chan
);
5291 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5292 if (result
== L2CAP_MR_COLLISION
) {
5293 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5295 /* Cleanup - cancel move */
5296 chan
->move_id
= chan
->local_amp_id
;
5297 l2cap_move_done(chan
);
5301 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5303 l2cap_chan_unlock(chan
);
5306 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5307 struct l2cap_cmd_hdr
*cmd
,
5308 u16 cmd_len
, void *data
)
5310 struct l2cap_move_chan_rsp
*rsp
= data
;
5313 if (cmd_len
!= sizeof(*rsp
))
5316 icid
= le16_to_cpu(rsp
->icid
);
5317 result
= le16_to_cpu(rsp
->result
);
5319 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5321 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5322 l2cap_move_continue(conn
, icid
, result
);
5324 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5329 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5330 struct l2cap_cmd_hdr
*cmd
,
5331 u16 cmd_len
, void *data
)
5333 struct l2cap_move_chan_cfm
*cfm
= data
;
5334 struct l2cap_chan
*chan
;
5337 if (cmd_len
!= sizeof(*cfm
))
5340 icid
= le16_to_cpu(cfm
->icid
);
5341 result
= le16_to_cpu(cfm
->result
);
5343 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5345 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5347 /* Spec requires a response even if the icid was not found */
5348 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5352 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5353 if (result
== L2CAP_MC_CONFIRMED
) {
5354 chan
->local_amp_id
= chan
->move_id
;
5355 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5356 __release_logical_link(chan
);
5358 chan
->move_id
= chan
->local_amp_id
;
5361 l2cap_move_done(chan
);
5364 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5366 l2cap_chan_unlock(chan
);
5371 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5372 struct l2cap_cmd_hdr
*cmd
,
5373 u16 cmd_len
, void *data
)
5375 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5376 struct l2cap_chan
*chan
;
5379 if (cmd_len
!= sizeof(*rsp
))
5382 icid
= le16_to_cpu(rsp
->icid
);
5384 BT_DBG("icid 0x%4.4x", icid
);
5386 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5390 __clear_chan_timer(chan
);
5392 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5393 chan
->local_amp_id
= chan
->move_id
;
5395 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5396 __release_logical_link(chan
);
5398 l2cap_move_done(chan
);
5401 l2cap_chan_unlock(chan
);
5406 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
5411 if (min
> max
|| min
< 6 || max
> 3200)
5414 if (to_multiplier
< 10 || to_multiplier
> 3200)
5417 if (max
>= to_multiplier
* 8)
5420 max_latency
= (to_multiplier
* 8 / max
) - 1;
5421 if (latency
> 499 || latency
> max_latency
)
5427 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5428 struct l2cap_cmd_hdr
*cmd
,
5429 u16 cmd_len
, u8
*data
)
5431 struct hci_conn
*hcon
= conn
->hcon
;
5432 struct l2cap_conn_param_update_req
*req
;
5433 struct l2cap_conn_param_update_rsp rsp
;
5434 u16 min
, max
, latency
, to_multiplier
;
5437 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
5440 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5443 req
= (struct l2cap_conn_param_update_req
*) data
;
5444 min
= __le16_to_cpu(req
->min
);
5445 max
= __le16_to_cpu(req
->max
);
5446 latency
= __le16_to_cpu(req
->latency
);
5447 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5449 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5450 min
, max
, latency
, to_multiplier
);
5452 memset(&rsp
, 0, sizeof(rsp
));
5454 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
5456 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5458 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5460 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5464 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
5469 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5470 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5473 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5474 u16 dcid
, mtu
, mps
, credits
, result
;
5475 struct l2cap_chan
*chan
;
5478 if (cmd_len
< sizeof(*rsp
))
5481 dcid
= __le16_to_cpu(rsp
->dcid
);
5482 mtu
= __le16_to_cpu(rsp
->mtu
);
5483 mps
= __le16_to_cpu(rsp
->mps
);
5484 credits
= __le16_to_cpu(rsp
->credits
);
5485 result
= __le16_to_cpu(rsp
->result
);
5487 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23))
5490 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5491 dcid
, mtu
, mps
, credits
, result
);
5493 mutex_lock(&conn
->chan_lock
);
5495 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5503 l2cap_chan_lock(chan
);
5506 case L2CAP_CR_SUCCESS
:
5510 chan
->remote_mps
= mps
;
5511 chan
->tx_credits
= credits
;
5512 l2cap_chan_ready(chan
);
5516 l2cap_chan_del(chan
, ECONNREFUSED
);
5520 l2cap_chan_unlock(chan
);
5523 mutex_unlock(&conn
->chan_lock
);
5528 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5529 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5534 switch (cmd
->code
) {
5535 case L2CAP_COMMAND_REJ
:
5536 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5539 case L2CAP_CONN_REQ
:
5540 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5543 case L2CAP_CONN_RSP
:
5544 case L2CAP_CREATE_CHAN_RSP
:
5545 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5548 case L2CAP_CONF_REQ
:
5549 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5552 case L2CAP_CONF_RSP
:
5553 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5556 case L2CAP_DISCONN_REQ
:
5557 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5560 case L2CAP_DISCONN_RSP
:
5561 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5564 case L2CAP_ECHO_REQ
:
5565 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5568 case L2CAP_ECHO_RSP
:
5571 case L2CAP_INFO_REQ
:
5572 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5575 case L2CAP_INFO_RSP
:
5576 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5579 case L2CAP_CREATE_CHAN_REQ
:
5580 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5583 case L2CAP_MOVE_CHAN_REQ
:
5584 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5587 case L2CAP_MOVE_CHAN_RSP
:
5588 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5591 case L2CAP_MOVE_CHAN_CFM
:
5592 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5595 case L2CAP_MOVE_CHAN_CFM_RSP
:
5596 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5600 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5608 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5609 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5612 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5613 struct l2cap_le_conn_rsp rsp
;
5614 struct l2cap_chan
*chan
, *pchan
;
5615 u16 dcid
, scid
, credits
, mtu
, mps
;
5619 if (cmd_len
!= sizeof(*req
))
5622 scid
= __le16_to_cpu(req
->scid
);
5623 mtu
= __le16_to_cpu(req
->mtu
);
5624 mps
= __le16_to_cpu(req
->mps
);
5629 if (mtu
< 23 || mps
< 23)
5632 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5635 /* Check if we have socket listening on psm */
5636 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5637 &conn
->hcon
->dst
, LE_LINK
);
5639 result
= L2CAP_CR_BAD_PSM
;
5644 mutex_lock(&conn
->chan_lock
);
5645 l2cap_chan_lock(pchan
);
5647 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
)) {
5648 result
= L2CAP_CR_AUTHENTICATION
;
5650 goto response_unlock
;
5653 /* Check if we already have channel with that dcid */
5654 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5655 result
= L2CAP_CR_NO_MEM
;
5657 goto response_unlock
;
5660 chan
= pchan
->ops
->new_connection(pchan
);
5662 result
= L2CAP_CR_NO_MEM
;
5663 goto response_unlock
;
5666 bacpy(&chan
->src
, &conn
->hcon
->src
);
5667 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5668 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
5669 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
5673 chan
->remote_mps
= mps
;
5674 chan
->tx_credits
= __le16_to_cpu(req
->credits
);
5676 __l2cap_chan_add(conn
, chan
);
5678 credits
= chan
->rx_credits
;
5680 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5682 chan
->ident
= cmd
->ident
;
5684 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5685 l2cap_state_change(chan
, BT_CONNECT2
);
5686 result
= L2CAP_CR_PEND
;
5687 chan
->ops
->defer(chan
);
5689 l2cap_chan_ready(chan
);
5690 result
= L2CAP_CR_SUCCESS
;
5694 l2cap_chan_unlock(pchan
);
5695 mutex_unlock(&conn
->chan_lock
);
5697 if (result
== L2CAP_CR_PEND
)
5702 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5703 rsp
.mps
= cpu_to_le16(chan
->mps
);
5709 rsp
.dcid
= cpu_to_le16(dcid
);
5710 rsp
.credits
= cpu_to_le16(credits
);
5711 rsp
.result
= cpu_to_le16(result
);
5713 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
5718 static inline int l2cap_le_credits(struct l2cap_conn
*conn
,
5719 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5722 struct l2cap_le_credits
*pkt
;
5723 struct l2cap_chan
*chan
;
5726 if (cmd_len
!= sizeof(*pkt
))
5729 pkt
= (struct l2cap_le_credits
*) data
;
5730 cid
= __le16_to_cpu(pkt
->cid
);
5731 credits
= __le16_to_cpu(pkt
->credits
);
5733 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid
, credits
);
5735 chan
= l2cap_get_chan_by_dcid(conn
, cid
);
5739 chan
->tx_credits
+= credits
;
5741 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
5742 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
5746 if (chan
->tx_credits
)
5747 chan
->ops
->resume(chan
);
5749 l2cap_chan_unlock(chan
);
5754 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5755 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5760 if (!enable_lecoc
) {
5761 switch (cmd
->code
) {
5762 case L2CAP_LE_CONN_REQ
:
5763 case L2CAP_LE_CONN_RSP
:
5764 case L2CAP_LE_CREDITS
:
5765 case L2CAP_DISCONN_REQ
:
5766 case L2CAP_DISCONN_RSP
:
5771 switch (cmd
->code
) {
5772 case L2CAP_COMMAND_REJ
:
5775 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5776 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5779 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5782 case L2CAP_LE_CONN_RSP
:
5783 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5786 case L2CAP_LE_CONN_REQ
:
5787 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5790 case L2CAP_LE_CREDITS
:
5791 err
= l2cap_le_credits(conn
, cmd
, cmd_len
, data
);
5794 case L2CAP_DISCONN_REQ
:
5795 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5798 case L2CAP_DISCONN_RSP
:
5799 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5803 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5811 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5812 struct sk_buff
*skb
)
5814 struct hci_conn
*hcon
= conn
->hcon
;
5815 struct l2cap_cmd_hdr
*cmd
;
5819 if (hcon
->type
!= LE_LINK
)
5822 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5825 cmd
= (void *) skb
->data
;
5826 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5828 len
= le16_to_cpu(cmd
->len
);
5830 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5832 if (len
!= skb
->len
|| !cmd
->ident
) {
5833 BT_DBG("corrupted command");
5837 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5839 struct l2cap_cmd_rej_unk rej
;
5841 BT_ERR("Wrong link type (%d)", err
);
5843 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5844 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5852 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5853 struct sk_buff
*skb
)
5855 struct hci_conn
*hcon
= conn
->hcon
;
5856 u8
*data
= skb
->data
;
5858 struct l2cap_cmd_hdr cmd
;
5861 l2cap_raw_recv(conn
, skb
);
5863 if (hcon
->type
!= ACL_LINK
)
5866 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5868 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5869 data
+= L2CAP_CMD_HDR_SIZE
;
5870 len
-= L2CAP_CMD_HDR_SIZE
;
5872 cmd_len
= le16_to_cpu(cmd
.len
);
5874 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5877 if (cmd_len
> len
|| !cmd
.ident
) {
5878 BT_DBG("corrupted command");
5882 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5884 struct l2cap_cmd_rej_unk rej
;
5886 BT_ERR("Wrong link type (%d)", err
);
5888 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5889 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5901 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5903 u16 our_fcs
, rcv_fcs
;
5906 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5907 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5909 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5911 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5912 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5913 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5914 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5916 if (our_fcs
!= rcv_fcs
)
5922 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5924 struct l2cap_ctrl control
;
5926 BT_DBG("chan %p", chan
);
5928 memset(&control
, 0, sizeof(control
));
5931 control
.reqseq
= chan
->buffer_seq
;
5932 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5934 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5935 control
.super
= L2CAP_SUPER_RNR
;
5936 l2cap_send_sframe(chan
, &control
);
5939 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5940 chan
->unacked_frames
> 0)
5941 __set_retrans_timer(chan
);
5943 /* Send pending iframes */
5944 l2cap_ertm_send(chan
);
5946 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5947 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5948 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5951 control
.super
= L2CAP_SUPER_RR
;
5952 l2cap_send_sframe(chan
, &control
);
5956 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5957 struct sk_buff
**last_frag
)
5959 /* skb->len reflects data in skb as well as all fragments
5960 * skb->data_len reflects only data in fragments
5962 if (!skb_has_frag_list(skb
))
5963 skb_shinfo(skb
)->frag_list
= new_frag
;
5965 new_frag
->next
= NULL
;
5967 (*last_frag
)->next
= new_frag
;
5968 *last_frag
= new_frag
;
5970 skb
->len
+= new_frag
->len
;
5971 skb
->data_len
+= new_frag
->len
;
5972 skb
->truesize
+= new_frag
->truesize
;
5975 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5976 struct l2cap_ctrl
*control
)
5980 switch (control
->sar
) {
5981 case L2CAP_SAR_UNSEGMENTED
:
5985 err
= chan
->ops
->recv(chan
, skb
);
5988 case L2CAP_SAR_START
:
5992 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5993 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5995 if (chan
->sdu_len
> chan
->imtu
) {
6000 if (skb
->len
>= chan
->sdu_len
)
6004 chan
->sdu_last_frag
= skb
;
6010 case L2CAP_SAR_CONTINUE
:
6014 append_skb_frag(chan
->sdu
, skb
,
6015 &chan
->sdu_last_frag
);
6018 if (chan
->sdu
->len
>= chan
->sdu_len
)
6028 append_skb_frag(chan
->sdu
, skb
,
6029 &chan
->sdu_last_frag
);
6032 if (chan
->sdu
->len
!= chan
->sdu_len
)
6035 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6038 /* Reassembly complete */
6040 chan
->sdu_last_frag
= NULL
;
6048 kfree_skb(chan
->sdu
);
6050 chan
->sdu_last_frag
= NULL
;
6057 static int l2cap_resegment(struct l2cap_chan
*chan
)
6063 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
6067 if (chan
->mode
!= L2CAP_MODE_ERTM
)
6070 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
6071 l2cap_tx(chan
, NULL
, NULL
, event
);
6074 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
6077 /* Pass sequential frames to l2cap_reassemble_sdu()
6078 * until a gap is encountered.
6081 BT_DBG("chan %p", chan
);
6083 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6084 struct sk_buff
*skb
;
6085 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6086 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
6088 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
6093 skb_unlink(skb
, &chan
->srej_q
);
6094 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6095 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
6100 if (skb_queue_empty(&chan
->srej_q
)) {
6101 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6102 l2cap_send_ack(chan
);
6108 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
6109 struct l2cap_ctrl
*control
)
6111 struct sk_buff
*skb
;
6113 BT_DBG("chan %p, control %p", chan
, control
);
6115 if (control
->reqseq
== chan
->next_tx_seq
) {
6116 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
6117 l2cap_send_disconn_req(chan
, ECONNRESET
);
6121 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
6124 BT_DBG("Seq %d not available for retransmission",
6129 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
6130 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
6131 l2cap_send_disconn_req(chan
, ECONNRESET
);
6135 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6137 if (control
->poll
) {
6138 l2cap_pass_to_tx(chan
, control
);
6140 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6141 l2cap_retransmit(chan
, control
);
6142 l2cap_ertm_send(chan
);
6144 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
6145 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6146 chan
->srej_save_reqseq
= control
->reqseq
;
6149 l2cap_pass_to_tx_fbit(chan
, control
);
6151 if (control
->final
) {
6152 if (chan
->srej_save_reqseq
!= control
->reqseq
||
6153 !test_and_clear_bit(CONN_SREJ_ACT
,
6155 l2cap_retransmit(chan
, control
);
6157 l2cap_retransmit(chan
, control
);
6158 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
6159 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6160 chan
->srej_save_reqseq
= control
->reqseq
;
6166 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
6167 struct l2cap_ctrl
*control
)
6169 struct sk_buff
*skb
;
6171 BT_DBG("chan %p, control %p", chan
, control
);
6173 if (control
->reqseq
== chan
->next_tx_seq
) {
6174 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
6175 l2cap_send_disconn_req(chan
, ECONNRESET
);
6179 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
6181 if (chan
->max_tx
&& skb
&&
6182 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
6183 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
6184 l2cap_send_disconn_req(chan
, ECONNRESET
);
6188 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6190 l2cap_pass_to_tx(chan
, control
);
6192 if (control
->final
) {
6193 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
6194 l2cap_retransmit_all(chan
, control
);
6196 l2cap_retransmit_all(chan
, control
);
6197 l2cap_ertm_send(chan
);
6198 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
6199 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
6203 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
6205 BT_DBG("chan %p, txseq %d", chan
, txseq
);
6207 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
6208 chan
->expected_tx_seq
);
6210 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
6211 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6213 /* See notes below regarding "double poll" and
6216 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6217 BT_DBG("Invalid/Ignore - after SREJ");
6218 return L2CAP_TXSEQ_INVALID_IGNORE
;
6220 BT_DBG("Invalid - in window after SREJ sent");
6221 return L2CAP_TXSEQ_INVALID
;
6225 if (chan
->srej_list
.head
== txseq
) {
6226 BT_DBG("Expected SREJ");
6227 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6230 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6231 BT_DBG("Duplicate SREJ - txseq already stored");
6232 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6235 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6236 BT_DBG("Unexpected SREJ - not requested");
6237 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6241 if (chan
->expected_tx_seq
== txseq
) {
6242 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6244 BT_DBG("Invalid - txseq outside tx window");
6245 return L2CAP_TXSEQ_INVALID
;
6248 return L2CAP_TXSEQ_EXPECTED
;
6252 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6253 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6254 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6255 return L2CAP_TXSEQ_DUPLICATE
;
6258 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6259 /* A source of invalid packets is a "double poll" condition,
6260 * where delays cause us to send multiple poll packets. If
6261 * the remote stack receives and processes both polls,
6262 * sequence numbers can wrap around in such a way that a
6263 * resent frame has a sequence number that looks like new data
6264 * with a sequence gap. This would trigger an erroneous SREJ
6267 * Fortunately, this is impossible with a tx window that's
6268 * less than half of the maximum sequence number, which allows
6269 * invalid frames to be safely ignored.
6271 * With tx window sizes greater than half of the tx window
6272 * maximum, the frame is invalid and cannot be ignored. This
6273 * causes a disconnect.
6276 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6277 BT_DBG("Invalid/Ignore - txseq outside tx window");
6278 return L2CAP_TXSEQ_INVALID_IGNORE
;
6280 BT_DBG("Invalid - txseq outside tx window");
6281 return L2CAP_TXSEQ_INVALID
;
6284 BT_DBG("Unexpected - txseq indicates missing frames");
6285 return L2CAP_TXSEQ_UNEXPECTED
;
6289 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6290 struct l2cap_ctrl
*control
,
6291 struct sk_buff
*skb
, u8 event
)
6294 bool skb_in_use
= false;
6296 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6300 case L2CAP_EV_RECV_IFRAME
:
6301 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6302 case L2CAP_TXSEQ_EXPECTED
:
6303 l2cap_pass_to_tx(chan
, control
);
6305 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6306 BT_DBG("Busy, discarding expected seq %d",
6311 chan
->expected_tx_seq
= __next_seq(chan
,
6314 chan
->buffer_seq
= chan
->expected_tx_seq
;
6317 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6321 if (control
->final
) {
6322 if (!test_and_clear_bit(CONN_REJ_ACT
,
6323 &chan
->conn_state
)) {
6325 l2cap_retransmit_all(chan
, control
);
6326 l2cap_ertm_send(chan
);
6330 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6331 l2cap_send_ack(chan
);
6333 case L2CAP_TXSEQ_UNEXPECTED
:
6334 l2cap_pass_to_tx(chan
, control
);
6336 /* Can't issue SREJ frames in the local busy state.
6337 * Drop this frame, it will be seen as missing
6338 * when local busy is exited.
6340 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6341 BT_DBG("Busy, discarding unexpected seq %d",
6346 /* There was a gap in the sequence, so an SREJ
6347 * must be sent for each missing frame. The
6348 * current frame is stored for later use.
6350 skb_queue_tail(&chan
->srej_q
, skb
);
6352 BT_DBG("Queued %p (queue len %d)", skb
,
6353 skb_queue_len(&chan
->srej_q
));
6355 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6356 l2cap_seq_list_clear(&chan
->srej_list
);
6357 l2cap_send_srej(chan
, control
->txseq
);
6359 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6361 case L2CAP_TXSEQ_DUPLICATE
:
6362 l2cap_pass_to_tx(chan
, control
);
6364 case L2CAP_TXSEQ_INVALID_IGNORE
:
6366 case L2CAP_TXSEQ_INVALID
:
6368 l2cap_send_disconn_req(chan
, ECONNRESET
);
6372 case L2CAP_EV_RECV_RR
:
6373 l2cap_pass_to_tx(chan
, control
);
6374 if (control
->final
) {
6375 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6377 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6378 !__chan_is_moving(chan
)) {
6380 l2cap_retransmit_all(chan
, control
);
6383 l2cap_ertm_send(chan
);
6384 } else if (control
->poll
) {
6385 l2cap_send_i_or_rr_or_rnr(chan
);
6387 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6388 &chan
->conn_state
) &&
6389 chan
->unacked_frames
)
6390 __set_retrans_timer(chan
);
6392 l2cap_ertm_send(chan
);
6395 case L2CAP_EV_RECV_RNR
:
6396 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6397 l2cap_pass_to_tx(chan
, control
);
6398 if (control
&& control
->poll
) {
6399 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6400 l2cap_send_rr_or_rnr(chan
, 0);
6402 __clear_retrans_timer(chan
);
6403 l2cap_seq_list_clear(&chan
->retrans_list
);
6405 case L2CAP_EV_RECV_REJ
:
6406 l2cap_handle_rej(chan
, control
);
6408 case L2CAP_EV_RECV_SREJ
:
6409 l2cap_handle_srej(chan
, control
);
6415 if (skb
&& !skb_in_use
) {
6416 BT_DBG("Freeing %p", skb
);
6423 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6424 struct l2cap_ctrl
*control
,
6425 struct sk_buff
*skb
, u8 event
)
6428 u16 txseq
= control
->txseq
;
6429 bool skb_in_use
= false;
6431 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6435 case L2CAP_EV_RECV_IFRAME
:
6436 switch (l2cap_classify_txseq(chan
, txseq
)) {
6437 case L2CAP_TXSEQ_EXPECTED
:
6438 /* Keep frame for reassembly later */
6439 l2cap_pass_to_tx(chan
, control
);
6440 skb_queue_tail(&chan
->srej_q
, skb
);
6442 BT_DBG("Queued %p (queue len %d)", skb
,
6443 skb_queue_len(&chan
->srej_q
));
6445 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6447 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6448 l2cap_seq_list_pop(&chan
->srej_list
);
6450 l2cap_pass_to_tx(chan
, control
);
6451 skb_queue_tail(&chan
->srej_q
, skb
);
6453 BT_DBG("Queued %p (queue len %d)", skb
,
6454 skb_queue_len(&chan
->srej_q
));
6456 err
= l2cap_rx_queued_iframes(chan
);
6461 case L2CAP_TXSEQ_UNEXPECTED
:
6462 /* Got a frame that can't be reassembled yet.
6463 * Save it for later, and send SREJs to cover
6464 * the missing frames.
6466 skb_queue_tail(&chan
->srej_q
, skb
);
6468 BT_DBG("Queued %p (queue len %d)", skb
,
6469 skb_queue_len(&chan
->srej_q
));
6471 l2cap_pass_to_tx(chan
, control
);
6472 l2cap_send_srej(chan
, control
->txseq
);
6474 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6475 /* This frame was requested with an SREJ, but
6476 * some expected retransmitted frames are
6477 * missing. Request retransmission of missing
6480 skb_queue_tail(&chan
->srej_q
, skb
);
6482 BT_DBG("Queued %p (queue len %d)", skb
,
6483 skb_queue_len(&chan
->srej_q
));
6485 l2cap_pass_to_tx(chan
, control
);
6486 l2cap_send_srej_list(chan
, control
->txseq
);
6488 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6489 /* We've already queued this frame. Drop this copy. */
6490 l2cap_pass_to_tx(chan
, control
);
6492 case L2CAP_TXSEQ_DUPLICATE
:
6493 /* Expecting a later sequence number, so this frame
6494 * was already received. Ignore it completely.
6497 case L2CAP_TXSEQ_INVALID_IGNORE
:
6499 case L2CAP_TXSEQ_INVALID
:
6501 l2cap_send_disconn_req(chan
, ECONNRESET
);
6505 case L2CAP_EV_RECV_RR
:
6506 l2cap_pass_to_tx(chan
, control
);
6507 if (control
->final
) {
6508 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6510 if (!test_and_clear_bit(CONN_REJ_ACT
,
6511 &chan
->conn_state
)) {
6513 l2cap_retransmit_all(chan
, control
);
6516 l2cap_ertm_send(chan
);
6517 } else if (control
->poll
) {
6518 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6519 &chan
->conn_state
) &&
6520 chan
->unacked_frames
) {
6521 __set_retrans_timer(chan
);
6524 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6525 l2cap_send_srej_tail(chan
);
6527 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6528 &chan
->conn_state
) &&
6529 chan
->unacked_frames
)
6530 __set_retrans_timer(chan
);
6532 l2cap_send_ack(chan
);
6535 case L2CAP_EV_RECV_RNR
:
6536 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6537 l2cap_pass_to_tx(chan
, control
);
6538 if (control
->poll
) {
6539 l2cap_send_srej_tail(chan
);
6541 struct l2cap_ctrl rr_control
;
6542 memset(&rr_control
, 0, sizeof(rr_control
));
6543 rr_control
.sframe
= 1;
6544 rr_control
.super
= L2CAP_SUPER_RR
;
6545 rr_control
.reqseq
= chan
->buffer_seq
;
6546 l2cap_send_sframe(chan
, &rr_control
);
6550 case L2CAP_EV_RECV_REJ
:
6551 l2cap_handle_rej(chan
, control
);
6553 case L2CAP_EV_RECV_SREJ
:
6554 l2cap_handle_srej(chan
, control
);
6558 if (skb
&& !skb_in_use
) {
6559 BT_DBG("Freeing %p", skb
);
6566 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6568 BT_DBG("chan %p", chan
);
6570 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6573 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6575 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6577 return l2cap_resegment(chan
);
6580 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6581 struct l2cap_ctrl
*control
,
6582 struct sk_buff
*skb
, u8 event
)
6586 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6592 l2cap_process_reqseq(chan
, control
->reqseq
);
6594 if (!skb_queue_empty(&chan
->tx_q
))
6595 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6597 chan
->tx_send_head
= NULL
;
6599 /* Rewind next_tx_seq to the point expected
6602 chan
->next_tx_seq
= control
->reqseq
;
6603 chan
->unacked_frames
= 0;
6605 err
= l2cap_finish_move(chan
);
6609 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6610 l2cap_send_i_or_rr_or_rnr(chan
);
6612 if (event
== L2CAP_EV_RECV_IFRAME
)
6615 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6618 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6619 struct l2cap_ctrl
*control
,
6620 struct sk_buff
*skb
, u8 event
)
6624 if (!control
->final
)
6627 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6629 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6630 l2cap_process_reqseq(chan
, control
->reqseq
);
6632 if (!skb_queue_empty(&chan
->tx_q
))
6633 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6635 chan
->tx_send_head
= NULL
;
6637 /* Rewind next_tx_seq to the point expected
6640 chan
->next_tx_seq
= control
->reqseq
;
6641 chan
->unacked_frames
= 0;
6644 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6646 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6648 err
= l2cap_resegment(chan
);
6651 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6656 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6658 /* Make sure reqseq is for a packet that has been sent but not acked */
6661 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6662 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6665 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6666 struct sk_buff
*skb
, u8 event
)
6670 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6671 control
, skb
, event
, chan
->rx_state
);
6673 if (__valid_reqseq(chan
, control
->reqseq
)) {
6674 switch (chan
->rx_state
) {
6675 case L2CAP_RX_STATE_RECV
:
6676 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6678 case L2CAP_RX_STATE_SREJ_SENT
:
6679 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6682 case L2CAP_RX_STATE_WAIT_P
:
6683 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6685 case L2CAP_RX_STATE_WAIT_F
:
6686 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6693 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6694 control
->reqseq
, chan
->next_tx_seq
,
6695 chan
->expected_ack_seq
);
6696 l2cap_send_disconn_req(chan
, ECONNRESET
);
6702 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6703 struct sk_buff
*skb
)
6707 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6710 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6711 L2CAP_TXSEQ_EXPECTED
) {
6712 l2cap_pass_to_tx(chan
, control
);
6714 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6715 __next_seq(chan
, chan
->buffer_seq
));
6717 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6719 l2cap_reassemble_sdu(chan
, skb
, control
);
6722 kfree_skb(chan
->sdu
);
6725 chan
->sdu_last_frag
= NULL
;
6729 BT_DBG("Freeing %p", skb
);
6734 chan
->last_acked_seq
= control
->txseq
;
6735 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6740 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6742 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6746 __unpack_control(chan
, skb
);
6751 * We can just drop the corrupted I-frame here.
6752 * Receiver will miss it and start proper recovery
6753 * procedures and ask for retransmission.
6755 if (l2cap_check_fcs(chan
, skb
))
6758 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6759 len
-= L2CAP_SDULEN_SIZE
;
6761 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6762 len
-= L2CAP_FCS_SIZE
;
6764 if (len
> chan
->mps
) {
6765 l2cap_send_disconn_req(chan
, ECONNRESET
);
6769 if (!control
->sframe
) {
6772 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6773 control
->sar
, control
->reqseq
, control
->final
,
6776 /* Validate F-bit - F=0 always valid, F=1 only
6777 * valid in TX WAIT_F
6779 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6782 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6783 event
= L2CAP_EV_RECV_IFRAME
;
6784 err
= l2cap_rx(chan
, control
, skb
, event
);
6786 err
= l2cap_stream_rx(chan
, control
, skb
);
6790 l2cap_send_disconn_req(chan
, ECONNRESET
);
6792 const u8 rx_func_to_event
[4] = {
6793 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6794 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6797 /* Only I-frames are expected in streaming mode */
6798 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6801 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6802 control
->reqseq
, control
->final
, control
->poll
,
6806 BT_ERR("Trailing bytes: %d in sframe", len
);
6807 l2cap_send_disconn_req(chan
, ECONNRESET
);
6811 /* Validate F and P bits */
6812 if (control
->final
&& (control
->poll
||
6813 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6816 event
= rx_func_to_event
[control
->super
];
6817 if (l2cap_rx(chan
, control
, skb
, event
))
6818 l2cap_send_disconn_req(chan
, ECONNRESET
);
6828 static void l2cap_chan_le_send_credits(struct l2cap_chan
*chan
)
6830 struct l2cap_conn
*conn
= chan
->conn
;
6831 struct l2cap_le_credits pkt
;
6834 /* We return more credits to the sender only after the amount of
6835 * credits falls below half of the initial amount.
6837 if (chan
->rx_credits
>= (le_max_credits
+ 1) / 2)
6840 return_credits
= le_max_credits
- chan
->rx_credits
;
6842 BT_DBG("chan %p returning %u credits to sender", chan
, return_credits
);
6844 chan
->rx_credits
+= return_credits
;
6846 pkt
.cid
= cpu_to_le16(chan
->scid
);
6847 pkt
.credits
= cpu_to_le16(return_credits
);
6849 chan
->ident
= l2cap_get_ident(conn
);
6851 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CREDITS
, sizeof(pkt
), &pkt
);
6854 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6858 if (!chan
->rx_credits
) {
6859 BT_ERR("No credits to receive LE L2CAP data");
6863 if (chan
->imtu
< skb
->len
) {
6864 BT_ERR("Too big LE L2CAP PDU");
6869 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6871 l2cap_chan_le_send_credits(chan
);
6878 sdu_len
= get_unaligned_le16(skb
->data
);
6879 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6881 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6882 sdu_len
, skb
->len
, chan
->imtu
);
6884 if (sdu_len
> chan
->imtu
) {
6885 BT_ERR("Too big LE L2CAP SDU length received");
6890 if (skb
->len
> sdu_len
) {
6891 BT_ERR("Too much LE L2CAP data received");
6896 if (skb
->len
== sdu_len
)
6897 return chan
->ops
->recv(chan
, skb
);
6900 chan
->sdu_len
= sdu_len
;
6901 chan
->sdu_last_frag
= skb
;
6906 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6907 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6909 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6910 BT_ERR("Too much LE L2CAP data received");
6915 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6918 if (chan
->sdu
->len
== chan
->sdu_len
) {
6919 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6922 chan
->sdu_last_frag
= NULL
;
6930 kfree_skb(chan
->sdu
);
6932 chan
->sdu_last_frag
= NULL
;
6936 /* We can't return an error here since we took care of the skb
6937 * freeing internally. An error return would cause the caller to
6938 * do a double-free of the skb.
6943 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6944 struct sk_buff
*skb
)
6946 struct l2cap_chan
*chan
;
6948 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6950 if (cid
== L2CAP_CID_A2MP
) {
6951 chan
= a2mp_channel_create(conn
, skb
);
6957 l2cap_chan_lock(chan
);
6959 BT_DBG("unknown cid 0x%4.4x", cid
);
6960 /* Drop packet and return */
6966 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6968 if (chan
->state
!= BT_CONNECTED
)
6971 switch (chan
->mode
) {
6972 case L2CAP_MODE_LE_FLOWCTL
:
6973 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6978 case L2CAP_MODE_BASIC
:
6979 /* If socket recv buffers overflows we drop data here
6980 * which is *bad* because L2CAP has to be reliable.
6981 * But we don't have any other choice. L2CAP doesn't
6982 * provide flow control mechanism. */
6984 if (chan
->imtu
< skb
->len
)
6987 if (!chan
->ops
->recv(chan
, skb
))
6991 case L2CAP_MODE_ERTM
:
6992 case L2CAP_MODE_STREAMING
:
6993 l2cap_data_rcv(chan
, skb
);
6997 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
7005 l2cap_chan_unlock(chan
);
7008 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
7009 struct sk_buff
*skb
)
7011 struct hci_conn
*hcon
= conn
->hcon
;
7012 struct l2cap_chan
*chan
;
7014 if (hcon
->type
!= ACL_LINK
)
7017 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
7022 BT_DBG("chan %p, len %d", chan
, skb
->len
);
7024 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
7027 if (chan
->imtu
< skb
->len
)
7030 /* Store remote BD_ADDR and PSM for msg_name */
7031 bacpy(&bt_cb(skb
)->bdaddr
, &hcon
->dst
);
7032 bt_cb(skb
)->psm
= psm
;
7034 if (!chan
->ops
->recv(chan
, skb
))
7041 static void l2cap_att_channel(struct l2cap_conn
*conn
,
7042 struct sk_buff
*skb
)
7044 struct hci_conn
*hcon
= conn
->hcon
;
7045 struct l2cap_chan
*chan
;
7047 if (hcon
->type
!= LE_LINK
)
7050 chan
= l2cap_global_chan_by_scid(BT_CONNECTED
, L2CAP_CID_ATT
,
7051 &hcon
->src
, &hcon
->dst
);
7055 BT_DBG("chan %p, len %d", chan
, skb
->len
);
7057 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
))
7060 if (chan
->imtu
< skb
->len
)
7063 if (!chan
->ops
->recv(chan
, skb
))
7070 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
7072 struct l2cap_hdr
*lh
= (void *) skb
->data
;
7076 skb_pull(skb
, L2CAP_HDR_SIZE
);
7077 cid
= __le16_to_cpu(lh
->cid
);
7078 len
= __le16_to_cpu(lh
->len
);
7080 if (len
!= skb
->len
) {
7085 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
7088 case L2CAP_CID_SIGNALING
:
7089 l2cap_sig_channel(conn
, skb
);
7092 case L2CAP_CID_CONN_LESS
:
7093 psm
= get_unaligned((__le16
*) skb
->data
);
7094 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
7095 l2cap_conless_channel(conn
, psm
, skb
);
7099 l2cap_att_channel(conn
, skb
);
7102 case L2CAP_CID_LE_SIGNALING
:
7103 l2cap_le_sig_channel(conn
, skb
);
7107 if (smp_sig_channel(conn
, skb
))
7108 l2cap_conn_del(conn
->hcon
, EACCES
);
7112 l2cap_data_channel(conn
, cid
, skb
);
7117 /* ---- L2CAP interface with lower layer (HCI) ---- */
7119 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
7121 int exact
= 0, lm1
= 0, lm2
= 0;
7122 struct l2cap_chan
*c
;
7124 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
7126 /* Find listening sockets and check their link_mode */
7127 read_lock(&chan_list_lock
);
7128 list_for_each_entry(c
, &chan_list
, global_l
) {
7129 if (c
->state
!= BT_LISTEN
)
7132 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
7133 lm1
|= HCI_LM_ACCEPT
;
7134 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7135 lm1
|= HCI_LM_MASTER
;
7137 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
7138 lm2
|= HCI_LM_ACCEPT
;
7139 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7140 lm2
|= HCI_LM_MASTER
;
7143 read_unlock(&chan_list_lock
);
7145 return exact
? lm1
: lm2
;
7148 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7150 struct l2cap_conn
*conn
;
7152 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7155 conn
= l2cap_conn_add(hcon
);
7157 l2cap_conn_ready(conn
);
7159 l2cap_conn_del(hcon
, bt_to_errno(status
));
7163 int l2cap_disconn_ind(struct hci_conn
*hcon
)
7165 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7167 BT_DBG("hcon %p", hcon
);
7170 return HCI_ERROR_REMOTE_USER_TERM
;
7171 return conn
->disc_reason
;
7174 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
7176 BT_DBG("hcon %p reason %d", hcon
, reason
);
7178 l2cap_conn_del(hcon
, bt_to_errno(reason
));
7181 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
7183 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
7186 if (encrypt
== 0x00) {
7187 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
7188 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
7189 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
7190 l2cap_chan_close(chan
, ECONNREFUSED
);
7192 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
7193 __clear_chan_timer(chan
);
7197 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
7199 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7200 struct l2cap_chan
*chan
;
7205 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
7207 if (hcon
->type
== LE_LINK
) {
7208 if (!status
&& encrypt
)
7209 smp_distribute_keys(conn
, 0);
7210 cancel_delayed_work(&conn
->security_timer
);
7213 mutex_lock(&conn
->chan_lock
);
7215 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
7216 l2cap_chan_lock(chan
);
7218 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
7219 state_to_string(chan
->state
));
7221 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
7222 l2cap_chan_unlock(chan
);
7226 if (chan
->scid
== L2CAP_CID_ATT
) {
7227 if (!status
&& encrypt
) {
7228 chan
->sec_level
= hcon
->sec_level
;
7229 l2cap_chan_ready(chan
);
7232 l2cap_chan_unlock(chan
);
7236 if (!__l2cap_no_conn_pending(chan
)) {
7237 l2cap_chan_unlock(chan
);
7241 if (!status
&& (chan
->state
== BT_CONNECTED
||
7242 chan
->state
== BT_CONFIG
)) {
7243 chan
->ops
->resume(chan
);
7244 l2cap_check_encryption(chan
, encrypt
);
7245 l2cap_chan_unlock(chan
);
7249 if (chan
->state
== BT_CONNECT
) {
7251 l2cap_start_connection(chan
);
7253 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7254 } else if (chan
->state
== BT_CONNECT2
) {
7255 struct l2cap_conn_rsp rsp
;
7259 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
7260 res
= L2CAP_CR_PEND
;
7261 stat
= L2CAP_CS_AUTHOR_PEND
;
7262 chan
->ops
->defer(chan
);
7264 l2cap_state_change(chan
, BT_CONFIG
);
7265 res
= L2CAP_CR_SUCCESS
;
7266 stat
= L2CAP_CS_NO_INFO
;
7269 l2cap_state_change(chan
, BT_DISCONN
);
7270 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7271 res
= L2CAP_CR_SEC_BLOCK
;
7272 stat
= L2CAP_CS_NO_INFO
;
7275 rsp
.scid
= cpu_to_le16(chan
->dcid
);
7276 rsp
.dcid
= cpu_to_le16(chan
->scid
);
7277 rsp
.result
= cpu_to_le16(res
);
7278 rsp
.status
= cpu_to_le16(stat
);
7279 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
7282 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
7283 res
== L2CAP_CR_SUCCESS
) {
7285 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
7286 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
7288 l2cap_build_conf_req(chan
, buf
),
7290 chan
->num_conf_req
++;
7294 l2cap_chan_unlock(chan
);
7297 mutex_unlock(&conn
->chan_lock
);
7302 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
7304 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7305 struct l2cap_hdr
*hdr
;
7308 /* For AMP controller do not create l2cap conn */
7309 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
7313 conn
= l2cap_conn_add(hcon
);
7318 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
7322 case ACL_START_NO_FLUSH
:
7325 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
7326 kfree_skb(conn
->rx_skb
);
7327 conn
->rx_skb
= NULL
;
7329 l2cap_conn_unreliable(conn
, ECOMM
);
7332 /* Start fragment always begin with Basic L2CAP header */
7333 if (skb
->len
< L2CAP_HDR_SIZE
) {
7334 BT_ERR("Frame is too short (len %d)", skb
->len
);
7335 l2cap_conn_unreliable(conn
, ECOMM
);
7339 hdr
= (struct l2cap_hdr
*) skb
->data
;
7340 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7342 if (len
== skb
->len
) {
7343 /* Complete frame received */
7344 l2cap_recv_frame(conn
, skb
);
7348 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7350 if (skb
->len
> len
) {
7351 BT_ERR("Frame is too long (len %d, expected len %d)",
7353 l2cap_conn_unreliable(conn
, ECOMM
);
7357 /* Allocate skb for the complete frame (with header) */
7358 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7362 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7364 conn
->rx_len
= len
- skb
->len
;
7368 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7370 if (!conn
->rx_len
) {
7371 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7372 l2cap_conn_unreliable(conn
, ECOMM
);
7376 if (skb
->len
> conn
->rx_len
) {
7377 BT_ERR("Fragment is too long (len %d, expected %d)",
7378 skb
->len
, conn
->rx_len
);
7379 kfree_skb(conn
->rx_skb
);
7380 conn
->rx_skb
= NULL
;
7382 l2cap_conn_unreliable(conn
, ECOMM
);
7386 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7388 conn
->rx_len
-= skb
->len
;
7390 if (!conn
->rx_len
) {
7391 /* Complete frame received. l2cap_recv_frame
7392 * takes ownership of the skb so set the global
7393 * rx_skb pointer to NULL first.
7395 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7396 conn
->rx_skb
= NULL
;
7397 l2cap_recv_frame(conn
, rx_skb
);
7407 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
7409 struct l2cap_chan
*c
;
7411 read_lock(&chan_list_lock
);
7413 list_for_each_entry(c
, &chan_list
, global_l
) {
7414 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7416 c
->state
, __le16_to_cpu(c
->psm
),
7417 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
7418 c
->sec_level
, c
->mode
);
7421 read_unlock(&chan_list_lock
);
7426 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
7428 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
7431 static const struct file_operations l2cap_debugfs_fops
= {
7432 .open
= l2cap_debugfs_open
,
7434 .llseek
= seq_lseek
,
7435 .release
= single_release
,
7438 static struct dentry
*l2cap_debugfs
;
7440 int __init
l2cap_init(void)
7444 err
= l2cap_init_sockets();
7448 if (IS_ERR_OR_NULL(bt_debugfs
))
7451 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
7452 NULL
, &l2cap_debugfs_fops
);
7454 debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs
,
7456 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs
,
7462 void l2cap_exit(void)
7464 debugfs_remove(l2cap_debugfs
);
7465 l2cap_cleanup_sockets();
7468 module_param(disable_ertm
, bool, 0644);
7469 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");