2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
47 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
| L2CAP_FC_CONNLESS
, };
49 static LIST_HEAD(chan_list
);
50 static DEFINE_RWLOCK(chan_list_lock
);
52 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
53 u8 code
, u8 ident
, u16 dlen
, void *data
);
54 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
56 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
57 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
59 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
60 struct sk_buff_head
*skbs
, u8 event
);
62 static inline __u8
bdaddr_type(struct hci_conn
*hcon
, __u8 type
)
64 if (hcon
->type
== LE_LINK
) {
65 if (type
== ADDR_LE_DEV_PUBLIC
)
66 return BDADDR_LE_PUBLIC
;
68 return BDADDR_LE_RANDOM
;
74 /* ---- L2CAP channels ---- */
76 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
81 list_for_each_entry(c
, &conn
->chan_l
, list
) {
88 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
93 list_for_each_entry(c
, &conn
->chan_l
, list
) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
105 struct l2cap_chan
*c
;
107 mutex_lock(&conn
->chan_lock
);
108 c
= __l2cap_get_chan_by_scid(conn
, cid
);
111 mutex_unlock(&conn
->chan_lock
);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
122 struct l2cap_chan
*c
;
124 mutex_lock(&conn
->chan_lock
);
125 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
128 mutex_unlock(&conn
->chan_lock
);
133 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
136 struct l2cap_chan
*c
;
138 list_for_each_entry(c
, &conn
->chan_l
, list
) {
139 if (c
->ident
== ident
)
145 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
148 struct l2cap_chan
*c
;
150 mutex_lock(&conn
->chan_lock
);
151 c
= __l2cap_get_chan_by_ident(conn
, ident
);
154 mutex_unlock(&conn
->chan_lock
);
159 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
161 struct l2cap_chan
*c
;
163 list_for_each_entry(c
, &chan_list
, global_l
) {
164 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
170 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
174 write_lock(&chan_list_lock
);
176 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
189 for (p
= 0x1001; p
< 0x1100; p
+= 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
191 chan
->psm
= cpu_to_le16(p
);
192 chan
->sport
= cpu_to_le16(p
);
199 write_unlock(&chan_list_lock
);
203 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
205 write_lock(&chan_list_lock
);
209 write_unlock(&chan_list_lock
);
214 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
216 u16 cid
= L2CAP_CID_DYN_START
;
218 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
219 if (!__l2cap_get_chan_by_scid(conn
, cid
))
226 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
228 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
229 state_to_string(state
));
232 chan
->ops
->state_change(chan
, state
, 0);
235 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
239 chan
->ops
->state_change(chan
, chan
->state
, err
);
242 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
244 chan
->ops
->state_change(chan
, chan
->state
, err
);
247 static void __set_retrans_timer(struct l2cap_chan
*chan
)
249 if (!delayed_work_pending(&chan
->monitor_timer
) &&
250 chan
->retrans_timeout
) {
251 l2cap_set_timer(chan
, &chan
->retrans_timer
,
252 msecs_to_jiffies(chan
->retrans_timeout
));
256 static void __set_monitor_timer(struct l2cap_chan
*chan
)
258 __clear_retrans_timer(chan
);
259 if (chan
->monitor_timeout
) {
260 l2cap_set_timer(chan
, &chan
->monitor_timer
,
261 msecs_to_jiffies(chan
->monitor_timeout
));
265 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
270 skb_queue_walk(head
, skb
) {
271 if (bt_cb(skb
)->control
.txseq
== seq
)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
289 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
291 size_t alloc_size
, i
;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size
= roundup_pow_of_two(size
);
299 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
303 seq_list
->mask
= alloc_size
- 1;
304 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
305 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
306 for (i
= 0; i
< alloc_size
; i
++)
307 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
314 kfree(seq_list
->list
);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
320 /* Constant-time check for list membership */
321 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
324 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
326 u16 mask
= seq_list
->mask
;
328 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR
;
331 } else if (seq_list
->head
== seq
) {
332 /* Head can be removed in constant time */
333 seq_list
->head
= seq_list
->list
[seq
& mask
];
334 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
336 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
337 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
338 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
341 /* Walk the list to find the sequence number */
342 u16 prev
= seq_list
->head
;
343 while (seq_list
->list
[prev
& mask
] != seq
) {
344 prev
= seq_list
->list
[prev
& mask
];
345 if (prev
== L2CAP_SEQ_LIST_TAIL
)
346 return L2CAP_SEQ_LIST_CLEAR
;
349 /* Unlink the number from the list and clear it */
350 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
351 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
352 if (seq_list
->tail
== seq
)
353 seq_list
->tail
= prev
;
358 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
364 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
368 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
371 for (i
= 0; i
<= seq_list
->mask
; i
++)
372 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
374 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
375 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
378 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
380 u16 mask
= seq_list
->mask
;
382 /* All appends happen in constant time */
384 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
387 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
388 seq_list
->head
= seq
;
390 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
392 seq_list
->tail
= seq
;
393 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
396 static void l2cap_chan_timeout(struct work_struct
*work
)
398 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
400 struct l2cap_conn
*conn
= chan
->conn
;
403 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
405 mutex_lock(&conn
->chan_lock
);
406 l2cap_chan_lock(chan
);
408 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
409 reason
= ECONNREFUSED
;
410 else if (chan
->state
== BT_CONNECT
&&
411 chan
->sec_level
!= BT_SECURITY_SDP
)
412 reason
= ECONNREFUSED
;
416 l2cap_chan_close(chan
, reason
);
418 l2cap_chan_unlock(chan
);
420 chan
->ops
->close(chan
);
421 mutex_unlock(&conn
->chan_lock
);
423 l2cap_chan_put(chan
);
426 struct l2cap_chan
*l2cap_chan_create(void)
428 struct l2cap_chan
*chan
;
430 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
434 mutex_init(&chan
->lock
);
436 write_lock(&chan_list_lock
);
437 list_add(&chan
->global_l
, &chan_list
);
438 write_unlock(&chan_list_lock
);
440 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
442 chan
->state
= BT_OPEN
;
444 kref_init(&chan
->kref
);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
449 BT_DBG("chan %p", chan
);
454 static void l2cap_chan_destroy(struct kref
*kref
)
456 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
458 BT_DBG("chan %p", chan
);
460 write_lock(&chan_list_lock
);
461 list_del(&chan
->global_l
);
462 write_unlock(&chan_list_lock
);
467 void l2cap_chan_hold(struct l2cap_chan
*c
)
469 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
474 void l2cap_chan_put(struct l2cap_chan
*c
)
476 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
478 kref_put(&c
->kref
, l2cap_chan_destroy
);
481 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
483 chan
->fcs
= L2CAP_FCS_CRC16
;
484 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
485 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
486 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
487 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
488 chan
->sec_level
= BT_SECURITY_LOW
;
490 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
493 void l2cap_le_flowctl_init(struct l2cap_chan
*chan
)
495 chan
->imtu
= L2CAP_DEFAULT_MTU
;
496 chan
->omtu
= L2CAP_LE_MIN_MTU
;
497 chan
->mode
= L2CAP_MODE_LE_FLOWCTL
;
500 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
502 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
503 __le16_to_cpu(chan
->psm
), chan
->dcid
);
505 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
509 switch (chan
->chan_type
) {
510 case L2CAP_CHAN_CONN_ORIENTED
:
511 if (conn
->hcon
->type
== LE_LINK
) {
513 chan
->omtu
= L2CAP_DEFAULT_MTU
;
514 if (chan
->dcid
== L2CAP_CID_ATT
)
515 chan
->scid
= L2CAP_CID_ATT
;
517 chan
->scid
= l2cap_alloc_cid(conn
);
519 /* Alloc CID for connection-oriented socket */
520 chan
->scid
= l2cap_alloc_cid(conn
);
521 chan
->omtu
= L2CAP_DEFAULT_MTU
;
525 case L2CAP_CHAN_CONN_LESS
:
526 /* Connectionless socket */
527 chan
->scid
= L2CAP_CID_CONN_LESS
;
528 chan
->dcid
= L2CAP_CID_CONN_LESS
;
529 chan
->omtu
= L2CAP_DEFAULT_MTU
;
532 case L2CAP_CHAN_CONN_FIX_A2MP
:
533 chan
->scid
= L2CAP_CID_A2MP
;
534 chan
->dcid
= L2CAP_CID_A2MP
;
535 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
536 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
540 /* Raw socket can send/recv signalling messages only */
541 chan
->scid
= L2CAP_CID_SIGNALING
;
542 chan
->dcid
= L2CAP_CID_SIGNALING
;
543 chan
->omtu
= L2CAP_DEFAULT_MTU
;
546 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
547 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
548 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
549 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
550 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
551 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
553 l2cap_chan_hold(chan
);
555 hci_conn_hold(conn
->hcon
);
557 list_add(&chan
->list
, &conn
->chan_l
);
560 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
562 mutex_lock(&conn
->chan_lock
);
563 __l2cap_chan_add(conn
, chan
);
564 mutex_unlock(&conn
->chan_lock
);
567 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
569 struct l2cap_conn
*conn
= chan
->conn
;
571 __clear_chan_timer(chan
);
573 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
576 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
577 /* Delete from channel list */
578 list_del(&chan
->list
);
580 l2cap_chan_put(chan
);
584 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
585 hci_conn_drop(conn
->hcon
);
587 if (mgr
&& mgr
->bredr_chan
== chan
)
588 mgr
->bredr_chan
= NULL
;
591 if (chan
->hs_hchan
) {
592 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
594 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
595 amp_disconnect_logical_link(hs_hchan
);
598 chan
->ops
->teardown(chan
, err
);
600 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
604 case L2CAP_MODE_BASIC
:
607 case L2CAP_MODE_LE_FLOWCTL
:
610 case L2CAP_MODE_ERTM
:
611 __clear_retrans_timer(chan
);
612 __clear_monitor_timer(chan
);
613 __clear_ack_timer(chan
);
615 skb_queue_purge(&chan
->srej_q
);
617 l2cap_seq_list_free(&chan
->srej_list
);
618 l2cap_seq_list_free(&chan
->retrans_list
);
622 case L2CAP_MODE_STREAMING
:
623 skb_queue_purge(&chan
->tx_q
);
630 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
632 struct l2cap_conn
*conn
= chan
->conn
;
633 struct l2cap_le_conn_rsp rsp
;
636 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
637 result
= L2CAP_CR_AUTHORIZATION
;
639 result
= L2CAP_CR_BAD_PSM
;
641 l2cap_state_change(chan
, BT_DISCONN
);
643 rsp
.dcid
= cpu_to_le16(chan
->scid
);
644 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
645 rsp
.mps
= __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS
);
646 rsp
.credits
= __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS
);
647 rsp
.result
= cpu_to_le16(result
);
649 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
653 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
655 struct l2cap_conn
*conn
= chan
->conn
;
656 struct l2cap_conn_rsp rsp
;
659 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
660 result
= L2CAP_CR_SEC_BLOCK
;
662 result
= L2CAP_CR_BAD_PSM
;
664 l2cap_state_change(chan
, BT_DISCONN
);
666 rsp
.scid
= cpu_to_le16(chan
->dcid
);
667 rsp
.dcid
= cpu_to_le16(chan
->scid
);
668 rsp
.result
= cpu_to_le16(result
);
669 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
671 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
674 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
676 struct l2cap_conn
*conn
= chan
->conn
;
678 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
680 switch (chan
->state
) {
682 chan
->ops
->teardown(chan
, 0);
687 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
688 * check for chan->psm.
690 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& chan
->psm
) {
691 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
692 l2cap_send_disconn_req(chan
, reason
);
694 l2cap_chan_del(chan
, reason
);
698 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
699 if (conn
->hcon
->type
== ACL_LINK
)
700 l2cap_chan_connect_reject(chan
);
701 else if (conn
->hcon
->type
== LE_LINK
)
702 l2cap_chan_le_connect_reject(chan
);
705 l2cap_chan_del(chan
, reason
);
710 l2cap_chan_del(chan
, reason
);
714 chan
->ops
->teardown(chan
, 0);
719 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
721 switch (chan
->chan_type
) {
723 switch (chan
->sec_level
) {
724 case BT_SECURITY_HIGH
:
725 return HCI_AT_DEDICATED_BONDING_MITM
;
726 case BT_SECURITY_MEDIUM
:
727 return HCI_AT_DEDICATED_BONDING
;
729 return HCI_AT_NO_BONDING
;
732 case L2CAP_CHAN_CONN_LESS
:
733 if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_3DSP
)) {
734 if (chan
->sec_level
== BT_SECURITY_LOW
)
735 chan
->sec_level
= BT_SECURITY_SDP
;
737 if (chan
->sec_level
== BT_SECURITY_HIGH
)
738 return HCI_AT_NO_BONDING_MITM
;
740 return HCI_AT_NO_BONDING
;
742 case L2CAP_CHAN_CONN_ORIENTED
:
743 if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
744 if (chan
->sec_level
== BT_SECURITY_LOW
)
745 chan
->sec_level
= BT_SECURITY_SDP
;
747 if (chan
->sec_level
== BT_SECURITY_HIGH
)
748 return HCI_AT_NO_BONDING_MITM
;
750 return HCI_AT_NO_BONDING
;
754 switch (chan
->sec_level
) {
755 case BT_SECURITY_HIGH
:
756 return HCI_AT_GENERAL_BONDING_MITM
;
757 case BT_SECURITY_MEDIUM
:
758 return HCI_AT_GENERAL_BONDING
;
760 return HCI_AT_NO_BONDING
;
766 /* Service level security */
767 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
769 struct l2cap_conn
*conn
= chan
->conn
;
772 if (conn
->hcon
->type
== LE_LINK
)
773 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
775 auth_type
= l2cap_get_auth_type(chan
);
777 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
780 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
784 /* Get next available identificator.
785 * 1 - 128 are used by kernel.
786 * 129 - 199 are reserved.
787 * 200 - 254 are used by utilities like l2ping, etc.
790 spin_lock(&conn
->lock
);
792 if (++conn
->tx_ident
> 128)
797 spin_unlock(&conn
->lock
);
802 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
805 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
808 BT_DBG("code 0x%2.2x", code
);
813 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
814 flags
= ACL_START_NO_FLUSH
;
818 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
819 skb
->priority
= HCI_PRIO_MAX
;
821 hci_send_acl(conn
->hchan
, skb
, flags
);
824 static bool __chan_is_moving(struct l2cap_chan
*chan
)
826 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
827 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
830 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
832 struct hci_conn
*hcon
= chan
->conn
->hcon
;
835 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
838 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
840 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
847 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
848 lmp_no_flush_capable(hcon
->hdev
))
849 flags
= ACL_START_NO_FLUSH
;
853 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
854 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
857 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
859 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
860 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
862 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
865 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
866 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
873 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
874 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
881 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
883 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
884 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
886 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
889 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
890 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
897 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
898 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
905 static inline void __unpack_control(struct l2cap_chan
*chan
,
908 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
909 __unpack_extended_control(get_unaligned_le32(skb
->data
),
910 &bt_cb(skb
)->control
);
911 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
913 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
914 &bt_cb(skb
)->control
);
915 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
919 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
923 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
924 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
926 if (control
->sframe
) {
927 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
928 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
929 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
931 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
932 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
938 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
942 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
943 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
945 if (control
->sframe
) {
946 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
947 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
948 packed
|= L2CAP_CTRL_FRAME_TYPE
;
950 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
951 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
957 static inline void __pack_control(struct l2cap_chan
*chan
,
958 struct l2cap_ctrl
*control
,
961 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
962 put_unaligned_le32(__pack_extended_control(control
),
963 skb
->data
+ L2CAP_HDR_SIZE
);
965 put_unaligned_le16(__pack_enhanced_control(control
),
966 skb
->data
+ L2CAP_HDR_SIZE
);
970 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
972 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
973 return L2CAP_EXT_HDR_SIZE
;
975 return L2CAP_ENH_HDR_SIZE
;
978 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
982 struct l2cap_hdr
*lh
;
983 int hlen
= __ertm_hdr_size(chan
);
985 if (chan
->fcs
== L2CAP_FCS_CRC16
)
986 hlen
+= L2CAP_FCS_SIZE
;
988 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
991 return ERR_PTR(-ENOMEM
);
993 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
994 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
995 lh
->cid
= cpu_to_le16(chan
->dcid
);
997 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
998 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1000 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1002 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1003 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1004 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1007 skb
->priority
= HCI_PRIO_MAX
;
1011 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1012 struct l2cap_ctrl
*control
)
1014 struct sk_buff
*skb
;
1017 BT_DBG("chan %p, control %p", chan
, control
);
1019 if (!control
->sframe
)
1022 if (__chan_is_moving(chan
))
1025 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1029 if (control
->super
== L2CAP_SUPER_RR
)
1030 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1031 else if (control
->super
== L2CAP_SUPER_RNR
)
1032 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1034 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1035 chan
->last_acked_seq
= control
->reqseq
;
1036 __clear_ack_timer(chan
);
1039 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1040 control
->final
, control
->poll
, control
->super
);
1042 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1043 control_field
= __pack_extended_control(control
);
1045 control_field
= __pack_enhanced_control(control
);
1047 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1049 l2cap_do_send(chan
, skb
);
1052 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1054 struct l2cap_ctrl control
;
1056 BT_DBG("chan %p, poll %d", chan
, poll
);
1058 memset(&control
, 0, sizeof(control
));
1060 control
.poll
= poll
;
1062 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1063 control
.super
= L2CAP_SUPER_RNR
;
1065 control
.super
= L2CAP_SUPER_RR
;
1067 control
.reqseq
= chan
->buffer_seq
;
1068 l2cap_send_sframe(chan
, &control
);
1071 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1073 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1076 static bool __amp_capable(struct l2cap_chan
*chan
)
1078 struct l2cap_conn
*conn
= chan
->conn
;
1079 struct hci_dev
*hdev
;
1080 bool amp_available
= false;
1082 if (!conn
->hs_enabled
)
1085 if (!(conn
->fixed_chan_mask
& L2CAP_FC_A2MP
))
1088 read_lock(&hci_dev_list_lock
);
1089 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1090 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1091 test_bit(HCI_UP
, &hdev
->flags
)) {
1092 amp_available
= true;
1096 read_unlock(&hci_dev_list_lock
);
1098 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1099 return amp_available
;
1104 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1106 /* Check EFS parameters */
1110 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1112 struct l2cap_conn
*conn
= chan
->conn
;
1113 struct l2cap_conn_req req
;
1115 req
.scid
= cpu_to_le16(chan
->scid
);
1116 req
.psm
= chan
->psm
;
1118 chan
->ident
= l2cap_get_ident(conn
);
1120 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1122 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1125 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1127 struct l2cap_create_chan_req req
;
1128 req
.scid
= cpu_to_le16(chan
->scid
);
1129 req
.psm
= chan
->psm
;
1130 req
.amp_id
= amp_id
;
1132 chan
->ident
= l2cap_get_ident(chan
->conn
);
1134 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1138 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1140 struct sk_buff
*skb
;
1142 BT_DBG("chan %p", chan
);
1144 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1147 __clear_retrans_timer(chan
);
1148 __clear_monitor_timer(chan
);
1149 __clear_ack_timer(chan
);
1151 chan
->retry_count
= 0;
1152 skb_queue_walk(&chan
->tx_q
, skb
) {
1153 if (bt_cb(skb
)->control
.retries
)
1154 bt_cb(skb
)->control
.retries
= 1;
1159 chan
->expected_tx_seq
= chan
->buffer_seq
;
1161 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1162 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1163 l2cap_seq_list_clear(&chan
->retrans_list
);
1164 l2cap_seq_list_clear(&chan
->srej_list
);
1165 skb_queue_purge(&chan
->srej_q
);
1167 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1168 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1170 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1173 static void l2cap_move_done(struct l2cap_chan
*chan
)
1175 u8 move_role
= chan
->move_role
;
1176 BT_DBG("chan %p", chan
);
1178 chan
->move_state
= L2CAP_MOVE_STABLE
;
1179 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1181 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1184 switch (move_role
) {
1185 case L2CAP_MOVE_ROLE_INITIATOR
:
1186 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1187 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1189 case L2CAP_MOVE_ROLE_RESPONDER
:
1190 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1195 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1197 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1198 chan
->conf_state
= 0;
1199 __clear_chan_timer(chan
);
1201 chan
->state
= BT_CONNECTED
;
1203 chan
->ops
->ready(chan
);
1206 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1208 struct l2cap_conn
*conn
= chan
->conn
;
1209 struct l2cap_le_conn_req req
;
1211 req
.psm
= chan
->psm
;
1212 req
.scid
= cpu_to_le16(chan
->scid
);
1213 req
.mtu
= cpu_to_le16(chan
->imtu
);
1214 req
.mps
= __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS
);
1215 req
.credits
= __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS
);
1217 chan
->ident
= l2cap_get_ident(conn
);
1219 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1223 static void l2cap_le_start(struct l2cap_chan
*chan
)
1225 struct l2cap_conn
*conn
= chan
->conn
;
1227 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1231 l2cap_chan_ready(chan
);
1235 if (chan
->state
== BT_CONNECT
)
1236 l2cap_le_connect(chan
);
1239 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1241 if (__amp_capable(chan
)) {
1242 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1243 a2mp_discover_amp(chan
);
1244 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1245 l2cap_le_start(chan
);
1247 l2cap_send_conn_req(chan
);
1251 static void l2cap_do_start(struct l2cap_chan
*chan
)
1253 struct l2cap_conn
*conn
= chan
->conn
;
1255 if (conn
->hcon
->type
== LE_LINK
) {
1256 l2cap_le_start(chan
);
1260 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1261 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1264 if (l2cap_chan_check_security(chan
) &&
1265 __l2cap_no_conn_pending(chan
)) {
1266 l2cap_start_connection(chan
);
1269 struct l2cap_info_req req
;
1270 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1272 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1273 conn
->info_ident
= l2cap_get_ident(conn
);
1275 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1277 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1282 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1284 u32 local_feat_mask
= l2cap_feat_mask
;
1286 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1289 case L2CAP_MODE_ERTM
:
1290 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1291 case L2CAP_MODE_STREAMING
:
1292 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1298 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1300 struct l2cap_conn
*conn
= chan
->conn
;
1301 struct l2cap_disconn_req req
;
1306 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1307 __clear_retrans_timer(chan
);
1308 __clear_monitor_timer(chan
);
1309 __clear_ack_timer(chan
);
1312 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1313 l2cap_state_change(chan
, BT_DISCONN
);
1317 req
.dcid
= cpu_to_le16(chan
->dcid
);
1318 req
.scid
= cpu_to_le16(chan
->scid
);
1319 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1322 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1325 /* ---- L2CAP connections ---- */
1326 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1328 struct l2cap_chan
*chan
, *tmp
;
1330 BT_DBG("conn %p", conn
);
1332 mutex_lock(&conn
->chan_lock
);
1334 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1335 l2cap_chan_lock(chan
);
1337 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1338 l2cap_chan_unlock(chan
);
1342 if (chan
->state
== BT_CONNECT
) {
1343 if (!l2cap_chan_check_security(chan
) ||
1344 !__l2cap_no_conn_pending(chan
)) {
1345 l2cap_chan_unlock(chan
);
1349 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1350 && test_bit(CONF_STATE2_DEVICE
,
1351 &chan
->conf_state
)) {
1352 l2cap_chan_close(chan
, ECONNRESET
);
1353 l2cap_chan_unlock(chan
);
1357 l2cap_start_connection(chan
);
1359 } else if (chan
->state
== BT_CONNECT2
) {
1360 struct l2cap_conn_rsp rsp
;
1362 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1363 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1365 if (l2cap_chan_check_security(chan
)) {
1366 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1367 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1368 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1369 chan
->ops
->defer(chan
);
1372 l2cap_state_change(chan
, BT_CONFIG
);
1373 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1374 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1377 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1378 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1381 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1384 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1385 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1386 l2cap_chan_unlock(chan
);
1390 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1391 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1392 l2cap_build_conf_req(chan
, buf
), buf
);
1393 chan
->num_conf_req
++;
1396 l2cap_chan_unlock(chan
);
1399 mutex_unlock(&conn
->chan_lock
);
1402 /* Find socket with cid and source/destination bdaddr.
1403 * Returns closest match, locked.
1405 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1409 struct l2cap_chan
*c
, *c1
= NULL
;
1411 read_lock(&chan_list_lock
);
1413 list_for_each_entry(c
, &chan_list
, global_l
) {
1414 if (state
&& c
->state
!= state
)
1417 if (c
->scid
== cid
) {
1418 int src_match
, dst_match
;
1419 int src_any
, dst_any
;
1422 src_match
= !bacmp(&c
->src
, src
);
1423 dst_match
= !bacmp(&c
->dst
, dst
);
1424 if (src_match
&& dst_match
) {
1425 read_unlock(&chan_list_lock
);
1430 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1431 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1432 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1433 (src_any
&& dst_any
))
1438 read_unlock(&chan_list_lock
);
1443 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1445 struct hci_conn
*hcon
= conn
->hcon
;
1446 struct l2cap_chan
*chan
, *pchan
;
1451 /* Check if we have socket listening on cid */
1452 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_ATT
,
1453 &hcon
->src
, &hcon
->dst
);
1457 /* Client ATT sockets should override the server one */
1458 if (__l2cap_get_chan_by_dcid(conn
, L2CAP_CID_ATT
))
1461 dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
1463 /* If device is blocked, do not create a channel for it */
1464 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
, dst_type
))
1467 l2cap_chan_lock(pchan
);
1469 chan
= pchan
->ops
->new_connection(pchan
);
1473 chan
->dcid
= L2CAP_CID_ATT
;
1475 bacpy(&chan
->src
, &hcon
->src
);
1476 bacpy(&chan
->dst
, &hcon
->dst
);
1477 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1478 chan
->dst_type
= dst_type
;
1480 __l2cap_chan_add(conn
, chan
);
1483 l2cap_chan_unlock(pchan
);
1486 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1488 struct l2cap_chan
*chan
;
1489 struct hci_conn
*hcon
= conn
->hcon
;
1491 BT_DBG("conn %p", conn
);
1493 /* For outgoing pairing which doesn't necessarily have an
1494 * associated socket (e.g. mgmt_pair_device).
1496 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1497 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1499 mutex_lock(&conn
->chan_lock
);
1501 if (hcon
->type
== LE_LINK
)
1502 l2cap_le_conn_ready(conn
);
1504 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1506 l2cap_chan_lock(chan
);
1508 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1509 l2cap_chan_unlock(chan
);
1513 if (hcon
->type
== LE_LINK
) {
1514 l2cap_le_start(chan
);
1515 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1516 l2cap_chan_ready(chan
);
1518 } else if (chan
->state
== BT_CONNECT
) {
1519 l2cap_do_start(chan
);
1522 l2cap_chan_unlock(chan
);
1525 mutex_unlock(&conn
->chan_lock
);
1528 /* Notify sockets that we cannot guaranty reliability anymore */
1529 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1531 struct l2cap_chan
*chan
;
1533 BT_DBG("conn %p", conn
);
1535 mutex_lock(&conn
->chan_lock
);
1537 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1538 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1539 l2cap_chan_set_err(chan
, err
);
1542 mutex_unlock(&conn
->chan_lock
);
1545 static void l2cap_info_timeout(struct work_struct
*work
)
1547 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1550 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1551 conn
->info_ident
= 0;
1553 l2cap_conn_start(conn
);
1558 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1559 * callback is called during registration. The ->remove callback is called
1560 * during unregistration.
1561 * An l2cap_user object can either be explicitly unregistered or when the
1562 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1563 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1564 * External modules must own a reference to the l2cap_conn object if they intend
1565 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1566 * any time if they don't.
1569 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1571 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1574 /* We need to check whether l2cap_conn is registered. If it is not, we
1575 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1576 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1577 * relies on the parent hci_conn object to be locked. This itself relies
1578 * on the hci_dev object to be locked. So we must lock the hci device
1583 if (user
->list
.next
|| user
->list
.prev
) {
1588 /* conn->hchan is NULL after l2cap_conn_del() was called */
1594 ret
= user
->probe(conn
, user
);
1598 list_add(&user
->list
, &conn
->users
);
1602 hci_dev_unlock(hdev
);
1605 EXPORT_SYMBOL(l2cap_register_user
);
1607 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1609 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1613 if (!user
->list
.next
|| !user
->list
.prev
)
1616 list_del(&user
->list
);
1617 user
->list
.next
= NULL
;
1618 user
->list
.prev
= NULL
;
1619 user
->remove(conn
, user
);
1622 hci_dev_unlock(hdev
);
1624 EXPORT_SYMBOL(l2cap_unregister_user
);
1626 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1628 struct l2cap_user
*user
;
1630 while (!list_empty(&conn
->users
)) {
1631 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1632 list_del(&user
->list
);
1633 user
->list
.next
= NULL
;
1634 user
->list
.prev
= NULL
;
1635 user
->remove(conn
, user
);
1639 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1641 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1642 struct l2cap_chan
*chan
, *l
;
1647 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1649 kfree_skb(conn
->rx_skb
);
1651 l2cap_unregister_all_users(conn
);
1653 mutex_lock(&conn
->chan_lock
);
1656 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1657 l2cap_chan_hold(chan
);
1658 l2cap_chan_lock(chan
);
1660 l2cap_chan_del(chan
, err
);
1662 l2cap_chan_unlock(chan
);
1664 chan
->ops
->close(chan
);
1665 l2cap_chan_put(chan
);
1668 mutex_unlock(&conn
->chan_lock
);
1670 hci_chan_del(conn
->hchan
);
1672 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1673 cancel_delayed_work_sync(&conn
->info_timer
);
1675 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1676 cancel_delayed_work_sync(&conn
->security_timer
);
1677 smp_chan_destroy(conn
);
1680 hcon
->l2cap_data
= NULL
;
1682 l2cap_conn_put(conn
);
1685 static void security_timeout(struct work_struct
*work
)
1687 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1688 security_timer
.work
);
1690 BT_DBG("conn %p", conn
);
1692 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1693 smp_chan_destroy(conn
);
1694 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1698 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
1700 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1701 struct hci_chan
*hchan
;
1706 hchan
= hci_chan_create(hcon
);
1710 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1712 hci_chan_del(hchan
);
1716 kref_init(&conn
->ref
);
1717 hcon
->l2cap_data
= conn
;
1719 hci_conn_get(conn
->hcon
);
1720 conn
->hchan
= hchan
;
1722 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1724 switch (hcon
->type
) {
1726 if (hcon
->hdev
->le_mtu
) {
1727 conn
->mtu
= hcon
->hdev
->le_mtu
;
1732 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1736 conn
->feat_mask
= 0;
1738 if (hcon
->type
== ACL_LINK
)
1739 conn
->hs_enabled
= test_bit(HCI_HS_ENABLED
,
1740 &hcon
->hdev
->dev_flags
);
1742 spin_lock_init(&conn
->lock
);
1743 mutex_init(&conn
->chan_lock
);
1745 INIT_LIST_HEAD(&conn
->chan_l
);
1746 INIT_LIST_HEAD(&conn
->users
);
1748 if (hcon
->type
== LE_LINK
)
1749 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1751 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1753 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1758 static void l2cap_conn_free(struct kref
*ref
)
1760 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1762 hci_conn_put(conn
->hcon
);
1766 void l2cap_conn_get(struct l2cap_conn
*conn
)
1768 kref_get(&conn
->ref
);
1770 EXPORT_SYMBOL(l2cap_conn_get
);
1772 void l2cap_conn_put(struct l2cap_conn
*conn
)
1774 kref_put(&conn
->ref
, l2cap_conn_free
);
1776 EXPORT_SYMBOL(l2cap_conn_put
);
1778 /* ---- Socket interface ---- */
1780 /* Find socket with psm and source / destination bdaddr.
1781 * Returns closest match.
1783 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1788 struct l2cap_chan
*c
, *c1
= NULL
;
1790 read_lock(&chan_list_lock
);
1792 list_for_each_entry(c
, &chan_list
, global_l
) {
1793 if (state
&& c
->state
!= state
)
1796 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1799 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1802 if (c
->psm
== psm
) {
1803 int src_match
, dst_match
;
1804 int src_any
, dst_any
;
1807 src_match
= !bacmp(&c
->src
, src
);
1808 dst_match
= !bacmp(&c
->dst
, dst
);
1809 if (src_match
&& dst_match
) {
1810 read_unlock(&chan_list_lock
);
1815 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1816 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1817 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1818 (src_any
&& dst_any
))
1823 read_unlock(&chan_list_lock
);
1828 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1829 bdaddr_t
*dst
, u8 dst_type
)
1831 struct l2cap_conn
*conn
;
1832 struct hci_conn
*hcon
;
1833 struct hci_dev
*hdev
;
1837 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
1838 dst_type
, __le16_to_cpu(psm
));
1840 hdev
= hci_get_route(dst
, &chan
->src
);
1842 return -EHOSTUNREACH
;
1846 l2cap_chan_lock(chan
);
1848 /* PSM must be odd and lsb of upper byte must be 0 */
1849 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1850 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1855 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1860 switch (chan
->mode
) {
1861 case L2CAP_MODE_BASIC
:
1862 case L2CAP_MODE_LE_FLOWCTL
:
1864 case L2CAP_MODE_ERTM
:
1865 case L2CAP_MODE_STREAMING
:
1874 switch (chan
->state
) {
1878 /* Already connecting */
1883 /* Already connected */
1897 /* Set destination address and psm */
1898 bacpy(&chan
->dst
, dst
);
1899 chan
->dst_type
= dst_type
;
1904 auth_type
= l2cap_get_auth_type(chan
);
1906 if (bdaddr_type_is_le(dst_type
))
1907 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1908 chan
->sec_level
, auth_type
);
1910 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1911 chan
->sec_level
, auth_type
);
1914 err
= PTR_ERR(hcon
);
1918 conn
= l2cap_conn_add(hcon
);
1920 hci_conn_drop(hcon
);
1925 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
1926 hci_conn_drop(hcon
);
1931 /* Update source addr of the socket */
1932 bacpy(&chan
->src
, &hcon
->src
);
1933 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1935 l2cap_chan_unlock(chan
);
1936 l2cap_chan_add(conn
, chan
);
1937 l2cap_chan_lock(chan
);
1939 /* l2cap_chan_add takes its own ref so we can drop this one */
1940 hci_conn_drop(hcon
);
1942 l2cap_state_change(chan
, BT_CONNECT
);
1943 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
1945 if (hcon
->state
== BT_CONNECTED
) {
1946 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1947 __clear_chan_timer(chan
);
1948 if (l2cap_chan_check_security(chan
))
1949 l2cap_state_change(chan
, BT_CONNECTED
);
1951 l2cap_do_start(chan
);
1957 l2cap_chan_unlock(chan
);
1958 hci_dev_unlock(hdev
);
1963 static void l2cap_monitor_timeout(struct work_struct
*work
)
1965 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1966 monitor_timer
.work
);
1968 BT_DBG("chan %p", chan
);
1970 l2cap_chan_lock(chan
);
1973 l2cap_chan_unlock(chan
);
1974 l2cap_chan_put(chan
);
1978 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1980 l2cap_chan_unlock(chan
);
1981 l2cap_chan_put(chan
);
1984 static void l2cap_retrans_timeout(struct work_struct
*work
)
1986 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1987 retrans_timer
.work
);
1989 BT_DBG("chan %p", chan
);
1991 l2cap_chan_lock(chan
);
1994 l2cap_chan_unlock(chan
);
1995 l2cap_chan_put(chan
);
1999 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
2000 l2cap_chan_unlock(chan
);
2001 l2cap_chan_put(chan
);
2004 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
2005 struct sk_buff_head
*skbs
)
2007 struct sk_buff
*skb
;
2008 struct l2cap_ctrl
*control
;
2010 BT_DBG("chan %p, skbs %p", chan
, skbs
);
2012 if (__chan_is_moving(chan
))
2015 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2017 while (!skb_queue_empty(&chan
->tx_q
)) {
2019 skb
= skb_dequeue(&chan
->tx_q
);
2021 bt_cb(skb
)->control
.retries
= 1;
2022 control
= &bt_cb(skb
)->control
;
2024 control
->reqseq
= 0;
2025 control
->txseq
= chan
->next_tx_seq
;
2027 __pack_control(chan
, control
, skb
);
2029 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2030 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
2031 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
2034 l2cap_do_send(chan
, skb
);
2036 BT_DBG("Sent txseq %u", control
->txseq
);
2038 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
2039 chan
->frames_sent
++;
2043 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
2045 struct sk_buff
*skb
, *tx_skb
;
2046 struct l2cap_ctrl
*control
;
2049 BT_DBG("chan %p", chan
);
2051 if (chan
->state
!= BT_CONNECTED
)
2054 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2057 if (__chan_is_moving(chan
))
2060 while (chan
->tx_send_head
&&
2061 chan
->unacked_frames
< chan
->remote_tx_win
&&
2062 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
2064 skb
= chan
->tx_send_head
;
2066 bt_cb(skb
)->control
.retries
= 1;
2067 control
= &bt_cb(skb
)->control
;
2069 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2072 control
->reqseq
= chan
->buffer_seq
;
2073 chan
->last_acked_seq
= chan
->buffer_seq
;
2074 control
->txseq
= chan
->next_tx_seq
;
2076 __pack_control(chan
, control
, skb
);
2078 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2079 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
2080 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
2083 /* Clone after data has been modified. Data is assumed to be
2084 read-only (for locking purposes) on cloned sk_buffs.
2086 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2091 __set_retrans_timer(chan
);
2093 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
2094 chan
->unacked_frames
++;
2095 chan
->frames_sent
++;
2098 if (skb_queue_is_last(&chan
->tx_q
, skb
))
2099 chan
->tx_send_head
= NULL
;
2101 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
2103 l2cap_do_send(chan
, tx_skb
);
2104 BT_DBG("Sent txseq %u", control
->txseq
);
2107 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
2108 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
2113 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
2115 struct l2cap_ctrl control
;
2116 struct sk_buff
*skb
;
2117 struct sk_buff
*tx_skb
;
2120 BT_DBG("chan %p", chan
);
2122 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2125 if (__chan_is_moving(chan
))
2128 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
2129 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
2131 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
2133 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2138 bt_cb(skb
)->control
.retries
++;
2139 control
= bt_cb(skb
)->control
;
2141 if (chan
->max_tx
!= 0 &&
2142 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
2143 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
2144 l2cap_send_disconn_req(chan
, ECONNRESET
);
2145 l2cap_seq_list_clear(&chan
->retrans_list
);
2149 control
.reqseq
= chan
->buffer_seq
;
2150 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2155 if (skb_cloned(skb
)) {
2156 /* Cloned sk_buffs are read-only, so we need a
2159 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
2161 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2165 l2cap_seq_list_clear(&chan
->retrans_list
);
2169 /* Update skb contents */
2170 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
2171 put_unaligned_le32(__pack_extended_control(&control
),
2172 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2174 put_unaligned_le16(__pack_enhanced_control(&control
),
2175 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2178 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2179 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
2180 put_unaligned_le16(fcs
, skb_put(tx_skb
,
2184 l2cap_do_send(chan
, tx_skb
);
2186 BT_DBG("Resent txseq %d", control
.txseq
);
2188 chan
->last_acked_seq
= chan
->buffer_seq
;
2192 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2193 struct l2cap_ctrl
*control
)
2195 BT_DBG("chan %p, control %p", chan
, control
);
2197 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2198 l2cap_ertm_resend(chan
);
2201 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2202 struct l2cap_ctrl
*control
)
2204 struct sk_buff
*skb
;
2206 BT_DBG("chan %p, control %p", chan
, control
);
2209 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2211 l2cap_seq_list_clear(&chan
->retrans_list
);
2213 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2216 if (chan
->unacked_frames
) {
2217 skb_queue_walk(&chan
->tx_q
, skb
) {
2218 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2219 skb
== chan
->tx_send_head
)
2223 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2224 if (skb
== chan
->tx_send_head
)
2227 l2cap_seq_list_append(&chan
->retrans_list
,
2228 bt_cb(skb
)->control
.txseq
);
2231 l2cap_ertm_resend(chan
);
2235 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2237 struct l2cap_ctrl control
;
2238 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2239 chan
->last_acked_seq
);
2242 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2243 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2245 memset(&control
, 0, sizeof(control
));
2248 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2249 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2250 __clear_ack_timer(chan
);
2251 control
.super
= L2CAP_SUPER_RNR
;
2252 control
.reqseq
= chan
->buffer_seq
;
2253 l2cap_send_sframe(chan
, &control
);
2255 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2256 l2cap_ertm_send(chan
);
2257 /* If any i-frames were sent, they included an ack */
2258 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2262 /* Ack now if the window is 3/4ths full.
2263 * Calculate without mul or div
2265 threshold
= chan
->ack_win
;
2266 threshold
+= threshold
<< 1;
2269 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2272 if (frames_to_ack
>= threshold
) {
2273 __clear_ack_timer(chan
);
2274 control
.super
= L2CAP_SUPER_RR
;
2275 control
.reqseq
= chan
->buffer_seq
;
2276 l2cap_send_sframe(chan
, &control
);
2281 __set_ack_timer(chan
);
2285 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2286 struct msghdr
*msg
, int len
,
2287 int count
, struct sk_buff
*skb
)
2289 struct l2cap_conn
*conn
= chan
->conn
;
2290 struct sk_buff
**frag
;
2293 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
2299 /* Continuation fragments (no L2CAP header) */
2300 frag
= &skb_shinfo(skb
)->frag_list
;
2302 struct sk_buff
*tmp
;
2304 count
= min_t(unsigned int, conn
->mtu
, len
);
2306 tmp
= chan
->ops
->alloc_skb(chan
, count
,
2307 msg
->msg_flags
& MSG_DONTWAIT
);
2309 return PTR_ERR(tmp
);
2313 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
2316 (*frag
)->priority
= skb
->priority
;
2321 skb
->len
+= (*frag
)->len
;
2322 skb
->data_len
+= (*frag
)->len
;
2324 frag
= &(*frag
)->next
;
2330 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2331 struct msghdr
*msg
, size_t len
,
2334 struct l2cap_conn
*conn
= chan
->conn
;
2335 struct sk_buff
*skb
;
2336 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2337 struct l2cap_hdr
*lh
;
2339 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan
,
2340 __le16_to_cpu(chan
->psm
), len
, priority
);
2342 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2344 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2345 msg
->msg_flags
& MSG_DONTWAIT
);
2349 skb
->priority
= priority
;
2351 /* Create L2CAP header */
2352 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2353 lh
->cid
= cpu_to_le16(chan
->dcid
);
2354 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2355 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2357 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2358 if (unlikely(err
< 0)) {
2360 return ERR_PTR(err
);
2365 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2366 struct msghdr
*msg
, size_t len
,
2369 struct l2cap_conn
*conn
= chan
->conn
;
2370 struct sk_buff
*skb
;
2372 struct l2cap_hdr
*lh
;
2374 BT_DBG("chan %p len %zu", chan
, len
);
2376 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2378 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2379 msg
->msg_flags
& MSG_DONTWAIT
);
2383 skb
->priority
= priority
;
2385 /* Create L2CAP header */
2386 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2387 lh
->cid
= cpu_to_le16(chan
->dcid
);
2388 lh
->len
= cpu_to_le16(len
);
2390 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2391 if (unlikely(err
< 0)) {
2393 return ERR_PTR(err
);
2398 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2399 struct msghdr
*msg
, size_t len
,
2402 struct l2cap_conn
*conn
= chan
->conn
;
2403 struct sk_buff
*skb
;
2404 int err
, count
, hlen
;
2405 struct l2cap_hdr
*lh
;
2407 BT_DBG("chan %p len %zu", chan
, len
);
2410 return ERR_PTR(-ENOTCONN
);
2412 hlen
= __ertm_hdr_size(chan
);
2415 hlen
+= L2CAP_SDULEN_SIZE
;
2417 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2418 hlen
+= L2CAP_FCS_SIZE
;
2420 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2422 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2423 msg
->msg_flags
& MSG_DONTWAIT
);
2427 /* Create L2CAP header */
2428 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2429 lh
->cid
= cpu_to_le16(chan
->dcid
);
2430 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2432 /* Control header is populated later */
2433 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2434 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2436 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2439 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2441 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2442 if (unlikely(err
< 0)) {
2444 return ERR_PTR(err
);
2447 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2448 bt_cb(skb
)->control
.retries
= 0;
2452 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2453 struct sk_buff_head
*seg_queue
,
2454 struct msghdr
*msg
, size_t len
)
2456 struct sk_buff
*skb
;
2461 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2463 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2464 * so fragmented skbs are not used. The HCI layer's handling
2465 * of fragmented skbs is not compatible with ERTM's queueing.
2468 /* PDU size is derived from the HCI MTU */
2469 pdu_len
= chan
->conn
->mtu
;
2471 /* Constrain PDU size for BR/EDR connections */
2473 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2475 /* Adjust for largest possible L2CAP overhead. */
2477 pdu_len
-= L2CAP_FCS_SIZE
;
2479 pdu_len
-= __ertm_hdr_size(chan
);
2481 /* Remote device may have requested smaller PDUs */
2482 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2484 if (len
<= pdu_len
) {
2485 sar
= L2CAP_SAR_UNSEGMENTED
;
2489 sar
= L2CAP_SAR_START
;
2491 pdu_len
-= L2CAP_SDULEN_SIZE
;
2495 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2498 __skb_queue_purge(seg_queue
);
2499 return PTR_ERR(skb
);
2502 bt_cb(skb
)->control
.sar
= sar
;
2503 __skb_queue_tail(seg_queue
, skb
);
2508 pdu_len
+= L2CAP_SDULEN_SIZE
;
2511 if (len
<= pdu_len
) {
2512 sar
= L2CAP_SAR_END
;
2515 sar
= L2CAP_SAR_CONTINUE
;
2522 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2525 struct sk_buff
*skb
;
2527 struct sk_buff_head seg_queue
;
2532 /* Connectionless channel */
2533 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2534 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2536 return PTR_ERR(skb
);
2538 l2cap_do_send(chan
, skb
);
2542 switch (chan
->mode
) {
2543 case L2CAP_MODE_BASIC
:
2544 case L2CAP_MODE_LE_FLOWCTL
:
2545 /* Check outgoing MTU */
2546 if (len
> chan
->omtu
)
2549 /* Create a basic PDU */
2550 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2552 return PTR_ERR(skb
);
2554 l2cap_do_send(chan
, skb
);
2558 case L2CAP_MODE_ERTM
:
2559 case L2CAP_MODE_STREAMING
:
2560 /* Check outgoing MTU */
2561 if (len
> chan
->omtu
) {
2566 __skb_queue_head_init(&seg_queue
);
2568 /* Do segmentation before calling in to the state machine,
2569 * since it's possible to block while waiting for memory
2572 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2574 /* The channel could have been closed while segmenting,
2575 * check that it is still connected.
2577 if (chan
->state
!= BT_CONNECTED
) {
2578 __skb_queue_purge(&seg_queue
);
2585 if (chan
->mode
== L2CAP_MODE_ERTM
)
2586 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2588 l2cap_streaming_send(chan
, &seg_queue
);
2592 /* If the skbs were not queued for sending, they'll still be in
2593 * seg_queue and need to be purged.
2595 __skb_queue_purge(&seg_queue
);
2599 BT_DBG("bad state %1.1x", chan
->mode
);
2606 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2608 struct l2cap_ctrl control
;
2611 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2613 memset(&control
, 0, sizeof(control
));
2615 control
.super
= L2CAP_SUPER_SREJ
;
2617 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2618 seq
= __next_seq(chan
, seq
)) {
2619 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2620 control
.reqseq
= seq
;
2621 l2cap_send_sframe(chan
, &control
);
2622 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2626 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2629 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2631 struct l2cap_ctrl control
;
2633 BT_DBG("chan %p", chan
);
2635 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2638 memset(&control
, 0, sizeof(control
));
2640 control
.super
= L2CAP_SUPER_SREJ
;
2641 control
.reqseq
= chan
->srej_list
.tail
;
2642 l2cap_send_sframe(chan
, &control
);
2645 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2647 struct l2cap_ctrl control
;
2651 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2653 memset(&control
, 0, sizeof(control
));
2655 control
.super
= L2CAP_SUPER_SREJ
;
2657 /* Capture initial list head to allow only one pass through the list. */
2658 initial_head
= chan
->srej_list
.head
;
2661 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2662 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2665 control
.reqseq
= seq
;
2666 l2cap_send_sframe(chan
, &control
);
2667 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2668 } while (chan
->srej_list
.head
!= initial_head
);
2671 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2673 struct sk_buff
*acked_skb
;
2676 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2678 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2681 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2682 chan
->expected_ack_seq
, chan
->unacked_frames
);
2684 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2685 ackseq
= __next_seq(chan
, ackseq
)) {
2687 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2689 skb_unlink(acked_skb
, &chan
->tx_q
);
2690 kfree_skb(acked_skb
);
2691 chan
->unacked_frames
--;
2695 chan
->expected_ack_seq
= reqseq
;
2697 if (chan
->unacked_frames
== 0)
2698 __clear_retrans_timer(chan
);
2700 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2703 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2705 BT_DBG("chan %p", chan
);
2707 chan
->expected_tx_seq
= chan
->buffer_seq
;
2708 l2cap_seq_list_clear(&chan
->srej_list
);
2709 skb_queue_purge(&chan
->srej_q
);
2710 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2713 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2714 struct l2cap_ctrl
*control
,
2715 struct sk_buff_head
*skbs
, u8 event
)
2717 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2721 case L2CAP_EV_DATA_REQUEST
:
2722 if (chan
->tx_send_head
== NULL
)
2723 chan
->tx_send_head
= skb_peek(skbs
);
2725 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2726 l2cap_ertm_send(chan
);
2728 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2729 BT_DBG("Enter LOCAL_BUSY");
2730 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2732 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2733 /* The SREJ_SENT state must be aborted if we are to
2734 * enter the LOCAL_BUSY state.
2736 l2cap_abort_rx_srej_sent(chan
);
2739 l2cap_send_ack(chan
);
2742 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2743 BT_DBG("Exit LOCAL_BUSY");
2744 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2746 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2747 struct l2cap_ctrl local_control
;
2749 memset(&local_control
, 0, sizeof(local_control
));
2750 local_control
.sframe
= 1;
2751 local_control
.super
= L2CAP_SUPER_RR
;
2752 local_control
.poll
= 1;
2753 local_control
.reqseq
= chan
->buffer_seq
;
2754 l2cap_send_sframe(chan
, &local_control
);
2756 chan
->retry_count
= 1;
2757 __set_monitor_timer(chan
);
2758 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2761 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2762 l2cap_process_reqseq(chan
, control
->reqseq
);
2764 case L2CAP_EV_EXPLICIT_POLL
:
2765 l2cap_send_rr_or_rnr(chan
, 1);
2766 chan
->retry_count
= 1;
2767 __set_monitor_timer(chan
);
2768 __clear_ack_timer(chan
);
2769 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2771 case L2CAP_EV_RETRANS_TO
:
2772 l2cap_send_rr_or_rnr(chan
, 1);
2773 chan
->retry_count
= 1;
2774 __set_monitor_timer(chan
);
2775 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2777 case L2CAP_EV_RECV_FBIT
:
2778 /* Nothing to process */
2785 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2786 struct l2cap_ctrl
*control
,
2787 struct sk_buff_head
*skbs
, u8 event
)
2789 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2793 case L2CAP_EV_DATA_REQUEST
:
2794 if (chan
->tx_send_head
== NULL
)
2795 chan
->tx_send_head
= skb_peek(skbs
);
2796 /* Queue data, but don't send. */
2797 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2799 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2800 BT_DBG("Enter LOCAL_BUSY");
2801 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2803 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2804 /* The SREJ_SENT state must be aborted if we are to
2805 * enter the LOCAL_BUSY state.
2807 l2cap_abort_rx_srej_sent(chan
);
2810 l2cap_send_ack(chan
);
2813 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2814 BT_DBG("Exit LOCAL_BUSY");
2815 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2817 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2818 struct l2cap_ctrl local_control
;
2819 memset(&local_control
, 0, sizeof(local_control
));
2820 local_control
.sframe
= 1;
2821 local_control
.super
= L2CAP_SUPER_RR
;
2822 local_control
.poll
= 1;
2823 local_control
.reqseq
= chan
->buffer_seq
;
2824 l2cap_send_sframe(chan
, &local_control
);
2826 chan
->retry_count
= 1;
2827 __set_monitor_timer(chan
);
2828 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2831 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2832 l2cap_process_reqseq(chan
, control
->reqseq
);
2836 case L2CAP_EV_RECV_FBIT
:
2837 if (control
&& control
->final
) {
2838 __clear_monitor_timer(chan
);
2839 if (chan
->unacked_frames
> 0)
2840 __set_retrans_timer(chan
);
2841 chan
->retry_count
= 0;
2842 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2843 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2846 case L2CAP_EV_EXPLICIT_POLL
:
2849 case L2CAP_EV_MONITOR_TO
:
2850 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2851 l2cap_send_rr_or_rnr(chan
, 1);
2852 __set_monitor_timer(chan
);
2853 chan
->retry_count
++;
2855 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2863 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2864 struct sk_buff_head
*skbs
, u8 event
)
2866 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2867 chan
, control
, skbs
, event
, chan
->tx_state
);
2869 switch (chan
->tx_state
) {
2870 case L2CAP_TX_STATE_XMIT
:
2871 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2873 case L2CAP_TX_STATE_WAIT_F
:
2874 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2882 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2883 struct l2cap_ctrl
*control
)
2885 BT_DBG("chan %p, control %p", chan
, control
);
2886 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2889 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2890 struct l2cap_ctrl
*control
)
2892 BT_DBG("chan %p, control %p", chan
, control
);
2893 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2896 /* Copy frame to all raw sockets on that connection */
2897 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2899 struct sk_buff
*nskb
;
2900 struct l2cap_chan
*chan
;
2902 BT_DBG("conn %p", conn
);
2904 mutex_lock(&conn
->chan_lock
);
2906 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2907 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2910 /* Don't send frame to the channel it came from */
2911 if (bt_cb(skb
)->chan
== chan
)
2914 nskb
= skb_clone(skb
, GFP_KERNEL
);
2917 if (chan
->ops
->recv(chan
, nskb
))
2921 mutex_unlock(&conn
->chan_lock
);
2924 /* ---- L2CAP signalling commands ---- */
2925 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2926 u8 ident
, u16 dlen
, void *data
)
2928 struct sk_buff
*skb
, **frag
;
2929 struct l2cap_cmd_hdr
*cmd
;
2930 struct l2cap_hdr
*lh
;
2933 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2934 conn
, code
, ident
, dlen
);
2936 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2939 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2940 count
= min_t(unsigned int, conn
->mtu
, len
);
2942 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2946 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2947 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2949 if (conn
->hcon
->type
== LE_LINK
)
2950 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2952 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2954 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2957 cmd
->len
= cpu_to_le16(dlen
);
2960 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2961 memcpy(skb_put(skb
, count
), data
, count
);
2967 /* Continuation fragments (no L2CAP header) */
2968 frag
= &skb_shinfo(skb
)->frag_list
;
2970 count
= min_t(unsigned int, conn
->mtu
, len
);
2972 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2976 memcpy(skb_put(*frag
, count
), data
, count
);
2981 frag
= &(*frag
)->next
;
2991 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2994 struct l2cap_conf_opt
*opt
= *ptr
;
2997 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
3005 *val
= *((u8
*) opt
->val
);
3009 *val
= get_unaligned_le16(opt
->val
);
3013 *val
= get_unaligned_le32(opt
->val
);
3017 *val
= (unsigned long) opt
->val
;
3021 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
3025 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
3027 struct l2cap_conf_opt
*opt
= *ptr
;
3029 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
3036 *((u8
*) opt
->val
) = val
;
3040 put_unaligned_le16(val
, opt
->val
);
3044 put_unaligned_le32(val
, opt
->val
);
3048 memcpy(opt
->val
, (void *) val
, len
);
3052 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
3055 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
3057 struct l2cap_conf_efs efs
;
3059 switch (chan
->mode
) {
3060 case L2CAP_MODE_ERTM
:
3061 efs
.id
= chan
->local_id
;
3062 efs
.stype
= chan
->local_stype
;
3063 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3064 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3065 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
3066 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
3069 case L2CAP_MODE_STREAMING
:
3071 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
3072 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3073 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3082 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3083 (unsigned long) &efs
);
3086 static void l2cap_ack_timeout(struct work_struct
*work
)
3088 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3092 BT_DBG("chan %p", chan
);
3094 l2cap_chan_lock(chan
);
3096 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3097 chan
->last_acked_seq
);
3100 l2cap_send_rr_or_rnr(chan
, 0);
3102 l2cap_chan_unlock(chan
);
3103 l2cap_chan_put(chan
);
3106 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3110 chan
->next_tx_seq
= 0;
3111 chan
->expected_tx_seq
= 0;
3112 chan
->expected_ack_seq
= 0;
3113 chan
->unacked_frames
= 0;
3114 chan
->buffer_seq
= 0;
3115 chan
->frames_sent
= 0;
3116 chan
->last_acked_seq
= 0;
3118 chan
->sdu_last_frag
= NULL
;
3121 skb_queue_head_init(&chan
->tx_q
);
3123 chan
->local_amp_id
= AMP_ID_BREDR
;
3124 chan
->move_id
= AMP_ID_BREDR
;
3125 chan
->move_state
= L2CAP_MOVE_STABLE
;
3126 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3128 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3131 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3132 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3134 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3135 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3136 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3138 skb_queue_head_init(&chan
->srej_q
);
3140 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3144 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3146 l2cap_seq_list_free(&chan
->srej_list
);
3151 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3154 case L2CAP_MODE_STREAMING
:
3155 case L2CAP_MODE_ERTM
:
3156 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3160 return L2CAP_MODE_BASIC
;
3164 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3166 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3169 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3171 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
3174 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3175 struct l2cap_conf_rfc
*rfc
)
3177 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3178 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3180 /* Class 1 devices have must have ERTM timeouts
3181 * exceeding the Link Supervision Timeout. The
3182 * default Link Supervision Timeout for AMP
3183 * controllers is 10 seconds.
3185 * Class 1 devices use 0xffffffff for their
3186 * best-effort flush timeout, so the clamping logic
3187 * will result in a timeout that meets the above
3188 * requirement. ERTM timeouts are 16-bit values, so
3189 * the maximum timeout is 65.535 seconds.
3192 /* Convert timeout to milliseconds and round */
3193 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3195 /* This is the recommended formula for class 2 devices
3196 * that start ERTM timers when packets are sent to the
3199 ertm_to
= 3 * ertm_to
+ 500;
3201 if (ertm_to
> 0xffff)
3204 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3205 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3207 rfc
->retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3208 rfc
->monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3212 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3214 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3215 __l2cap_ews_supported(chan
->conn
)) {
3216 /* use extended control field */
3217 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3218 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3220 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3221 L2CAP_DEFAULT_TX_WINDOW
);
3222 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3224 chan
->ack_win
= chan
->tx_win
;
3227 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3229 struct l2cap_conf_req
*req
= data
;
3230 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3231 void *ptr
= req
->data
;
3234 BT_DBG("chan %p", chan
);
3236 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3239 switch (chan
->mode
) {
3240 case L2CAP_MODE_STREAMING
:
3241 case L2CAP_MODE_ERTM
:
3242 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3245 if (__l2cap_efs_supported(chan
->conn
))
3246 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3250 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3255 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3256 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3258 switch (chan
->mode
) {
3259 case L2CAP_MODE_BASIC
:
3260 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3261 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3264 rfc
.mode
= L2CAP_MODE_BASIC
;
3266 rfc
.max_transmit
= 0;
3267 rfc
.retrans_timeout
= 0;
3268 rfc
.monitor_timeout
= 0;
3269 rfc
.max_pdu_size
= 0;
3271 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3272 (unsigned long) &rfc
);
3275 case L2CAP_MODE_ERTM
:
3276 rfc
.mode
= L2CAP_MODE_ERTM
;
3277 rfc
.max_transmit
= chan
->max_tx
;
3279 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3281 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3282 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3284 rfc
.max_pdu_size
= cpu_to_le16(size
);
3286 l2cap_txwin_setup(chan
);
3288 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3289 L2CAP_DEFAULT_TX_WINDOW
);
3291 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3292 (unsigned long) &rfc
);
3294 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3295 l2cap_add_opt_efs(&ptr
, chan
);
3297 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3298 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3301 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3302 if (chan
->fcs
== L2CAP_FCS_NONE
||
3303 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3304 chan
->fcs
= L2CAP_FCS_NONE
;
3305 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3310 case L2CAP_MODE_STREAMING
:
3311 l2cap_txwin_setup(chan
);
3312 rfc
.mode
= L2CAP_MODE_STREAMING
;
3314 rfc
.max_transmit
= 0;
3315 rfc
.retrans_timeout
= 0;
3316 rfc
.monitor_timeout
= 0;
3318 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3319 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3321 rfc
.max_pdu_size
= cpu_to_le16(size
);
3323 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3324 (unsigned long) &rfc
);
3326 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3327 l2cap_add_opt_efs(&ptr
, chan
);
3329 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3330 if (chan
->fcs
== L2CAP_FCS_NONE
||
3331 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3332 chan
->fcs
= L2CAP_FCS_NONE
;
3333 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3339 req
->dcid
= cpu_to_le16(chan
->dcid
);
3340 req
->flags
= __constant_cpu_to_le16(0);
/* Parse the accumulated Configure Request (chan->conf_req) from the
 * remote, build the Configure Response payload into *data and return
 * its length, or a negative errno when the request must be refused.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints may be ignored; unknown options
			 * must be echoed back with CONF_UNKNOWN.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3559 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3560 void *data
, u16
*result
)
3562 struct l2cap_conf_req
*req
= data
;
3563 void *ptr
= req
->data
;
3566 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3567 struct l2cap_conf_efs efs
;
3569 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3571 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3572 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3575 case L2CAP_CONF_MTU
:
3576 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3577 *result
= L2CAP_CONF_UNACCEPT
;
3578 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3581 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3584 case L2CAP_CONF_FLUSH_TO
:
3585 chan
->flush_to
= val
;
3586 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3590 case L2CAP_CONF_RFC
:
3591 if (olen
== sizeof(rfc
))
3592 memcpy(&rfc
, (void *)val
, olen
);
3594 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3595 rfc
.mode
!= chan
->mode
)
3596 return -ECONNREFUSED
;
3600 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3601 sizeof(rfc
), (unsigned long) &rfc
);
3604 case L2CAP_CONF_EWS
:
3605 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3606 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3610 case L2CAP_CONF_EFS
:
3611 if (olen
== sizeof(efs
))
3612 memcpy(&efs
, (void *)val
, olen
);
3614 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3615 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3616 efs
.stype
!= chan
->local_stype
)
3617 return -ECONNREFUSED
;
3619 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3620 (unsigned long) &efs
);
3623 case L2CAP_CONF_FCS
:
3624 if (*result
== L2CAP_CONF_PENDING
)
3625 if (val
== L2CAP_FCS_NONE
)
3626 set_bit(CONF_RECV_NO_FCS
,
3632 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3633 return -ECONNREFUSED
;
3635 chan
->mode
= rfc
.mode
;
3637 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3639 case L2CAP_MODE_ERTM
:
3640 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3641 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3642 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3643 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3644 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3647 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3648 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3649 chan
->local_sdu_itime
=
3650 le32_to_cpu(efs
.sdu_itime
);
3651 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3652 chan
->local_flush_to
=
3653 le32_to_cpu(efs
.flush_to
);
3657 case L2CAP_MODE_STREAMING
:
3658 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3662 req
->dcid
= cpu_to_le16(chan
->dcid
);
3663 req
->flags
= __constant_cpu_to_le16(0);
3668 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3669 u16 result
, u16 flags
)
3671 struct l2cap_conf_rsp
*rsp
= data
;
3672 void *ptr
= rsp
->data
;
3674 BT_DBG("chan %p", chan
);
3676 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3677 rsp
->result
= cpu_to_le16(result
);
3678 rsp
->flags
= cpu_to_le16(flags
);
3683 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3685 struct l2cap_le_conn_rsp rsp
;
3686 struct l2cap_conn
*conn
= chan
->conn
;
3688 BT_DBG("chan %p", chan
);
3690 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3691 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3692 rsp
.mps
= __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS
);
3693 rsp
.credits
= __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS
);
3694 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3696 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
3700 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3702 struct l2cap_conn_rsp rsp
;
3703 struct l2cap_conn
*conn
= chan
->conn
;
3707 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3708 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3709 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3710 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3713 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3715 rsp_code
= L2CAP_CONN_RSP
;
3717 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3719 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3721 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3724 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3725 l2cap_build_conf_req(chan
, buf
), buf
);
3726 chan
->num_conf_req
++;
3729 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3733 /* Use sane default values in case a misbehaving remote device
3734 * did not send an RFC or extended window size option.
3736 u16 txwin_ext
= chan
->ack_win
;
3737 struct l2cap_conf_rfc rfc
= {
3739 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3740 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3741 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3742 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3745 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3747 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3750 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3751 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3754 case L2CAP_CONF_RFC
:
3755 if (olen
== sizeof(rfc
))
3756 memcpy(&rfc
, (void *)val
, olen
);
3758 case L2CAP_CONF_EWS
:
3765 case L2CAP_MODE_ERTM
:
3766 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3767 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3768 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3769 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3770 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3772 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3775 case L2CAP_MODE_STREAMING
:
3776 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3780 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3781 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3784 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3786 if (cmd_len
< sizeof(*rej
))
3789 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3792 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3793 cmd
->ident
== conn
->info_ident
) {
3794 cancel_delayed_work(&conn
->info_timer
);
3796 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3797 conn
->info_ident
= 0;
3799 l2cap_conn_start(conn
);
3805 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3806 struct l2cap_cmd_hdr
*cmd
,
3807 u8
*data
, u8 rsp_code
, u8 amp_id
)
3809 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3810 struct l2cap_conn_rsp rsp
;
3811 struct l2cap_chan
*chan
= NULL
, *pchan
;
3812 int result
, status
= L2CAP_CS_NO_INFO
;
3814 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3815 __le16 psm
= req
->psm
;
3817 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3819 /* Check if we have socket listening on psm */
3820 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3821 &conn
->hcon
->dst
, ACL_LINK
);
3823 result
= L2CAP_CR_BAD_PSM
;
3827 mutex_lock(&conn
->chan_lock
);
3828 l2cap_chan_lock(pchan
);
3830 /* Check if the ACL is secure enough (if not SDP) */
3831 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3832 !hci_conn_check_link_mode(conn
->hcon
)) {
3833 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3834 result
= L2CAP_CR_SEC_BLOCK
;
3838 result
= L2CAP_CR_NO_MEM
;
3840 /* Check if we already have channel with that dcid */
3841 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3844 chan
= pchan
->ops
->new_connection(pchan
);
3848 /* For certain devices (ex: HID mouse), support for authentication,
3849 * pairing and bonding is optional. For such devices, inorder to avoid
3850 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3851 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3853 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3855 bacpy(&chan
->src
, &conn
->hcon
->src
);
3856 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3857 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
3858 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
3861 chan
->local_amp_id
= amp_id
;
3863 __l2cap_chan_add(conn
, chan
);
3867 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3869 chan
->ident
= cmd
->ident
;
3871 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3872 if (l2cap_chan_check_security(chan
)) {
3873 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3874 l2cap_state_change(chan
, BT_CONNECT2
);
3875 result
= L2CAP_CR_PEND
;
3876 status
= L2CAP_CS_AUTHOR_PEND
;
3877 chan
->ops
->defer(chan
);
3879 /* Force pending result for AMP controllers.
3880 * The connection will succeed after the
3881 * physical link is up.
3883 if (amp_id
== AMP_ID_BREDR
) {
3884 l2cap_state_change(chan
, BT_CONFIG
);
3885 result
= L2CAP_CR_SUCCESS
;
3887 l2cap_state_change(chan
, BT_CONNECT2
);
3888 result
= L2CAP_CR_PEND
;
3890 status
= L2CAP_CS_NO_INFO
;
3893 l2cap_state_change(chan
, BT_CONNECT2
);
3894 result
= L2CAP_CR_PEND
;
3895 status
= L2CAP_CS_AUTHEN_PEND
;
3898 l2cap_state_change(chan
, BT_CONNECT2
);
3899 result
= L2CAP_CR_PEND
;
3900 status
= L2CAP_CS_NO_INFO
;
3904 l2cap_chan_unlock(pchan
);
3905 mutex_unlock(&conn
->chan_lock
);
3908 rsp
.scid
= cpu_to_le16(scid
);
3909 rsp
.dcid
= cpu_to_le16(dcid
);
3910 rsp
.result
= cpu_to_le16(result
);
3911 rsp
.status
= cpu_to_le16(status
);
3912 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3914 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3915 struct l2cap_info_req info
;
3916 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3918 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3919 conn
->info_ident
= l2cap_get_ident(conn
);
3921 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3923 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3924 sizeof(info
), &info
);
3927 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3928 result
== L2CAP_CR_SUCCESS
) {
3930 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3931 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3932 l2cap_build_conf_req(chan
, buf
), buf
);
3933 chan
->num_conf_req
++;
3939 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3940 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3942 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3943 struct hci_conn
*hcon
= conn
->hcon
;
3945 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3949 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3950 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3951 mgmt_device_connected(hdev
, &hcon
->dst
, hcon
->type
,
3952 hcon
->dst_type
, 0, NULL
, 0,
3954 hci_dev_unlock(hdev
);
3956 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3960 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3961 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3964 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3965 u16 scid
, dcid
, result
, status
;
3966 struct l2cap_chan
*chan
;
3970 if (cmd_len
< sizeof(*rsp
))
3973 scid
= __le16_to_cpu(rsp
->scid
);
3974 dcid
= __le16_to_cpu(rsp
->dcid
);
3975 result
= __le16_to_cpu(rsp
->result
);
3976 status
= __le16_to_cpu(rsp
->status
);
3978 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3979 dcid
, scid
, result
, status
);
3981 mutex_lock(&conn
->chan_lock
);
3984 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3990 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3999 l2cap_chan_lock(chan
);
4002 case L2CAP_CR_SUCCESS
:
4003 l2cap_state_change(chan
, BT_CONFIG
);
4006 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4008 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
4011 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4012 l2cap_build_conf_req(chan
, req
), req
);
4013 chan
->num_conf_req
++;
4017 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4021 l2cap_chan_del(chan
, ECONNREFUSED
);
4025 l2cap_chan_unlock(chan
);
4028 mutex_unlock(&conn
->chan_lock
);
4033 static inline void set_default_fcs(struct l2cap_chan
*chan
)
4035 /* FCS is enabled only in ERTM or streaming mode, if one or both
4038 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
4039 chan
->fcs
= L2CAP_FCS_NONE
;
4040 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
4041 chan
->fcs
= L2CAP_FCS_CRC16
;
4044 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
4045 u8 ident
, u16 flags
)
4047 struct l2cap_conn
*conn
= chan
->conn
;
4049 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
4052 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
4053 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
4055 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
4056 l2cap_build_conf_rsp(chan
, data
,
4057 L2CAP_CONF_SUCCESS
, flags
), data
);
4060 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
4063 struct l2cap_cmd_rej_cid rej
;
4065 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
4066 rej
.scid
= __cpu_to_le16(scid
);
4067 rej
.dcid
= __cpu_to_le16(dcid
);
4069 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4072 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4073 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4076 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4079 struct l2cap_chan
*chan
;
4082 if (cmd_len
< sizeof(*req
))
4085 dcid
= __le16_to_cpu(req
->dcid
);
4086 flags
= __le16_to_cpu(req
->flags
);
4088 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4090 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4092 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4096 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4097 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4102 /* Reject if config buffer is too small. */
4103 len
= cmd_len
- sizeof(*req
);
4104 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4105 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4106 l2cap_build_conf_rsp(chan
, rsp
,
4107 L2CAP_CONF_REJECT
, flags
), rsp
);
4112 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4113 chan
->conf_len
+= len
;
4115 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4116 /* Incomplete config. Send empty response. */
4117 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4118 l2cap_build_conf_rsp(chan
, rsp
,
4119 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4123 /* Complete config. */
4124 len
= l2cap_parse_conf_req(chan
, rsp
);
4126 l2cap_send_disconn_req(chan
, ECONNRESET
);
4130 chan
->ident
= cmd
->ident
;
4131 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4132 chan
->num_conf_rsp
++;
4134 /* Reset config buffer. */
4137 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4140 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4141 set_default_fcs(chan
);
4143 if (chan
->mode
== L2CAP_MODE_ERTM
||
4144 chan
->mode
== L2CAP_MODE_STREAMING
)
4145 err
= l2cap_ertm_init(chan
);
4148 l2cap_send_disconn_req(chan
, -err
);
4150 l2cap_chan_ready(chan
);
4155 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4157 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4158 l2cap_build_conf_req(chan
, buf
), buf
);
4159 chan
->num_conf_req
++;
4162 /* Got Conf Rsp PENDING from remote side and asume we sent
4163 Conf Rsp PENDING in the code above */
4164 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4165 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4167 /* check compatibility */
4169 /* Send rsp for BR/EDR channel */
4171 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4173 chan
->ident
= cmd
->ident
;
4177 l2cap_chan_unlock(chan
);
4181 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4182 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4185 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4186 u16 scid
, flags
, result
;
4187 struct l2cap_chan
*chan
;
4188 int len
= cmd_len
- sizeof(*rsp
);
4191 if (cmd_len
< sizeof(*rsp
))
4194 scid
= __le16_to_cpu(rsp
->scid
);
4195 flags
= __le16_to_cpu(rsp
->flags
);
4196 result
= __le16_to_cpu(rsp
->result
);
4198 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4201 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4206 case L2CAP_CONF_SUCCESS
:
4207 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4208 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4211 case L2CAP_CONF_PENDING
:
4212 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4214 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4217 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4220 l2cap_send_disconn_req(chan
, ECONNRESET
);
4224 if (!chan
->hs_hcon
) {
4225 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4228 if (l2cap_check_efs(chan
)) {
4229 amp_create_logical_link(chan
);
4230 chan
->ident
= cmd
->ident
;
4236 case L2CAP_CONF_UNACCEPT
:
4237 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4240 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4241 l2cap_send_disconn_req(chan
, ECONNRESET
);
4245 /* throw out any old stored conf requests */
4246 result
= L2CAP_CONF_SUCCESS
;
4247 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4250 l2cap_send_disconn_req(chan
, ECONNRESET
);
4254 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4255 L2CAP_CONF_REQ
, len
, req
);
4256 chan
->num_conf_req
++;
4257 if (result
!= L2CAP_CONF_SUCCESS
)
4263 l2cap_chan_set_err(chan
, ECONNRESET
);
4265 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4266 l2cap_send_disconn_req(chan
, ECONNRESET
);
4270 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4273 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4275 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4276 set_default_fcs(chan
);
4278 if (chan
->mode
== L2CAP_MODE_ERTM
||
4279 chan
->mode
== L2CAP_MODE_STREAMING
)
4280 err
= l2cap_ertm_init(chan
);
4283 l2cap_send_disconn_req(chan
, -err
);
4285 l2cap_chan_ready(chan
);
4289 l2cap_chan_unlock(chan
);
4293 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4294 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4297 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4298 struct l2cap_disconn_rsp rsp
;
4300 struct l2cap_chan
*chan
;
4302 if (cmd_len
!= sizeof(*req
))
4305 scid
= __le16_to_cpu(req
->scid
);
4306 dcid
= __le16_to_cpu(req
->dcid
);
4308 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4310 mutex_lock(&conn
->chan_lock
);
4312 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4314 mutex_unlock(&conn
->chan_lock
);
4315 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4319 l2cap_chan_lock(chan
);
4321 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4322 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4323 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4325 chan
->ops
->set_shutdown(chan
);
4327 l2cap_chan_hold(chan
);
4328 l2cap_chan_del(chan
, ECONNRESET
);
4330 l2cap_chan_unlock(chan
);
4332 chan
->ops
->close(chan
);
4333 l2cap_chan_put(chan
);
4335 mutex_unlock(&conn
->chan_lock
);
4340 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4341 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4344 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4346 struct l2cap_chan
*chan
;
4348 if (cmd_len
!= sizeof(*rsp
))
4351 scid
= __le16_to_cpu(rsp
->scid
);
4352 dcid
= __le16_to_cpu(rsp
->dcid
);
4354 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4356 mutex_lock(&conn
->chan_lock
);
4358 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4360 mutex_unlock(&conn
->chan_lock
);
4364 l2cap_chan_lock(chan
);
4366 l2cap_chan_hold(chan
);
4367 l2cap_chan_del(chan
, 0);
4369 l2cap_chan_unlock(chan
);
4371 chan
->ops
->close(chan
);
4372 l2cap_chan_put(chan
);
4374 mutex_unlock(&conn
->chan_lock
);
4379 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4380 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4383 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4386 if (cmd_len
!= sizeof(*req
))
4389 type
= __le16_to_cpu(req
->type
);
4391 BT_DBG("type 0x%4.4x", type
);
4393 if (type
== L2CAP_IT_FEAT_MASK
) {
4395 u32 feat_mask
= l2cap_feat_mask
;
4396 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4397 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4398 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4400 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4402 if (conn
->hs_enabled
)
4403 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4404 | L2CAP_FEAT_EXT_WINDOW
;
4406 put_unaligned_le32(feat_mask
, rsp
->data
);
4407 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4409 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4411 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4413 if (conn
->hs_enabled
)
4414 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4416 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4418 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4419 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4420 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4421 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4424 struct l2cap_info_rsp rsp
;
4425 rsp
.type
= cpu_to_le16(type
);
4426 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
4427 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4434 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4435 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4438 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4441 if (cmd_len
< sizeof(*rsp
))
4444 type
= __le16_to_cpu(rsp
->type
);
4445 result
= __le16_to_cpu(rsp
->result
);
4447 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4449 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4450 if (cmd
->ident
!= conn
->info_ident
||
4451 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4454 cancel_delayed_work(&conn
->info_timer
);
4456 if (result
!= L2CAP_IR_SUCCESS
) {
4457 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4458 conn
->info_ident
= 0;
4460 l2cap_conn_start(conn
);
4466 case L2CAP_IT_FEAT_MASK
:
4467 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4469 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4470 struct l2cap_info_req req
;
4471 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4473 conn
->info_ident
= l2cap_get_ident(conn
);
4475 l2cap_send_cmd(conn
, conn
->info_ident
,
4476 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4478 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4479 conn
->info_ident
= 0;
4481 l2cap_conn_start(conn
);
4485 case L2CAP_IT_FIXED_CHAN
:
4486 conn
->fixed_chan_mask
= rsp
->data
[0];
4487 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4488 conn
->info_ident
= 0;
4490 l2cap_conn_start(conn
);
4497 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4498 struct l2cap_cmd_hdr
*cmd
,
4499 u16 cmd_len
, void *data
)
4501 struct l2cap_create_chan_req
*req
= data
;
4502 struct l2cap_create_chan_rsp rsp
;
4503 struct l2cap_chan
*chan
;
4504 struct hci_dev
*hdev
;
4507 if (cmd_len
!= sizeof(*req
))
4510 if (!conn
->hs_enabled
)
4513 psm
= le16_to_cpu(req
->psm
);
4514 scid
= le16_to_cpu(req
->scid
);
4516 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4518 /* For controller id 0 make BR/EDR connection */
4519 if (req
->amp_id
== AMP_ID_BREDR
) {
4520 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4525 /* Validate AMP controller id */
4526 hdev
= hci_dev_get(req
->amp_id
);
4530 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4535 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4538 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4539 struct hci_conn
*hs_hcon
;
4541 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4545 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4550 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4552 mgr
->bredr_chan
= chan
;
4553 chan
->hs_hcon
= hs_hcon
;
4554 chan
->fcs
= L2CAP_FCS_NONE
;
4555 conn
->mtu
= hdev
->block_mtu
;
4564 rsp
.scid
= cpu_to_le16(scid
);
4565 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_BAD_AMP
);
4566 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4568 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4574 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4576 struct l2cap_move_chan_req req
;
4579 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4581 ident
= l2cap_get_ident(chan
->conn
);
4582 chan
->ident
= ident
;
4584 req
.icid
= cpu_to_le16(chan
->scid
);
4585 req
.dest_amp_id
= dest_amp_id
;
4587 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4590 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4593 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4595 struct l2cap_move_chan_rsp rsp
;
4597 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4599 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4600 rsp
.result
= cpu_to_le16(result
);
4602 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4606 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4608 struct l2cap_move_chan_cfm cfm
;
4610 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4612 chan
->ident
= l2cap_get_ident(chan
->conn
);
4614 cfm
.icid
= cpu_to_le16(chan
->scid
);
4615 cfm
.result
= cpu_to_le16(result
);
4617 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4620 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4623 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4625 struct l2cap_move_chan_cfm cfm
;
4627 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4629 cfm
.icid
= cpu_to_le16(icid
);
4630 cfm
.result
= __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4632 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4636 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4639 struct l2cap_move_chan_cfm_rsp rsp
;
4641 BT_DBG("icid 0x%4.4x", icid
);
4643 rsp
.icid
= cpu_to_le16(icid
);
4644 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4647 static void __release_logical_link(struct l2cap_chan
*chan
)
4649 chan
->hs_hchan
= NULL
;
4650 chan
->hs_hcon
= NULL
;
4652 /* Placeholder - release the logical link */
4655 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4657 /* Logical link setup failed */
4658 if (chan
->state
!= BT_CONNECTED
) {
4659 /* Create channel failure, disconnect */
4660 l2cap_send_disconn_req(chan
, ECONNRESET
);
4664 switch (chan
->move_role
) {
4665 case L2CAP_MOVE_ROLE_RESPONDER
:
4666 l2cap_move_done(chan
);
4667 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4669 case L2CAP_MOVE_ROLE_INITIATOR
:
4670 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4671 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4672 /* Remote has only sent pending or
4673 * success responses, clean up
4675 l2cap_move_done(chan
);
4678 /* Other amp move states imply that the move
4679 * has already aborted
4681 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4686 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4687 struct hci_chan
*hchan
)
4689 struct l2cap_conf_rsp rsp
;
4691 chan
->hs_hchan
= hchan
;
4692 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4694 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4696 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4699 set_default_fcs(chan
);
4701 err
= l2cap_ertm_init(chan
);
4703 l2cap_send_disconn_req(chan
, -err
);
4705 l2cap_chan_ready(chan
);
4709 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4710 struct hci_chan
*hchan
)
4712 chan
->hs_hcon
= hchan
->conn
;
4713 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4715 BT_DBG("move_state %d", chan
->move_state
);
4717 switch (chan
->move_state
) {
4718 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4719 /* Move confirm will be sent after a success
4720 * response is received
4722 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4724 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4725 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4726 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4727 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4728 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4729 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4730 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4731 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4732 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4736 /* Move was not in expected state, free the channel */
4737 __release_logical_link(chan
);
4739 chan
->move_state
= L2CAP_MOVE_STABLE
;
4743 /* Call with chan locked */
4744 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4747 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4750 l2cap_logical_fail(chan
);
4751 __release_logical_link(chan
);
4755 if (chan
->state
!= BT_CONNECTED
) {
4756 /* Ignore logical link if channel is on BR/EDR */
4757 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4758 l2cap_logical_finish_create(chan
, hchan
);
4760 l2cap_logical_finish_move(chan
, hchan
);
4764 void l2cap_move_start(struct l2cap_chan
*chan
)
4766 BT_DBG("chan %p", chan
);
4768 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4769 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4771 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4772 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4773 /* Placeholder - start physical link setup */
4775 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4776 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4778 l2cap_move_setup(chan
);
4779 l2cap_send_move_chan_req(chan
, 0);
4783 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4784 u8 local_amp_id
, u8 remote_amp_id
)
4786 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4787 local_amp_id
, remote_amp_id
);
4789 chan
->fcs
= L2CAP_FCS_NONE
;
4791 /* Outgoing channel on AMP */
4792 if (chan
->state
== BT_CONNECT
) {
4793 if (result
== L2CAP_CR_SUCCESS
) {
4794 chan
->local_amp_id
= local_amp_id
;
4795 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4797 /* Revert to BR/EDR connect */
4798 l2cap_send_conn_req(chan
);
4804 /* Incoming channel on AMP */
4805 if (__l2cap_no_conn_pending(chan
)) {
4806 struct l2cap_conn_rsp rsp
;
4808 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4809 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4811 if (result
== L2CAP_CR_SUCCESS
) {
4812 /* Send successful response */
4813 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
4814 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4816 /* Send negative response */
4817 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4818 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4821 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4824 if (result
== L2CAP_CR_SUCCESS
) {
4825 l2cap_state_change(chan
, BT_CONFIG
);
4826 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4827 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4829 l2cap_build_conf_req(chan
, buf
), buf
);
4830 chan
->num_conf_req
++;
4835 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4838 l2cap_move_setup(chan
);
4839 chan
->move_id
= local_amp_id
;
4840 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4842 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4845 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4847 struct hci_chan
*hchan
= NULL
;
4849 /* Placeholder - get hci_chan for logical link */
4852 if (hchan
->state
== BT_CONNECTED
) {
4853 /* Logical link is ready to go */
4854 chan
->hs_hcon
= hchan
->conn
;
4855 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4856 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4857 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4859 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4861 /* Wait for logical link to be ready */
4862 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4865 /* Logical link not available */
4866 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4870 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4872 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4874 if (result
== -EINVAL
)
4875 rsp_result
= L2CAP_MR_BAD_ID
;
4877 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4879 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4882 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4883 chan
->move_state
= L2CAP_MOVE_STABLE
;
4885 /* Restart data transmission */
4886 l2cap_ertm_send(chan
);
4889 /* Invoke with locked chan */
4890 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4892 u8 local_amp_id
= chan
->local_amp_id
;
4893 u8 remote_amp_id
= chan
->remote_amp_id
;
4895 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4896 chan
, result
, local_amp_id
, remote_amp_id
);
4898 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4899 l2cap_chan_unlock(chan
);
4903 if (chan
->state
!= BT_CONNECTED
) {
4904 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4905 } else if (result
!= L2CAP_MR_SUCCESS
) {
4906 l2cap_do_move_cancel(chan
, result
);
4908 switch (chan
->move_role
) {
4909 case L2CAP_MOVE_ROLE_INITIATOR
:
4910 l2cap_do_move_initiate(chan
, local_amp_id
,
4913 case L2CAP_MOVE_ROLE_RESPONDER
:
4914 l2cap_do_move_respond(chan
, result
);
4917 l2cap_do_move_cancel(chan
, result
);
4923 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4924 struct l2cap_cmd_hdr
*cmd
,
4925 u16 cmd_len
, void *data
)
4927 struct l2cap_move_chan_req
*req
= data
;
4928 struct l2cap_move_chan_rsp rsp
;
4929 struct l2cap_chan
*chan
;
4931 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4933 if (cmd_len
!= sizeof(*req
))
4936 icid
= le16_to_cpu(req
->icid
);
4938 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4940 if (!conn
->hs_enabled
)
4943 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4945 rsp
.icid
= cpu_to_le16(icid
);
4946 rsp
.result
= __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4947 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4952 chan
->ident
= cmd
->ident
;
4954 if (chan
->scid
< L2CAP_CID_DYN_START
||
4955 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4956 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4957 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4958 result
= L2CAP_MR_NOT_ALLOWED
;
4959 goto send_move_response
;
4962 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4963 result
= L2CAP_MR_SAME_ID
;
4964 goto send_move_response
;
4967 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4968 struct hci_dev
*hdev
;
4969 hdev
= hci_dev_get(req
->dest_amp_id
);
4970 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4971 !test_bit(HCI_UP
, &hdev
->flags
)) {
4975 result
= L2CAP_MR_BAD_ID
;
4976 goto send_move_response
;
4981 /* Detect a move collision. Only send a collision response
4982 * if this side has "lost", otherwise proceed with the move.
4983 * The winner has the larger bd_addr.
4985 if ((__chan_is_moving(chan
) ||
4986 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4987 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4988 result
= L2CAP_MR_COLLISION
;
4989 goto send_move_response
;
4992 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4993 l2cap_move_setup(chan
);
4994 chan
->move_id
= req
->dest_amp_id
;
4997 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4998 /* Moving to BR/EDR */
4999 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5000 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
5001 result
= L2CAP_MR_PEND
;
5003 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
5004 result
= L2CAP_MR_SUCCESS
;
5007 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
5008 /* Placeholder - uncomment when amp functions are available */
5009 /*amp_accept_physical(chan, req->dest_amp_id);*/
5010 result
= L2CAP_MR_PEND
;
5014 l2cap_send_move_chan_rsp(chan
, result
);
5016 l2cap_chan_unlock(chan
);
5021 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
5023 struct l2cap_chan
*chan
;
5024 struct hci_chan
*hchan
= NULL
;
5026 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5028 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5032 __clear_chan_timer(chan
);
5033 if (result
== L2CAP_MR_PEND
)
5034 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
5036 switch (chan
->move_state
) {
5037 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
5038 /* Move confirm will be sent when logical link
5041 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5043 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
5044 if (result
== L2CAP_MR_PEND
) {
5046 } else if (test_bit(CONN_LOCAL_BUSY
,
5047 &chan
->conn_state
)) {
5048 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
5050 /* Logical link is up or moving to BR/EDR,
5053 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
5054 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5057 case L2CAP_MOVE_WAIT_RSP
:
5059 if (result
== L2CAP_MR_SUCCESS
) {
5060 /* Remote is ready, send confirm immediately
5061 * after logical link is ready
5063 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5065 /* Both logical link and move success
5066 * are required to confirm
5068 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5071 /* Placeholder - get hci_chan for logical link */
5073 /* Logical link not available */
5074 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5078 /* If the logical link is not yet connected, do not
5079 * send confirmation.
5081 if (hchan
->state
!= BT_CONNECTED
)
5084 /* Logical link is already ready to go */
5086 chan
->hs_hcon
= hchan
->conn
;
5087 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5089 if (result
== L2CAP_MR_SUCCESS
) {
5090 /* Can confirm now */
5091 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5093 /* Now only need move success
5096 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5099 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5102 /* Any other amp move state means the move failed. */
5103 chan
->move_id
= chan
->local_amp_id
;
5104 l2cap_move_done(chan
);
5105 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5108 l2cap_chan_unlock(chan
);
5111 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5114 struct l2cap_chan
*chan
;
5116 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5118 /* Could not locate channel, icid is best guess */
5119 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5123 __clear_chan_timer(chan
);
5125 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5126 if (result
== L2CAP_MR_COLLISION
) {
5127 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5129 /* Cleanup - cancel move */
5130 chan
->move_id
= chan
->local_amp_id
;
5131 l2cap_move_done(chan
);
5135 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5137 l2cap_chan_unlock(chan
);
5140 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5141 struct l2cap_cmd_hdr
*cmd
,
5142 u16 cmd_len
, void *data
)
5144 struct l2cap_move_chan_rsp
*rsp
= data
;
5147 if (cmd_len
!= sizeof(*rsp
))
5150 icid
= le16_to_cpu(rsp
->icid
);
5151 result
= le16_to_cpu(rsp
->result
);
5153 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5155 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5156 l2cap_move_continue(conn
, icid
, result
);
5158 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5163 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5164 struct l2cap_cmd_hdr
*cmd
,
5165 u16 cmd_len
, void *data
)
5167 struct l2cap_move_chan_cfm
*cfm
= data
;
5168 struct l2cap_chan
*chan
;
5171 if (cmd_len
!= sizeof(*cfm
))
5174 icid
= le16_to_cpu(cfm
->icid
);
5175 result
= le16_to_cpu(cfm
->result
);
5177 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5179 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5181 /* Spec requires a response even if the icid was not found */
5182 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5186 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5187 if (result
== L2CAP_MC_CONFIRMED
) {
5188 chan
->local_amp_id
= chan
->move_id
;
5189 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5190 __release_logical_link(chan
);
5192 chan
->move_id
= chan
->local_amp_id
;
5195 l2cap_move_done(chan
);
5198 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5200 l2cap_chan_unlock(chan
);
5205 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5206 struct l2cap_cmd_hdr
*cmd
,
5207 u16 cmd_len
, void *data
)
5209 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5210 struct l2cap_chan
*chan
;
5213 if (cmd_len
!= sizeof(*rsp
))
5216 icid
= le16_to_cpu(rsp
->icid
);
5218 BT_DBG("icid 0x%4.4x", icid
);
5220 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5224 __clear_chan_timer(chan
);
5226 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5227 chan
->local_amp_id
= chan
->move_id
;
5229 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5230 __release_logical_link(chan
);
5232 l2cap_move_done(chan
);
5235 l2cap_chan_unlock(chan
);
5240 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
5245 if (min
> max
|| min
< 6 || max
> 3200)
5248 if (to_multiplier
< 10 || to_multiplier
> 3200)
5251 if (max
>= to_multiplier
* 8)
5254 max_latency
= (to_multiplier
* 8 / max
) - 1;
5255 if (latency
> 499 || latency
> max_latency
)
5261 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5262 struct l2cap_cmd_hdr
*cmd
,
5263 u16 cmd_len
, u8
*data
)
5265 struct hci_conn
*hcon
= conn
->hcon
;
5266 struct l2cap_conn_param_update_req
*req
;
5267 struct l2cap_conn_param_update_rsp rsp
;
5268 u16 min
, max
, latency
, to_multiplier
;
5271 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
5274 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5277 req
= (struct l2cap_conn_param_update_req
*) data
;
5278 min
= __le16_to_cpu(req
->min
);
5279 max
= __le16_to_cpu(req
->max
);
5280 latency
= __le16_to_cpu(req
->latency
);
5281 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5283 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5284 min
, max
, latency
, to_multiplier
);
5286 memset(&rsp
, 0, sizeof(rsp
));
5288 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
5290 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5292 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5294 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5298 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
5303 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5304 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5307 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5308 u16 dcid
, mtu
, mps
, credits
, result
;
5309 struct l2cap_chan
*chan
;
5312 if (cmd_len
< sizeof(*rsp
))
5315 dcid
= __le16_to_cpu(rsp
->dcid
);
5316 mtu
= __le16_to_cpu(rsp
->mtu
);
5317 mps
= __le16_to_cpu(rsp
->mps
);
5318 credits
= __le16_to_cpu(rsp
->credits
);
5319 result
= __le16_to_cpu(rsp
->result
);
5321 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23))
5324 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5325 dcid
, mtu
, mps
, credits
, result
);
5327 mutex_lock(&conn
->chan_lock
);
5329 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5337 l2cap_chan_lock(chan
);
5340 case L2CAP_CR_SUCCESS
:
5344 chan
->remote_mps
= mps
;
5345 l2cap_chan_ready(chan
);
5349 l2cap_chan_del(chan
, ECONNREFUSED
);
5353 l2cap_chan_unlock(chan
);
5356 mutex_unlock(&conn
->chan_lock
);
5361 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5362 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5367 switch (cmd
->code
) {
5368 case L2CAP_COMMAND_REJ
:
5369 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5372 case L2CAP_CONN_REQ
:
5373 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5376 case L2CAP_CONN_RSP
:
5377 case L2CAP_CREATE_CHAN_RSP
:
5378 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5381 case L2CAP_CONF_REQ
:
5382 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5385 case L2CAP_CONF_RSP
:
5386 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5389 case L2CAP_DISCONN_REQ
:
5390 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5393 case L2CAP_DISCONN_RSP
:
5394 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5397 case L2CAP_ECHO_REQ
:
5398 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5401 case L2CAP_ECHO_RSP
:
5404 case L2CAP_INFO_REQ
:
5405 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5408 case L2CAP_INFO_RSP
:
5409 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5412 case L2CAP_CREATE_CHAN_REQ
:
5413 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5416 case L2CAP_MOVE_CHAN_REQ
:
5417 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5420 case L2CAP_MOVE_CHAN_RSP
:
5421 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5424 case L2CAP_MOVE_CHAN_CFM
:
5425 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5428 case L2CAP_MOVE_CHAN_CFM_RSP
:
5429 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5433 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5441 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5442 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5445 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5446 struct l2cap_le_conn_rsp rsp
;
5447 struct l2cap_chan
*chan
, *pchan
;
5448 u16 dcid
, scid
, mtu
, mps
;
5452 if (cmd_len
!= sizeof(*req
))
5455 scid
= __le16_to_cpu(req
->scid
);
5456 mtu
= __le16_to_cpu(req
->mtu
);
5457 mps
= __le16_to_cpu(req
->mps
);
5461 if (mtu
< 23 || mps
< 23)
5464 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5467 /* Check if we have socket listening on psm */
5468 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5469 &conn
->hcon
->dst
, LE_LINK
);
5471 result
= L2CAP_CR_BAD_PSM
;
5476 mutex_lock(&conn
->chan_lock
);
5477 l2cap_chan_lock(pchan
);
5479 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
)) {
5480 result
= L2CAP_CR_AUTHENTICATION
;
5482 goto response_unlock
;
5485 /* Check if we already have channel with that dcid */
5486 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5487 result
= L2CAP_CR_NO_MEM
;
5489 goto response_unlock
;
5492 chan
= pchan
->ops
->new_connection(pchan
);
5494 result
= L2CAP_CR_NO_MEM
;
5495 goto response_unlock
;
5498 bacpy(&chan
->src
, &conn
->hcon
->src
);
5499 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5500 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
5501 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
5505 chan
->remote_mps
= mps
;
5507 __l2cap_chan_add(conn
, chan
);
5510 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5512 chan
->ident
= cmd
->ident
;
5514 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5515 l2cap_state_change(chan
, BT_CONNECT2
);
5516 result
= L2CAP_CR_PEND
;
5517 chan
->ops
->defer(chan
);
5519 l2cap_chan_ready(chan
);
5520 result
= L2CAP_CR_SUCCESS
;
5524 l2cap_chan_unlock(pchan
);
5525 mutex_unlock(&conn
->chan_lock
);
5527 if (result
== L2CAP_CR_PEND
)
5532 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5533 rsp
.mps
= __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS
);
5539 rsp
.dcid
= cpu_to_le16(dcid
);
5540 rsp
.credits
= __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS
);
5541 rsp
.result
= cpu_to_le16(result
);
5543 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
5548 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5549 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5554 switch (cmd
->code
) {
5555 case L2CAP_COMMAND_REJ
:
5558 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5559 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5562 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5565 case L2CAP_LE_CONN_RSP
:
5566 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5569 case L2CAP_LE_CONN_REQ
:
5570 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5573 case L2CAP_DISCONN_REQ
:
5574 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5577 case L2CAP_DISCONN_RSP
:
5578 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5582 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5590 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5591 struct sk_buff
*skb
)
5593 struct hci_conn
*hcon
= conn
->hcon
;
5594 struct l2cap_cmd_hdr
*cmd
;
5598 if (hcon
->type
!= LE_LINK
)
5601 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5604 cmd
= (void *) skb
->data
;
5605 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5607 len
= le16_to_cpu(cmd
->len
);
5609 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5611 if (len
!= skb
->len
|| !cmd
->ident
) {
5612 BT_DBG("corrupted command");
5616 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5618 struct l2cap_cmd_rej_unk rej
;
5620 BT_ERR("Wrong link type (%d)", err
);
5622 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5623 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5631 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5632 struct sk_buff
*skb
)
5634 struct hci_conn
*hcon
= conn
->hcon
;
5635 u8
*data
= skb
->data
;
5637 struct l2cap_cmd_hdr cmd
;
5640 l2cap_raw_recv(conn
, skb
);
5642 if (hcon
->type
!= ACL_LINK
)
5645 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5647 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5648 data
+= L2CAP_CMD_HDR_SIZE
;
5649 len
-= L2CAP_CMD_HDR_SIZE
;
5651 cmd_len
= le16_to_cpu(cmd
.len
);
5653 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5656 if (cmd_len
> len
|| !cmd
.ident
) {
5657 BT_DBG("corrupted command");
5661 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5663 struct l2cap_cmd_rej_unk rej
;
5665 BT_ERR("Wrong link type (%d)", err
);
5667 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5668 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5680 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5682 u16 our_fcs
, rcv_fcs
;
5685 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5686 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5688 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5690 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5691 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5692 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5693 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5695 if (our_fcs
!= rcv_fcs
)
5701 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5703 struct l2cap_ctrl control
;
5705 BT_DBG("chan %p", chan
);
5707 memset(&control
, 0, sizeof(control
));
5710 control
.reqseq
= chan
->buffer_seq
;
5711 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5713 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5714 control
.super
= L2CAP_SUPER_RNR
;
5715 l2cap_send_sframe(chan
, &control
);
5718 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5719 chan
->unacked_frames
> 0)
5720 __set_retrans_timer(chan
);
5722 /* Send pending iframes */
5723 l2cap_ertm_send(chan
);
5725 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5726 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5727 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5730 control
.super
= L2CAP_SUPER_RR
;
5731 l2cap_send_sframe(chan
, &control
);
5735 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5736 struct sk_buff
**last_frag
)
5738 /* skb->len reflects data in skb as well as all fragments
5739 * skb->data_len reflects only data in fragments
5741 if (!skb_has_frag_list(skb
))
5742 skb_shinfo(skb
)->frag_list
= new_frag
;
5744 new_frag
->next
= NULL
;
5746 (*last_frag
)->next
= new_frag
;
5747 *last_frag
= new_frag
;
5749 skb
->len
+= new_frag
->len
;
5750 skb
->data_len
+= new_frag
->len
;
5751 skb
->truesize
+= new_frag
->truesize
;
5754 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5755 struct l2cap_ctrl
*control
)
5759 switch (control
->sar
) {
5760 case L2CAP_SAR_UNSEGMENTED
:
5764 err
= chan
->ops
->recv(chan
, skb
);
5767 case L2CAP_SAR_START
:
5771 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5772 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5774 if (chan
->sdu_len
> chan
->imtu
) {
5779 if (skb
->len
>= chan
->sdu_len
)
5783 chan
->sdu_last_frag
= skb
;
5789 case L2CAP_SAR_CONTINUE
:
5793 append_skb_frag(chan
->sdu
, skb
,
5794 &chan
->sdu_last_frag
);
5797 if (chan
->sdu
->len
>= chan
->sdu_len
)
5807 append_skb_frag(chan
->sdu
, skb
,
5808 &chan
->sdu_last_frag
);
5811 if (chan
->sdu
->len
!= chan
->sdu_len
)
5814 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5817 /* Reassembly complete */
5819 chan
->sdu_last_frag
= NULL
;
5827 kfree_skb(chan
->sdu
);
5829 chan
->sdu_last_frag
= NULL
;
5836 static int l2cap_resegment(struct l2cap_chan
*chan
)
5842 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5846 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5849 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5850 l2cap_tx(chan
, NULL
, NULL
, event
);
5853 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5856 /* Pass sequential frames to l2cap_reassemble_sdu()
5857 * until a gap is encountered.
5860 BT_DBG("chan %p", chan
);
5862 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5863 struct sk_buff
*skb
;
5864 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5865 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5867 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5872 skb_unlink(skb
, &chan
->srej_q
);
5873 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5874 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
5879 if (skb_queue_empty(&chan
->srej_q
)) {
5880 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5881 l2cap_send_ack(chan
);
5887 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5888 struct l2cap_ctrl
*control
)
5890 struct sk_buff
*skb
;
5892 BT_DBG("chan %p, control %p", chan
, control
);
5894 if (control
->reqseq
== chan
->next_tx_seq
) {
5895 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5896 l2cap_send_disconn_req(chan
, ECONNRESET
);
5900 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5903 BT_DBG("Seq %d not available for retransmission",
5908 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5909 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5910 l2cap_send_disconn_req(chan
, ECONNRESET
);
5914 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5916 if (control
->poll
) {
5917 l2cap_pass_to_tx(chan
, control
);
5919 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5920 l2cap_retransmit(chan
, control
);
5921 l2cap_ertm_send(chan
);
5923 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5924 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5925 chan
->srej_save_reqseq
= control
->reqseq
;
5928 l2cap_pass_to_tx_fbit(chan
, control
);
5930 if (control
->final
) {
5931 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5932 !test_and_clear_bit(CONN_SREJ_ACT
,
5934 l2cap_retransmit(chan
, control
);
5936 l2cap_retransmit(chan
, control
);
5937 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5938 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5939 chan
->srej_save_reqseq
= control
->reqseq
;
5945 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5946 struct l2cap_ctrl
*control
)
5948 struct sk_buff
*skb
;
5950 BT_DBG("chan %p, control %p", chan
, control
);
5952 if (control
->reqseq
== chan
->next_tx_seq
) {
5953 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5954 l2cap_send_disconn_req(chan
, ECONNRESET
);
5958 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5960 if (chan
->max_tx
&& skb
&&
5961 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5962 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5963 l2cap_send_disconn_req(chan
, ECONNRESET
);
5967 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5969 l2cap_pass_to_tx(chan
, control
);
5971 if (control
->final
) {
5972 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
5973 l2cap_retransmit_all(chan
, control
);
5975 l2cap_retransmit_all(chan
, control
);
5976 l2cap_ertm_send(chan
);
5977 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
5978 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
5982 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
5984 BT_DBG("chan %p, txseq %d", chan
, txseq
);
5986 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
5987 chan
->expected_tx_seq
);
5989 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
5990 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5992 /* See notes below regarding "double poll" and
5995 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5996 BT_DBG("Invalid/Ignore - after SREJ");
5997 return L2CAP_TXSEQ_INVALID_IGNORE
;
5999 BT_DBG("Invalid - in window after SREJ sent");
6000 return L2CAP_TXSEQ_INVALID
;
6004 if (chan
->srej_list
.head
== txseq
) {
6005 BT_DBG("Expected SREJ");
6006 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6009 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6010 BT_DBG("Duplicate SREJ - txseq already stored");
6011 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6014 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6015 BT_DBG("Unexpected SREJ - not requested");
6016 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6020 if (chan
->expected_tx_seq
== txseq
) {
6021 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6023 BT_DBG("Invalid - txseq outside tx window");
6024 return L2CAP_TXSEQ_INVALID
;
6027 return L2CAP_TXSEQ_EXPECTED
;
6031 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6032 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6033 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6034 return L2CAP_TXSEQ_DUPLICATE
;
6037 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6038 /* A source of invalid packets is a "double poll" condition,
6039 * where delays cause us to send multiple poll packets. If
6040 * the remote stack receives and processes both polls,
6041 * sequence numbers can wrap around in such a way that a
6042 * resent frame has a sequence number that looks like new data
6043 * with a sequence gap. This would trigger an erroneous SREJ
6046 * Fortunately, this is impossible with a tx window that's
6047 * less than half of the maximum sequence number, which allows
6048 * invalid frames to be safely ignored.
6050 * With tx window sizes greater than half of the tx window
6051 * maximum, the frame is invalid and cannot be ignored. This
6052 * causes a disconnect.
6055 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6056 BT_DBG("Invalid/Ignore - txseq outside tx window");
6057 return L2CAP_TXSEQ_INVALID_IGNORE
;
6059 BT_DBG("Invalid - txseq outside tx window");
6060 return L2CAP_TXSEQ_INVALID
;
6063 BT_DBG("Unexpected - txseq indicates missing frames");
6064 return L2CAP_TXSEQ_UNEXPECTED
;
6068 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6069 struct l2cap_ctrl
*control
,
6070 struct sk_buff
*skb
, u8 event
)
6073 bool skb_in_use
= false;
6075 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6079 case L2CAP_EV_RECV_IFRAME
:
6080 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6081 case L2CAP_TXSEQ_EXPECTED
:
6082 l2cap_pass_to_tx(chan
, control
);
6084 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6085 BT_DBG("Busy, discarding expected seq %d",
6090 chan
->expected_tx_seq
= __next_seq(chan
,
6093 chan
->buffer_seq
= chan
->expected_tx_seq
;
6096 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6100 if (control
->final
) {
6101 if (!test_and_clear_bit(CONN_REJ_ACT
,
6102 &chan
->conn_state
)) {
6104 l2cap_retransmit_all(chan
, control
);
6105 l2cap_ertm_send(chan
);
6109 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6110 l2cap_send_ack(chan
);
6112 case L2CAP_TXSEQ_UNEXPECTED
:
6113 l2cap_pass_to_tx(chan
, control
);
6115 /* Can't issue SREJ frames in the local busy state.
6116 * Drop this frame, it will be seen as missing
6117 * when local busy is exited.
6119 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6120 BT_DBG("Busy, discarding unexpected seq %d",
6125 /* There was a gap in the sequence, so an SREJ
6126 * must be sent for each missing frame. The
6127 * current frame is stored for later use.
6129 skb_queue_tail(&chan
->srej_q
, skb
);
6131 BT_DBG("Queued %p (queue len %d)", skb
,
6132 skb_queue_len(&chan
->srej_q
));
6134 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6135 l2cap_seq_list_clear(&chan
->srej_list
);
6136 l2cap_send_srej(chan
, control
->txseq
);
6138 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6140 case L2CAP_TXSEQ_DUPLICATE
:
6141 l2cap_pass_to_tx(chan
, control
);
6143 case L2CAP_TXSEQ_INVALID_IGNORE
:
6145 case L2CAP_TXSEQ_INVALID
:
6147 l2cap_send_disconn_req(chan
, ECONNRESET
);
6151 case L2CAP_EV_RECV_RR
:
6152 l2cap_pass_to_tx(chan
, control
);
6153 if (control
->final
) {
6154 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6156 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6157 !__chan_is_moving(chan
)) {
6159 l2cap_retransmit_all(chan
, control
);
6162 l2cap_ertm_send(chan
);
6163 } else if (control
->poll
) {
6164 l2cap_send_i_or_rr_or_rnr(chan
);
6166 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6167 &chan
->conn_state
) &&
6168 chan
->unacked_frames
)
6169 __set_retrans_timer(chan
);
6171 l2cap_ertm_send(chan
);
6174 case L2CAP_EV_RECV_RNR
:
6175 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6176 l2cap_pass_to_tx(chan
, control
);
6177 if (control
&& control
->poll
) {
6178 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6179 l2cap_send_rr_or_rnr(chan
, 0);
6181 __clear_retrans_timer(chan
);
6182 l2cap_seq_list_clear(&chan
->retrans_list
);
6184 case L2CAP_EV_RECV_REJ
:
6185 l2cap_handle_rej(chan
, control
);
6187 case L2CAP_EV_RECV_SREJ
:
6188 l2cap_handle_srej(chan
, control
);
6194 if (skb
&& !skb_in_use
) {
6195 BT_DBG("Freeing %p", skb
);
6202 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6203 struct l2cap_ctrl
*control
,
6204 struct sk_buff
*skb
, u8 event
)
6207 u16 txseq
= control
->txseq
;
6208 bool skb_in_use
= false;
6210 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6214 case L2CAP_EV_RECV_IFRAME
:
6215 switch (l2cap_classify_txseq(chan
, txseq
)) {
6216 case L2CAP_TXSEQ_EXPECTED
:
6217 /* Keep frame for reassembly later */
6218 l2cap_pass_to_tx(chan
, control
);
6219 skb_queue_tail(&chan
->srej_q
, skb
);
6221 BT_DBG("Queued %p (queue len %d)", skb
,
6222 skb_queue_len(&chan
->srej_q
));
6224 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6226 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6227 l2cap_seq_list_pop(&chan
->srej_list
);
6229 l2cap_pass_to_tx(chan
, control
);
6230 skb_queue_tail(&chan
->srej_q
, skb
);
6232 BT_DBG("Queued %p (queue len %d)", skb
,
6233 skb_queue_len(&chan
->srej_q
));
6235 err
= l2cap_rx_queued_iframes(chan
);
6240 case L2CAP_TXSEQ_UNEXPECTED
:
6241 /* Got a frame that can't be reassembled yet.
6242 * Save it for later, and send SREJs to cover
6243 * the missing frames.
6245 skb_queue_tail(&chan
->srej_q
, skb
);
6247 BT_DBG("Queued %p (queue len %d)", skb
,
6248 skb_queue_len(&chan
->srej_q
));
6250 l2cap_pass_to_tx(chan
, control
);
6251 l2cap_send_srej(chan
, control
->txseq
);
6253 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6254 /* This frame was requested with an SREJ, but
6255 * some expected retransmitted frames are
6256 * missing. Request retransmission of missing
6259 skb_queue_tail(&chan
->srej_q
, skb
);
6261 BT_DBG("Queued %p (queue len %d)", skb
,
6262 skb_queue_len(&chan
->srej_q
));
6264 l2cap_pass_to_tx(chan
, control
);
6265 l2cap_send_srej_list(chan
, control
->txseq
);
6267 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6268 /* We've already queued this frame. Drop this copy. */
6269 l2cap_pass_to_tx(chan
, control
);
6271 case L2CAP_TXSEQ_DUPLICATE
:
6272 /* Expecting a later sequence number, so this frame
6273 * was already received. Ignore it completely.
6276 case L2CAP_TXSEQ_INVALID_IGNORE
:
6278 case L2CAP_TXSEQ_INVALID
:
6280 l2cap_send_disconn_req(chan
, ECONNRESET
);
6284 case L2CAP_EV_RECV_RR
:
6285 l2cap_pass_to_tx(chan
, control
);
6286 if (control
->final
) {
6287 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6289 if (!test_and_clear_bit(CONN_REJ_ACT
,
6290 &chan
->conn_state
)) {
6292 l2cap_retransmit_all(chan
, control
);
6295 l2cap_ertm_send(chan
);
6296 } else if (control
->poll
) {
6297 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6298 &chan
->conn_state
) &&
6299 chan
->unacked_frames
) {
6300 __set_retrans_timer(chan
);
6303 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6304 l2cap_send_srej_tail(chan
);
6306 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6307 &chan
->conn_state
) &&
6308 chan
->unacked_frames
)
6309 __set_retrans_timer(chan
);
6311 l2cap_send_ack(chan
);
6314 case L2CAP_EV_RECV_RNR
:
6315 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6316 l2cap_pass_to_tx(chan
, control
);
6317 if (control
->poll
) {
6318 l2cap_send_srej_tail(chan
);
6320 struct l2cap_ctrl rr_control
;
6321 memset(&rr_control
, 0, sizeof(rr_control
));
6322 rr_control
.sframe
= 1;
6323 rr_control
.super
= L2CAP_SUPER_RR
;
6324 rr_control
.reqseq
= chan
->buffer_seq
;
6325 l2cap_send_sframe(chan
, &rr_control
);
6329 case L2CAP_EV_RECV_REJ
:
6330 l2cap_handle_rej(chan
, control
);
6332 case L2CAP_EV_RECV_SREJ
:
6333 l2cap_handle_srej(chan
, control
);
6337 if (skb
&& !skb_in_use
) {
6338 BT_DBG("Freeing %p", skb
);
6345 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6347 BT_DBG("chan %p", chan
);
6349 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6352 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6354 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6356 return l2cap_resegment(chan
);
6359 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6360 struct l2cap_ctrl
*control
,
6361 struct sk_buff
*skb
, u8 event
)
6365 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6371 l2cap_process_reqseq(chan
, control
->reqseq
);
6373 if (!skb_queue_empty(&chan
->tx_q
))
6374 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6376 chan
->tx_send_head
= NULL
;
6378 /* Rewind next_tx_seq to the point expected
6381 chan
->next_tx_seq
= control
->reqseq
;
6382 chan
->unacked_frames
= 0;
6384 err
= l2cap_finish_move(chan
);
6388 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6389 l2cap_send_i_or_rr_or_rnr(chan
);
6391 if (event
== L2CAP_EV_RECV_IFRAME
)
6394 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6397 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6398 struct l2cap_ctrl
*control
,
6399 struct sk_buff
*skb
, u8 event
)
6403 if (!control
->final
)
6406 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6408 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6409 l2cap_process_reqseq(chan
, control
->reqseq
);
6411 if (!skb_queue_empty(&chan
->tx_q
))
6412 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6414 chan
->tx_send_head
= NULL
;
6416 /* Rewind next_tx_seq to the point expected
6419 chan
->next_tx_seq
= control
->reqseq
;
6420 chan
->unacked_frames
= 0;
6423 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6425 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6427 err
= l2cap_resegment(chan
);
6430 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6435 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6437 /* Make sure reqseq is for a packet that has been sent but not acked */
6440 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6441 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6444 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6445 struct sk_buff
*skb
, u8 event
)
6449 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6450 control
, skb
, event
, chan
->rx_state
);
6452 if (__valid_reqseq(chan
, control
->reqseq
)) {
6453 switch (chan
->rx_state
) {
6454 case L2CAP_RX_STATE_RECV
:
6455 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6457 case L2CAP_RX_STATE_SREJ_SENT
:
6458 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6461 case L2CAP_RX_STATE_WAIT_P
:
6462 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6464 case L2CAP_RX_STATE_WAIT_F
:
6465 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6472 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6473 control
->reqseq
, chan
->next_tx_seq
,
6474 chan
->expected_ack_seq
);
6475 l2cap_send_disconn_req(chan
, ECONNRESET
);
6481 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6482 struct sk_buff
*skb
)
6486 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6489 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6490 L2CAP_TXSEQ_EXPECTED
) {
6491 l2cap_pass_to_tx(chan
, control
);
6493 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6494 __next_seq(chan
, chan
->buffer_seq
));
6496 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6498 l2cap_reassemble_sdu(chan
, skb
, control
);
6501 kfree_skb(chan
->sdu
);
6504 chan
->sdu_last_frag
= NULL
;
6508 BT_DBG("Freeing %p", skb
);
6513 chan
->last_acked_seq
= control
->txseq
;
6514 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6519 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6521 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6525 __unpack_control(chan
, skb
);
6530 * We can just drop the corrupted I-frame here.
6531 * Receiver will miss it and start proper recovery
6532 * procedures and ask for retransmission.
6534 if (l2cap_check_fcs(chan
, skb
))
6537 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6538 len
-= L2CAP_SDULEN_SIZE
;
6540 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6541 len
-= L2CAP_FCS_SIZE
;
6543 if (len
> chan
->mps
) {
6544 l2cap_send_disconn_req(chan
, ECONNRESET
);
6548 if (!control
->sframe
) {
6551 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6552 control
->sar
, control
->reqseq
, control
->final
,
6555 /* Validate F-bit - F=0 always valid, F=1 only
6556 * valid in TX WAIT_F
6558 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6561 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6562 event
= L2CAP_EV_RECV_IFRAME
;
6563 err
= l2cap_rx(chan
, control
, skb
, event
);
6565 err
= l2cap_stream_rx(chan
, control
, skb
);
6569 l2cap_send_disconn_req(chan
, ECONNRESET
);
6571 const u8 rx_func_to_event
[4] = {
6572 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6573 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6576 /* Only I-frames are expected in streaming mode */
6577 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6580 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6581 control
->reqseq
, control
->final
, control
->poll
,
6585 BT_ERR("Trailing bytes: %d in sframe", len
);
6586 l2cap_send_disconn_req(chan
, ECONNRESET
);
6590 /* Validate F and P bits */
6591 if (control
->final
&& (control
->poll
||
6592 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6595 event
= rx_func_to_event
[control
->super
];
6596 if (l2cap_rx(chan
, control
, skb
, event
))
6597 l2cap_send_disconn_req(chan
, ECONNRESET
);
6607 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6608 struct sk_buff
*skb
)
6610 struct l2cap_chan
*chan
;
6612 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6614 if (cid
== L2CAP_CID_A2MP
) {
6615 chan
= a2mp_channel_create(conn
, skb
);
6621 l2cap_chan_lock(chan
);
6623 BT_DBG("unknown cid 0x%4.4x", cid
);
6624 /* Drop packet and return */
6630 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6632 if (chan
->state
!= BT_CONNECTED
)
6635 switch (chan
->mode
) {
6636 case L2CAP_MODE_LE_FLOWCTL
:
6637 case L2CAP_MODE_BASIC
:
6638 /* If socket recv buffers overflows we drop data here
6639 * which is *bad* because L2CAP has to be reliable.
6640 * But we don't have any other choice. L2CAP doesn't
6641 * provide flow control mechanism. */
6643 if (chan
->imtu
< skb
->len
)
6646 if (!chan
->ops
->recv(chan
, skb
))
6650 case L2CAP_MODE_ERTM
:
6651 case L2CAP_MODE_STREAMING
:
6652 l2cap_data_rcv(chan
, skb
);
6656 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6664 l2cap_chan_unlock(chan
);
6667 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6668 struct sk_buff
*skb
)
6670 struct hci_conn
*hcon
= conn
->hcon
;
6671 struct l2cap_chan
*chan
;
6673 if (hcon
->type
!= ACL_LINK
)
6676 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
6681 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6683 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6686 if (chan
->imtu
< skb
->len
)
6689 /* Store remote BD_ADDR and PSM for msg_name */
6690 bacpy(&bt_cb(skb
)->bdaddr
, &hcon
->dst
);
6691 bt_cb(skb
)->psm
= psm
;
6693 if (!chan
->ops
->recv(chan
, skb
))
6700 static void l2cap_att_channel(struct l2cap_conn
*conn
,
6701 struct sk_buff
*skb
)
6703 struct hci_conn
*hcon
= conn
->hcon
;
6704 struct l2cap_chan
*chan
;
6706 if (hcon
->type
!= LE_LINK
)
6709 chan
= l2cap_global_chan_by_scid(BT_CONNECTED
, L2CAP_CID_ATT
,
6710 &hcon
->src
, &hcon
->dst
);
6714 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6716 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
))
6719 if (chan
->imtu
< skb
->len
)
6722 if (!chan
->ops
->recv(chan
, skb
))
6729 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6731 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6735 skb_pull(skb
, L2CAP_HDR_SIZE
);
6736 cid
= __le16_to_cpu(lh
->cid
);
6737 len
= __le16_to_cpu(lh
->len
);
6739 if (len
!= skb
->len
) {
6744 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6747 case L2CAP_CID_SIGNALING
:
6748 l2cap_sig_channel(conn
, skb
);
6751 case L2CAP_CID_CONN_LESS
:
6752 psm
= get_unaligned((__le16
*) skb
->data
);
6753 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6754 l2cap_conless_channel(conn
, psm
, skb
);
6758 l2cap_att_channel(conn
, skb
);
6761 case L2CAP_CID_LE_SIGNALING
:
6762 l2cap_le_sig_channel(conn
, skb
);
6766 if (smp_sig_channel(conn
, skb
))
6767 l2cap_conn_del(conn
->hcon
, EACCES
);
6771 l2cap_data_channel(conn
, cid
, skb
);
6776 /* ---- L2CAP interface with lower layer (HCI) ---- */
6778 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
6780 int exact
= 0, lm1
= 0, lm2
= 0;
6781 struct l2cap_chan
*c
;
6783 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
6785 /* Find listening sockets and check their link_mode */
6786 read_lock(&chan_list_lock
);
6787 list_for_each_entry(c
, &chan_list
, global_l
) {
6788 if (c
->state
!= BT_LISTEN
)
6791 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
6792 lm1
|= HCI_LM_ACCEPT
;
6793 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
6794 lm1
|= HCI_LM_MASTER
;
6796 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
6797 lm2
|= HCI_LM_ACCEPT
;
6798 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
6799 lm2
|= HCI_LM_MASTER
;
6802 read_unlock(&chan_list_lock
);
6804 return exact
? lm1
: lm2
;
6807 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
6809 struct l2cap_conn
*conn
;
6811 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
6814 conn
= l2cap_conn_add(hcon
);
6816 l2cap_conn_ready(conn
);
6818 l2cap_conn_del(hcon
, bt_to_errno(status
));
6822 int l2cap_disconn_ind(struct hci_conn
*hcon
)
6824 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6826 BT_DBG("hcon %p", hcon
);
6829 return HCI_ERROR_REMOTE_USER_TERM
;
6830 return conn
->disc_reason
;
6833 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
6835 BT_DBG("hcon %p reason %d", hcon
, reason
);
6837 l2cap_conn_del(hcon
, bt_to_errno(reason
));
6840 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
6842 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
6845 if (encrypt
== 0x00) {
6846 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
6847 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
6848 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
6849 l2cap_chan_close(chan
, ECONNREFUSED
);
6851 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
6852 __clear_chan_timer(chan
);
6856 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
6858 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6859 struct l2cap_chan
*chan
;
6864 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
6866 if (hcon
->type
== LE_LINK
) {
6867 if (!status
&& encrypt
)
6868 smp_distribute_keys(conn
, 0);
6869 cancel_delayed_work(&conn
->security_timer
);
6872 mutex_lock(&conn
->chan_lock
);
6874 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
6875 l2cap_chan_lock(chan
);
6877 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
6878 state_to_string(chan
->state
));
6880 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
6881 l2cap_chan_unlock(chan
);
6885 if (chan
->scid
== L2CAP_CID_ATT
) {
6886 if (!status
&& encrypt
) {
6887 chan
->sec_level
= hcon
->sec_level
;
6888 l2cap_chan_ready(chan
);
6891 l2cap_chan_unlock(chan
);
6895 if (!__l2cap_no_conn_pending(chan
)) {
6896 l2cap_chan_unlock(chan
);
6900 if (!status
&& (chan
->state
== BT_CONNECTED
||
6901 chan
->state
== BT_CONFIG
)) {
6902 chan
->ops
->resume(chan
);
6903 l2cap_check_encryption(chan
, encrypt
);
6904 l2cap_chan_unlock(chan
);
6908 if (chan
->state
== BT_CONNECT
) {
6910 l2cap_start_connection(chan
);
6912 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
6913 } else if (chan
->state
== BT_CONNECT2
) {
6914 struct l2cap_conn_rsp rsp
;
6918 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
6919 res
= L2CAP_CR_PEND
;
6920 stat
= L2CAP_CS_AUTHOR_PEND
;
6921 chan
->ops
->defer(chan
);
6923 l2cap_state_change(chan
, BT_CONFIG
);
6924 res
= L2CAP_CR_SUCCESS
;
6925 stat
= L2CAP_CS_NO_INFO
;
6928 l2cap_state_change(chan
, BT_DISCONN
);
6929 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
6930 res
= L2CAP_CR_SEC_BLOCK
;
6931 stat
= L2CAP_CS_NO_INFO
;
6934 rsp
.scid
= cpu_to_le16(chan
->dcid
);
6935 rsp
.dcid
= cpu_to_le16(chan
->scid
);
6936 rsp
.result
= cpu_to_le16(res
);
6937 rsp
.status
= cpu_to_le16(stat
);
6938 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
6941 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
6942 res
== L2CAP_CR_SUCCESS
) {
6944 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
6945 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
6947 l2cap_build_conf_req(chan
, buf
),
6949 chan
->num_conf_req
++;
6953 l2cap_chan_unlock(chan
);
6956 mutex_unlock(&conn
->chan_lock
);
6961 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
6963 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6964 struct l2cap_hdr
*hdr
;
6967 /* For AMP controller do not create l2cap conn */
6968 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
6972 conn
= l2cap_conn_add(hcon
);
6977 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
6981 case ACL_START_NO_FLUSH
:
6984 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
6985 kfree_skb(conn
->rx_skb
);
6986 conn
->rx_skb
= NULL
;
6988 l2cap_conn_unreliable(conn
, ECOMM
);
6991 /* Start fragment always begin with Basic L2CAP header */
6992 if (skb
->len
< L2CAP_HDR_SIZE
) {
6993 BT_ERR("Frame is too short (len %d)", skb
->len
);
6994 l2cap_conn_unreliable(conn
, ECOMM
);
6998 hdr
= (struct l2cap_hdr
*) skb
->data
;
6999 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7001 if (len
== skb
->len
) {
7002 /* Complete frame received */
7003 l2cap_recv_frame(conn
, skb
);
7007 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7009 if (skb
->len
> len
) {
7010 BT_ERR("Frame is too long (len %d, expected len %d)",
7012 l2cap_conn_unreliable(conn
, ECOMM
);
7016 /* Allocate skb for the complete frame (with header) */
7017 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7021 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7023 conn
->rx_len
= len
- skb
->len
;
7027 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7029 if (!conn
->rx_len
) {
7030 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7031 l2cap_conn_unreliable(conn
, ECOMM
);
7035 if (skb
->len
> conn
->rx_len
) {
7036 BT_ERR("Fragment is too long (len %d, expected %d)",
7037 skb
->len
, conn
->rx_len
);
7038 kfree_skb(conn
->rx_skb
);
7039 conn
->rx_skb
= NULL
;
7041 l2cap_conn_unreliable(conn
, ECOMM
);
7045 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7047 conn
->rx_len
-= skb
->len
;
7049 if (!conn
->rx_len
) {
7050 /* Complete frame received. l2cap_recv_frame
7051 * takes ownership of the skb so set the global
7052 * rx_skb pointer to NULL first.
7054 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7055 conn
->rx_skb
= NULL
;
7056 l2cap_recv_frame(conn
, rx_skb
);
7066 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
7068 struct l2cap_chan
*c
;
7070 read_lock(&chan_list_lock
);
7072 list_for_each_entry(c
, &chan_list
, global_l
) {
7073 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7075 c
->state
, __le16_to_cpu(c
->psm
),
7076 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
7077 c
->sec_level
, c
->mode
);
7080 read_unlock(&chan_list_lock
);
7085 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
7087 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
7090 static const struct file_operations l2cap_debugfs_fops
= {
7091 .open
= l2cap_debugfs_open
,
7093 .llseek
= seq_lseek
,
7094 .release
= single_release
,
/* Dentry for the "l2cap" debugfs file; created in l2cap_init() and
 * removed in l2cap_exit().
 */
static struct dentry *l2cap_debugfs;
7099 int __init
l2cap_init(void)
7103 err
= l2cap_init_sockets();
7107 if (IS_ERR_OR_NULL(bt_debugfs
))
7110 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
7111 NULL
, &l2cap_debugfs_fops
);
7116 void l2cap_exit(void)
7118 debugfs_remove(l2cap_debugfs
);
7119 l2cap_cleanup_sockets();
/* Module parameter (mode 0644: root-writable via /sys/module) to disable
 * L2CAP enhanced retransmission mode; disable_ertm is defined earlier in
 * this file.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");