/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
44 #define LE_FLOWCTL_MAX_CREDITS 65535
48 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
49 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_SIG_BREDR
| L2CAP_FC_CONNLESS
, };
51 static LIST_HEAD(chan_list
);
52 static DEFINE_RWLOCK(chan_list_lock
);
54 static u16 le_max_credits
= L2CAP_LE_MAX_CREDITS
;
55 static u16 le_default_mps
= L2CAP_LE_DEFAULT_MPS
;
57 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
58 u8 code
, u8 ident
, u16 dlen
, void *data
);
59 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
61 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
62 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
64 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
65 struct sk_buff_head
*skbs
, u8 event
);
67 static inline __u8
bdaddr_type(struct hci_conn
*hcon
, __u8 type
)
69 if (hcon
->type
== LE_LINK
) {
70 if (type
== ADDR_LE_DEV_PUBLIC
)
71 return BDADDR_LE_PUBLIC
;
73 return BDADDR_LE_RANDOM
;
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
86 list_for_each_entry(c
, &conn
->chan_l
, list
) {
93 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
98 list_for_each_entry(c
, &conn
->chan_l
, list
) {
105 /* Find channel with given SCID.
106 * Returns locked channel. */
107 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
110 struct l2cap_chan
*c
;
112 mutex_lock(&conn
->chan_lock
);
113 c
= __l2cap_get_chan_by_scid(conn
, cid
);
116 mutex_unlock(&conn
->chan_lock
);
121 /* Find channel with given DCID.
122 * Returns locked channel.
124 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
127 struct l2cap_chan
*c
;
129 mutex_lock(&conn
->chan_lock
);
130 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
133 mutex_unlock(&conn
->chan_lock
);
138 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
141 struct l2cap_chan
*c
;
143 list_for_each_entry(c
, &conn
->chan_l
, list
) {
144 if (c
->ident
== ident
)
150 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
153 struct l2cap_chan
*c
;
155 mutex_lock(&conn
->chan_lock
);
156 c
= __l2cap_get_chan_by_ident(conn
, ident
);
159 mutex_unlock(&conn
->chan_lock
);
164 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
166 struct l2cap_chan
*c
;
168 list_for_each_entry(c
, &chan_list
, global_l
) {
169 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
175 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
179 write_lock(&chan_list_lock
);
181 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
194 for (p
= 0x1001; p
< 0x1100; p
+= 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
196 chan
->psm
= cpu_to_le16(p
);
197 chan
->sport
= cpu_to_le16(p
);
204 write_unlock(&chan_list_lock
);
207 EXPORT_SYMBOL_GPL(l2cap_add_psm
);
209 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
211 write_lock(&chan_list_lock
);
213 /* Override the defaults (which are for conn-oriented) */
214 chan
->omtu
= L2CAP_DEFAULT_MTU
;
215 chan
->chan_type
= L2CAP_CHAN_FIXED
;
219 write_unlock(&chan_list_lock
);
224 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
228 if (conn
->hcon
->type
== LE_LINK
)
229 dyn_end
= L2CAP_CID_LE_DYN_END
;
231 dyn_end
= L2CAP_CID_DYN_END
;
233 for (cid
= L2CAP_CID_DYN_START
; cid
< dyn_end
; cid
++) {
234 if (!__l2cap_get_chan_by_scid(conn
, cid
))
241 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
243 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
244 state_to_string(state
));
247 chan
->ops
->state_change(chan
, state
, 0);
250 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
254 chan
->ops
->state_change(chan
, chan
->state
, err
);
257 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
259 chan
->ops
->state_change(chan
, chan
->state
, err
);
262 static void __set_retrans_timer(struct l2cap_chan
*chan
)
264 if (!delayed_work_pending(&chan
->monitor_timer
) &&
265 chan
->retrans_timeout
) {
266 l2cap_set_timer(chan
, &chan
->retrans_timer
,
267 msecs_to_jiffies(chan
->retrans_timeout
));
271 static void __set_monitor_timer(struct l2cap_chan
*chan
)
273 __clear_retrans_timer(chan
);
274 if (chan
->monitor_timeout
) {
275 l2cap_set_timer(chan
, &chan
->monitor_timer
,
276 msecs_to_jiffies(chan
->monitor_timeout
));
280 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
285 skb_queue_walk(head
, skb
) {
286 if (bt_cb(skb
)->control
.txseq
== seq
)
293 /* ---- L2CAP sequence number lists ---- */
295 /* For ERTM, ordered lists of sequence numbers must be tracked for
296 * SREJ requests that are received and for frames that are to be
297 * retransmitted. These seq_list functions implement a singly-linked
298 * list in an array, where membership in the list can also be checked
299 * in constant time. Items can also be added to the tail of the list
300 * and removed from the head in constant time, without further memory
304 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
306 size_t alloc_size
, i
;
308 /* Allocated size is a power of 2 to map sequence numbers
309 * (which may be up to 14 bits) in to a smaller array that is
310 * sized for the negotiated ERTM transmit windows.
312 alloc_size
= roundup_pow_of_two(size
);
314 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
318 seq_list
->mask
= alloc_size
- 1;
319 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
320 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
321 for (i
= 0; i
< alloc_size
; i
++)
322 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
327 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
329 kfree(seq_list
->list
);
332 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
335 /* Constant-time check for list membership */
336 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
339 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
341 u16 seq
= seq_list
->head
;
342 u16 mask
= seq_list
->mask
;
344 seq_list
->head
= seq_list
->list
[seq
& mask
];
345 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
347 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
348 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
349 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
355 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
359 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
362 for (i
= 0; i
<= seq_list
->mask
; i
++)
363 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
365 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
366 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
369 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
371 u16 mask
= seq_list
->mask
;
373 /* All appends happen in constant time */
375 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
378 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
379 seq_list
->head
= seq
;
381 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
383 seq_list
->tail
= seq
;
384 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
387 static void l2cap_chan_timeout(struct work_struct
*work
)
389 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
391 struct l2cap_conn
*conn
= chan
->conn
;
394 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
396 mutex_lock(&conn
->chan_lock
);
397 l2cap_chan_lock(chan
);
399 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
400 reason
= ECONNREFUSED
;
401 else if (chan
->state
== BT_CONNECT
&&
402 chan
->sec_level
!= BT_SECURITY_SDP
)
403 reason
= ECONNREFUSED
;
407 l2cap_chan_close(chan
, reason
);
409 l2cap_chan_unlock(chan
);
411 chan
->ops
->close(chan
);
412 mutex_unlock(&conn
->chan_lock
);
414 l2cap_chan_put(chan
);
417 struct l2cap_chan
*l2cap_chan_create(void)
419 struct l2cap_chan
*chan
;
421 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
425 mutex_init(&chan
->lock
);
427 write_lock(&chan_list_lock
);
428 list_add(&chan
->global_l
, &chan_list
);
429 write_unlock(&chan_list_lock
);
431 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
433 chan
->state
= BT_OPEN
;
435 kref_init(&chan
->kref
);
437 /* This flag is cleared in l2cap_chan_ready() */
438 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
440 BT_DBG("chan %p", chan
);
444 EXPORT_SYMBOL_GPL(l2cap_chan_create
);
446 static void l2cap_chan_destroy(struct kref
*kref
)
448 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
450 BT_DBG("chan %p", chan
);
452 write_lock(&chan_list_lock
);
453 list_del(&chan
->global_l
);
454 write_unlock(&chan_list_lock
);
459 void l2cap_chan_hold(struct l2cap_chan
*c
)
461 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
466 void l2cap_chan_put(struct l2cap_chan
*c
)
468 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
470 kref_put(&c
->kref
, l2cap_chan_destroy
);
472 EXPORT_SYMBOL_GPL(l2cap_chan_put
);
474 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
476 chan
->fcs
= L2CAP_FCS_CRC16
;
477 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
478 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
479 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
480 chan
->remote_max_tx
= chan
->max_tx
;
481 chan
->remote_tx_win
= chan
->tx_win
;
482 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
483 chan
->sec_level
= BT_SECURITY_LOW
;
484 chan
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
485 chan
->retrans_timeout
= L2CAP_DEFAULT_RETRANS_TO
;
486 chan
->monitor_timeout
= L2CAP_DEFAULT_MONITOR_TO
;
487 chan
->conf_state
= 0;
489 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
491 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults
);
493 static void l2cap_le_flowctl_init(struct l2cap_chan
*chan
)
496 chan
->sdu_last_frag
= NULL
;
498 chan
->tx_credits
= 0;
499 chan
->rx_credits
= le_max_credits
;
500 chan
->mps
= min_t(u16
, chan
->imtu
, le_default_mps
);
502 skb_queue_head_init(&chan
->tx_q
);
505 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
507 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
508 __le16_to_cpu(chan
->psm
), chan
->dcid
);
510 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
514 switch (chan
->chan_type
) {
515 case L2CAP_CHAN_CONN_ORIENTED
:
516 /* Alloc CID for connection-oriented socket */
517 chan
->scid
= l2cap_alloc_cid(conn
);
518 if (conn
->hcon
->type
== ACL_LINK
)
519 chan
->omtu
= L2CAP_DEFAULT_MTU
;
522 case L2CAP_CHAN_CONN_LESS
:
523 /* Connectionless socket */
524 chan
->scid
= L2CAP_CID_CONN_LESS
;
525 chan
->dcid
= L2CAP_CID_CONN_LESS
;
526 chan
->omtu
= L2CAP_DEFAULT_MTU
;
529 case L2CAP_CHAN_FIXED
:
530 /* Caller will set CID and CID specific MTU values */
534 /* Raw socket can send/recv signalling messages only */
535 chan
->scid
= L2CAP_CID_SIGNALING
;
536 chan
->dcid
= L2CAP_CID_SIGNALING
;
537 chan
->omtu
= L2CAP_DEFAULT_MTU
;
540 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
541 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
542 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
543 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
544 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
545 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
547 l2cap_chan_hold(chan
);
549 /* Only keep a reference for fixed channels if they requested it */
550 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
551 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
552 hci_conn_hold(conn
->hcon
);
554 list_add(&chan
->list
, &conn
->chan_l
);
557 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
559 mutex_lock(&conn
->chan_lock
);
560 __l2cap_chan_add(conn
, chan
);
561 mutex_unlock(&conn
->chan_lock
);
564 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
566 struct l2cap_conn
*conn
= chan
->conn
;
568 __clear_chan_timer(chan
);
570 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
572 chan
->ops
->teardown(chan
, err
);
575 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
576 /* Delete from channel list */
577 list_del(&chan
->list
);
579 l2cap_chan_put(chan
);
583 /* Reference was only held for non-fixed channels or
584 * fixed channels that explicitly requested it using the
585 * FLAG_HOLD_HCI_CONN flag.
587 if (chan
->chan_type
!= L2CAP_CHAN_FIXED
||
588 test_bit(FLAG_HOLD_HCI_CONN
, &chan
->flags
))
589 hci_conn_drop(conn
->hcon
);
591 if (mgr
&& mgr
->bredr_chan
== chan
)
592 mgr
->bredr_chan
= NULL
;
595 if (chan
->hs_hchan
) {
596 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
598 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
599 amp_disconnect_logical_link(hs_hchan
);
602 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
606 case L2CAP_MODE_BASIC
:
609 case L2CAP_MODE_LE_FLOWCTL
:
610 skb_queue_purge(&chan
->tx_q
);
613 case L2CAP_MODE_ERTM
:
614 __clear_retrans_timer(chan
);
615 __clear_monitor_timer(chan
);
616 __clear_ack_timer(chan
);
618 skb_queue_purge(&chan
->srej_q
);
620 l2cap_seq_list_free(&chan
->srej_list
);
621 l2cap_seq_list_free(&chan
->retrans_list
);
625 case L2CAP_MODE_STREAMING
:
626 skb_queue_purge(&chan
->tx_q
);
632 EXPORT_SYMBOL_GPL(l2cap_chan_del
);
634 static void l2cap_conn_update_id_addr(struct work_struct
*work
)
636 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
637 id_addr_update_work
);
638 struct hci_conn
*hcon
= conn
->hcon
;
639 struct l2cap_chan
*chan
;
641 mutex_lock(&conn
->chan_lock
);
643 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
644 l2cap_chan_lock(chan
);
645 bacpy(&chan
->dst
, &hcon
->dst
);
646 chan
->dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
647 l2cap_chan_unlock(chan
);
650 mutex_unlock(&conn
->chan_lock
);
653 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
655 struct l2cap_conn
*conn
= chan
->conn
;
656 struct l2cap_le_conn_rsp rsp
;
659 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
660 result
= L2CAP_CR_AUTHORIZATION
;
662 result
= L2CAP_CR_BAD_PSM
;
664 l2cap_state_change(chan
, BT_DISCONN
);
666 rsp
.dcid
= cpu_to_le16(chan
->scid
);
667 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
668 rsp
.mps
= cpu_to_le16(chan
->mps
);
669 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
670 rsp
.result
= cpu_to_le16(result
);
672 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
676 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
678 struct l2cap_conn
*conn
= chan
->conn
;
679 struct l2cap_conn_rsp rsp
;
682 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
683 result
= L2CAP_CR_SEC_BLOCK
;
685 result
= L2CAP_CR_BAD_PSM
;
687 l2cap_state_change(chan
, BT_DISCONN
);
689 rsp
.scid
= cpu_to_le16(chan
->dcid
);
690 rsp
.dcid
= cpu_to_le16(chan
->scid
);
691 rsp
.result
= cpu_to_le16(result
);
692 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
694 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
697 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
699 struct l2cap_conn
*conn
= chan
->conn
;
701 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
703 switch (chan
->state
) {
705 chan
->ops
->teardown(chan
, 0);
710 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
711 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
712 l2cap_send_disconn_req(chan
, reason
);
714 l2cap_chan_del(chan
, reason
);
718 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
719 if (conn
->hcon
->type
== ACL_LINK
)
720 l2cap_chan_connect_reject(chan
);
721 else if (conn
->hcon
->type
== LE_LINK
)
722 l2cap_chan_le_connect_reject(chan
);
725 l2cap_chan_del(chan
, reason
);
730 l2cap_chan_del(chan
, reason
);
734 chan
->ops
->teardown(chan
, 0);
738 EXPORT_SYMBOL(l2cap_chan_close
);
740 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
742 switch (chan
->chan_type
) {
744 switch (chan
->sec_level
) {
745 case BT_SECURITY_HIGH
:
746 case BT_SECURITY_FIPS
:
747 return HCI_AT_DEDICATED_BONDING_MITM
;
748 case BT_SECURITY_MEDIUM
:
749 return HCI_AT_DEDICATED_BONDING
;
751 return HCI_AT_NO_BONDING
;
754 case L2CAP_CHAN_CONN_LESS
:
755 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_3DSP
)) {
756 if (chan
->sec_level
== BT_SECURITY_LOW
)
757 chan
->sec_level
= BT_SECURITY_SDP
;
759 if (chan
->sec_level
== BT_SECURITY_HIGH
||
760 chan
->sec_level
== BT_SECURITY_FIPS
)
761 return HCI_AT_NO_BONDING_MITM
;
763 return HCI_AT_NO_BONDING
;
765 case L2CAP_CHAN_CONN_ORIENTED
:
766 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_SDP
)) {
767 if (chan
->sec_level
== BT_SECURITY_LOW
)
768 chan
->sec_level
= BT_SECURITY_SDP
;
770 if (chan
->sec_level
== BT_SECURITY_HIGH
||
771 chan
->sec_level
== BT_SECURITY_FIPS
)
772 return HCI_AT_NO_BONDING_MITM
;
774 return HCI_AT_NO_BONDING
;
778 switch (chan
->sec_level
) {
779 case BT_SECURITY_HIGH
:
780 case BT_SECURITY_FIPS
:
781 return HCI_AT_GENERAL_BONDING_MITM
;
782 case BT_SECURITY_MEDIUM
:
783 return HCI_AT_GENERAL_BONDING
;
785 return HCI_AT_NO_BONDING
;
791 /* Service level security */
792 int l2cap_chan_check_security(struct l2cap_chan
*chan
, bool initiator
)
794 struct l2cap_conn
*conn
= chan
->conn
;
797 if (conn
->hcon
->type
== LE_LINK
)
798 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
800 auth_type
= l2cap_get_auth_type(chan
);
802 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
,
806 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
810 /* Get next available identificator.
811 * 1 - 128 are used by kernel.
812 * 129 - 199 are reserved.
813 * 200 - 254 are used by utilities like l2ping, etc.
816 mutex_lock(&conn
->ident_lock
);
818 if (++conn
->tx_ident
> 128)
823 mutex_unlock(&conn
->ident_lock
);
828 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
831 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
834 BT_DBG("code 0x%2.2x", code
);
839 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
840 flags
= ACL_START_NO_FLUSH
;
844 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
845 skb
->priority
= HCI_PRIO_MAX
;
847 hci_send_acl(conn
->hchan
, skb
, flags
);
850 static bool __chan_is_moving(struct l2cap_chan
*chan
)
852 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
853 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
856 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
858 struct hci_conn
*hcon
= chan
->conn
->hcon
;
861 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
864 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
866 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
873 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
874 lmp_no_flush_capable(hcon
->hdev
))
875 flags
= ACL_START_NO_FLUSH
;
879 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
880 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
883 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
885 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
886 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
888 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
891 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
892 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
899 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
900 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
907 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
909 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
910 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
912 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
915 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
916 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
923 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
924 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
931 static inline void __unpack_control(struct l2cap_chan
*chan
,
934 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
935 __unpack_extended_control(get_unaligned_le32(skb
->data
),
936 &bt_cb(skb
)->control
);
937 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
939 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
940 &bt_cb(skb
)->control
);
941 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
945 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
949 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
950 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
952 if (control
->sframe
) {
953 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
954 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
955 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
957 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
958 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
964 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
968 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
969 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
971 if (control
->sframe
) {
972 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
973 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
974 packed
|= L2CAP_CTRL_FRAME_TYPE
;
976 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
977 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
983 static inline void __pack_control(struct l2cap_chan
*chan
,
984 struct l2cap_ctrl
*control
,
987 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
988 put_unaligned_le32(__pack_extended_control(control
),
989 skb
->data
+ L2CAP_HDR_SIZE
);
991 put_unaligned_le16(__pack_enhanced_control(control
),
992 skb
->data
+ L2CAP_HDR_SIZE
);
996 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
998 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
999 return L2CAP_EXT_HDR_SIZE
;
1001 return L2CAP_ENH_HDR_SIZE
;
1004 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
1007 struct sk_buff
*skb
;
1008 struct l2cap_hdr
*lh
;
1009 int hlen
= __ertm_hdr_size(chan
);
1011 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1012 hlen
+= L2CAP_FCS_SIZE
;
1014 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
1017 return ERR_PTR(-ENOMEM
);
1019 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1020 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
1021 lh
->cid
= cpu_to_le16(chan
->dcid
);
1023 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1024 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1026 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1028 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1029 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1030 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1033 skb
->priority
= HCI_PRIO_MAX
;
1037 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1038 struct l2cap_ctrl
*control
)
1040 struct sk_buff
*skb
;
1043 BT_DBG("chan %p, control %p", chan
, control
);
1045 if (!control
->sframe
)
1048 if (__chan_is_moving(chan
))
1051 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1055 if (control
->super
== L2CAP_SUPER_RR
)
1056 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1057 else if (control
->super
== L2CAP_SUPER_RNR
)
1058 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1060 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1061 chan
->last_acked_seq
= control
->reqseq
;
1062 __clear_ack_timer(chan
);
1065 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1066 control
->final
, control
->poll
, control
->super
);
1068 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1069 control_field
= __pack_extended_control(control
);
1071 control_field
= __pack_enhanced_control(control
);
1073 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1075 l2cap_do_send(chan
, skb
);
1078 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1080 struct l2cap_ctrl control
;
1082 BT_DBG("chan %p, poll %d", chan
, poll
);
1084 memset(&control
, 0, sizeof(control
));
1086 control
.poll
= poll
;
1088 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1089 control
.super
= L2CAP_SUPER_RNR
;
1091 control
.super
= L2CAP_SUPER_RR
;
1093 control
.reqseq
= chan
->buffer_seq
;
1094 l2cap_send_sframe(chan
, &control
);
1097 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1099 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
1102 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1105 static bool __amp_capable(struct l2cap_chan
*chan
)
1107 struct l2cap_conn
*conn
= chan
->conn
;
1108 struct hci_dev
*hdev
;
1109 bool amp_available
= false;
1111 if (!conn
->hs_enabled
)
1114 if (!(conn
->fixed_chan_mask
& L2CAP_FC_A2MP
))
1117 read_lock(&hci_dev_list_lock
);
1118 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1119 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1120 test_bit(HCI_UP
, &hdev
->flags
)) {
1121 amp_available
= true;
1125 read_unlock(&hci_dev_list_lock
);
1127 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1128 return amp_available
;
1133 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1135 /* Check EFS parameters */
1139 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1141 struct l2cap_conn
*conn
= chan
->conn
;
1142 struct l2cap_conn_req req
;
1144 req
.scid
= cpu_to_le16(chan
->scid
);
1145 req
.psm
= chan
->psm
;
1147 chan
->ident
= l2cap_get_ident(conn
);
1149 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1151 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1154 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1156 struct l2cap_create_chan_req req
;
1157 req
.scid
= cpu_to_le16(chan
->scid
);
1158 req
.psm
= chan
->psm
;
1159 req
.amp_id
= amp_id
;
1161 chan
->ident
= l2cap_get_ident(chan
->conn
);
1163 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1167 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1169 struct sk_buff
*skb
;
1171 BT_DBG("chan %p", chan
);
1173 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1176 __clear_retrans_timer(chan
);
1177 __clear_monitor_timer(chan
);
1178 __clear_ack_timer(chan
);
1180 chan
->retry_count
= 0;
1181 skb_queue_walk(&chan
->tx_q
, skb
) {
1182 if (bt_cb(skb
)->control
.retries
)
1183 bt_cb(skb
)->control
.retries
= 1;
1188 chan
->expected_tx_seq
= chan
->buffer_seq
;
1190 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1191 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1192 l2cap_seq_list_clear(&chan
->retrans_list
);
1193 l2cap_seq_list_clear(&chan
->srej_list
);
1194 skb_queue_purge(&chan
->srej_q
);
1196 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1197 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1199 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1202 static void l2cap_move_done(struct l2cap_chan
*chan
)
1204 u8 move_role
= chan
->move_role
;
1205 BT_DBG("chan %p", chan
);
1207 chan
->move_state
= L2CAP_MOVE_STABLE
;
1208 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1210 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1213 switch (move_role
) {
1214 case L2CAP_MOVE_ROLE_INITIATOR
:
1215 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1216 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1218 case L2CAP_MOVE_ROLE_RESPONDER
:
1219 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1224 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1226 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1227 chan
->conf_state
= 0;
1228 __clear_chan_timer(chan
);
1230 if (chan
->mode
== L2CAP_MODE_LE_FLOWCTL
&& !chan
->tx_credits
)
1231 chan
->ops
->suspend(chan
);
1233 chan
->state
= BT_CONNECTED
;
1235 chan
->ops
->ready(chan
);
1238 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1240 struct l2cap_conn
*conn
= chan
->conn
;
1241 struct l2cap_le_conn_req req
;
1243 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
))
1246 req
.psm
= chan
->psm
;
1247 req
.scid
= cpu_to_le16(chan
->scid
);
1248 req
.mtu
= cpu_to_le16(chan
->imtu
);
1249 req
.mps
= cpu_to_le16(chan
->mps
);
1250 req
.credits
= cpu_to_le16(chan
->rx_credits
);
1252 chan
->ident
= l2cap_get_ident(conn
);
1254 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1258 static void l2cap_le_start(struct l2cap_chan
*chan
)
1260 struct l2cap_conn
*conn
= chan
->conn
;
1262 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1266 l2cap_chan_ready(chan
);
1270 if (chan
->state
== BT_CONNECT
)
1271 l2cap_le_connect(chan
);
1274 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1276 if (__amp_capable(chan
)) {
1277 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1278 a2mp_discover_amp(chan
);
1279 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1280 l2cap_le_start(chan
);
1282 l2cap_send_conn_req(chan
);
1286 static void l2cap_request_info(struct l2cap_conn
*conn
)
1288 struct l2cap_info_req req
;
1290 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1293 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1295 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1296 conn
->info_ident
= l2cap_get_ident(conn
);
1298 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1300 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1304 static void l2cap_do_start(struct l2cap_chan
*chan
)
1306 struct l2cap_conn
*conn
= chan
->conn
;
1308 if (conn
->hcon
->type
== LE_LINK
) {
1309 l2cap_le_start(chan
);
1313 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)) {
1314 l2cap_request_info(conn
);
1318 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1321 if (l2cap_chan_check_security(chan
, true) &&
1322 __l2cap_no_conn_pending(chan
))
1323 l2cap_start_connection(chan
);
1326 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1328 u32 local_feat_mask
= l2cap_feat_mask
;
1330 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1333 case L2CAP_MODE_ERTM
:
1334 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1335 case L2CAP_MODE_STREAMING
:
1336 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1342 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1344 struct l2cap_conn
*conn
= chan
->conn
;
1345 struct l2cap_disconn_req req
;
1350 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1351 __clear_retrans_timer(chan
);
1352 __clear_monitor_timer(chan
);
1353 __clear_ack_timer(chan
);
1356 if (chan
->scid
== L2CAP_CID_A2MP
) {
1357 l2cap_state_change(chan
, BT_DISCONN
);
1361 req
.dcid
= cpu_to_le16(chan
->dcid
);
1362 req
.scid
= cpu_to_le16(chan
->scid
);
1363 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1366 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1369 /* ---- L2CAP connections ---- */
1370 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1372 struct l2cap_chan
*chan
, *tmp
;
1374 BT_DBG("conn %p", conn
);
1376 mutex_lock(&conn
->chan_lock
);
1378 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1379 l2cap_chan_lock(chan
);
1381 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1382 l2cap_chan_ready(chan
);
1383 l2cap_chan_unlock(chan
);
1387 if (chan
->state
== BT_CONNECT
) {
1388 if (!l2cap_chan_check_security(chan
, true) ||
1389 !__l2cap_no_conn_pending(chan
)) {
1390 l2cap_chan_unlock(chan
);
1394 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1395 && test_bit(CONF_STATE2_DEVICE
,
1396 &chan
->conf_state
)) {
1397 l2cap_chan_close(chan
, ECONNRESET
);
1398 l2cap_chan_unlock(chan
);
1402 l2cap_start_connection(chan
);
1404 } else if (chan
->state
== BT_CONNECT2
) {
1405 struct l2cap_conn_rsp rsp
;
1407 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1408 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1410 if (l2cap_chan_check_security(chan
, false)) {
1411 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1412 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1413 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1414 chan
->ops
->defer(chan
);
1417 l2cap_state_change(chan
, BT_CONFIG
);
1418 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1419 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1422 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1423 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1426 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1429 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1430 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1431 l2cap_chan_unlock(chan
);
1435 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1436 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1437 l2cap_build_conf_req(chan
, buf
), buf
);
1438 chan
->num_conf_req
++;
1441 l2cap_chan_unlock(chan
);
1444 mutex_unlock(&conn
->chan_lock
);
1447 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1449 struct hci_conn
*hcon
= conn
->hcon
;
1450 struct hci_dev
*hdev
= hcon
->hdev
;
1452 BT_DBG("%s conn %p", hdev
->name
, conn
);
1454 /* For outgoing pairing which doesn't necessarily have an
1455 * associated socket (e.g. mgmt_pair_device).
1458 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1460 /* For LE slave connections, make sure the connection interval
1461 * is in the range of the minium and maximum interval that has
1462 * been configured for this connection. If not, then trigger
1463 * the connection update procedure.
1465 if (hcon
->role
== HCI_ROLE_SLAVE
&&
1466 (hcon
->le_conn_interval
< hcon
->le_conn_min_interval
||
1467 hcon
->le_conn_interval
> hcon
->le_conn_max_interval
)) {
1468 struct l2cap_conn_param_update_req req
;
1470 req
.min
= cpu_to_le16(hcon
->le_conn_min_interval
);
1471 req
.max
= cpu_to_le16(hcon
->le_conn_max_interval
);
1472 req
.latency
= cpu_to_le16(hcon
->le_conn_latency
);
1473 req
.to_multiplier
= cpu_to_le16(hcon
->le_supv_timeout
);
1475 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1476 L2CAP_CONN_PARAM_UPDATE_REQ
, sizeof(req
), &req
);
1480 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1482 struct l2cap_chan
*chan
;
1483 struct hci_conn
*hcon
= conn
->hcon
;
1485 BT_DBG("conn %p", conn
);
1487 if (hcon
->type
== ACL_LINK
)
1488 l2cap_request_info(conn
);
1490 mutex_lock(&conn
->chan_lock
);
1492 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1494 l2cap_chan_lock(chan
);
1496 if (chan
->scid
== L2CAP_CID_A2MP
) {
1497 l2cap_chan_unlock(chan
);
1501 if (hcon
->type
== LE_LINK
) {
1502 l2cap_le_start(chan
);
1503 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1504 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
1505 l2cap_chan_ready(chan
);
1506 } else if (chan
->state
== BT_CONNECT
) {
1507 l2cap_do_start(chan
);
1510 l2cap_chan_unlock(chan
);
1513 mutex_unlock(&conn
->chan_lock
);
1515 if (hcon
->type
== LE_LINK
)
1516 l2cap_le_conn_ready(conn
);
1518 queue_work(hcon
->hdev
->workqueue
, &conn
->pending_rx_work
);
1521 /* Notify sockets that we cannot guaranty reliability anymore */
1522 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1524 struct l2cap_chan
*chan
;
1526 BT_DBG("conn %p", conn
);
1528 mutex_lock(&conn
->chan_lock
);
1530 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1531 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1532 l2cap_chan_set_err(chan
, err
);
1535 mutex_unlock(&conn
->chan_lock
);
1538 static void l2cap_info_timeout(struct work_struct
*work
)
1540 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1543 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1544 conn
->info_ident
= 0;
1546 l2cap_conn_start(conn
);
1551 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1552 * callback is called during registration. The ->remove callback is called
1553 * during unregistration.
1554 * An l2cap_user object can be unregistered either explicitly, or implicitly
1555 * when the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1556 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1557 * External modules must own a reference to the l2cap_conn object if they intend
1558 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1559 * any time if they don't.
1562 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1564 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1567 /* We need to check whether l2cap_conn is registered. If it is not, we
1568 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1569 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1570 * relies on the parent hci_conn object to be locked. This itself relies
1571 * on the hci_dev object to be locked. So we must lock the hci device
1576 if (user
->list
.next
|| user
->list
.prev
) {
1581 /* conn->hchan is NULL after l2cap_conn_del() was called */
1587 ret
= user
->probe(conn
, user
);
1591 list_add(&user
->list
, &conn
->users
);
1595 hci_dev_unlock(hdev
);
1598 EXPORT_SYMBOL(l2cap_register_user
);
1600 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1602 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1606 if (!user
->list
.next
|| !user
->list
.prev
)
1609 list_del(&user
->list
);
1610 user
->list
.next
= NULL
;
1611 user
->list
.prev
= NULL
;
1612 user
->remove(conn
, user
);
1615 hci_dev_unlock(hdev
);
1617 EXPORT_SYMBOL(l2cap_unregister_user
);
1619 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1621 struct l2cap_user
*user
;
1623 while (!list_empty(&conn
->users
)) {
1624 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1625 list_del(&user
->list
);
1626 user
->list
.next
= NULL
;
1627 user
->list
.prev
= NULL
;
1628 user
->remove(conn
, user
);
1632 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1634 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1635 struct l2cap_chan
*chan
, *l
;
1640 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1642 kfree_skb(conn
->rx_skb
);
1644 skb_queue_purge(&conn
->pending_rx
);
1646 /* We can not call flush_work(&conn->pending_rx_work) here since we
1647 * might block if we are running on a worker from the same workqueue
1648 * pending_rx_work is waiting on.
1650 if (work_pending(&conn
->pending_rx_work
))
1651 cancel_work_sync(&conn
->pending_rx_work
);
1653 if (work_pending(&conn
->id_addr_update_work
))
1654 cancel_work_sync(&conn
->id_addr_update_work
);
1656 l2cap_unregister_all_users(conn
);
1658 /* Force the connection to be immediately dropped */
1659 hcon
->disc_timeout
= 0;
1661 mutex_lock(&conn
->chan_lock
);
1664 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1665 l2cap_chan_hold(chan
);
1666 l2cap_chan_lock(chan
);
1668 l2cap_chan_del(chan
, err
);
1670 l2cap_chan_unlock(chan
);
1672 chan
->ops
->close(chan
);
1673 l2cap_chan_put(chan
);
1676 mutex_unlock(&conn
->chan_lock
);
1678 hci_chan_del(conn
->hchan
);
1680 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1681 cancel_delayed_work_sync(&conn
->info_timer
);
1683 hcon
->l2cap_data
= NULL
;
1685 l2cap_conn_put(conn
);
1688 static void l2cap_conn_free(struct kref
*ref
)
1690 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1692 hci_conn_put(conn
->hcon
);
1696 struct l2cap_conn
*l2cap_conn_get(struct l2cap_conn
*conn
)
1698 kref_get(&conn
->ref
);
1701 EXPORT_SYMBOL(l2cap_conn_get
);
1703 void l2cap_conn_put(struct l2cap_conn
*conn
)
1705 kref_put(&conn
->ref
, l2cap_conn_free
);
1707 EXPORT_SYMBOL(l2cap_conn_put
);
1709 /* ---- Socket interface ---- */
1711 /* Find socket with psm and source / destination bdaddr.
1712 * Returns closest match.
1714 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1719 struct l2cap_chan
*c
, *c1
= NULL
;
1721 read_lock(&chan_list_lock
);
1723 list_for_each_entry(c
, &chan_list
, global_l
) {
1724 if (state
&& c
->state
!= state
)
1727 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1730 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1733 if (c
->psm
== psm
) {
1734 int src_match
, dst_match
;
1735 int src_any
, dst_any
;
1738 src_match
= !bacmp(&c
->src
, src
);
1739 dst_match
= !bacmp(&c
->dst
, dst
);
1740 if (src_match
&& dst_match
) {
1742 read_unlock(&chan_list_lock
);
1747 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1748 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1749 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1750 (src_any
&& dst_any
))
1756 l2cap_chan_hold(c1
);
1758 read_unlock(&chan_list_lock
);
1763 static void l2cap_monitor_timeout(struct work_struct
*work
)
1765 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1766 monitor_timer
.work
);
1768 BT_DBG("chan %p", chan
);
1770 l2cap_chan_lock(chan
);
1773 l2cap_chan_unlock(chan
);
1774 l2cap_chan_put(chan
);
1778 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1780 l2cap_chan_unlock(chan
);
1781 l2cap_chan_put(chan
);
1784 static void l2cap_retrans_timeout(struct work_struct
*work
)
1786 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1787 retrans_timer
.work
);
1789 BT_DBG("chan %p", chan
);
1791 l2cap_chan_lock(chan
);
1794 l2cap_chan_unlock(chan
);
1795 l2cap_chan_put(chan
);
1799 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1800 l2cap_chan_unlock(chan
);
1801 l2cap_chan_put(chan
);
1804 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1805 struct sk_buff_head
*skbs
)
1807 struct sk_buff
*skb
;
1808 struct l2cap_ctrl
*control
;
1810 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1812 if (__chan_is_moving(chan
))
1815 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1817 while (!skb_queue_empty(&chan
->tx_q
)) {
1819 skb
= skb_dequeue(&chan
->tx_q
);
1821 bt_cb(skb
)->control
.retries
= 1;
1822 control
= &bt_cb(skb
)->control
;
1824 control
->reqseq
= 0;
1825 control
->txseq
= chan
->next_tx_seq
;
1827 __pack_control(chan
, control
, skb
);
1829 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1830 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1831 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1834 l2cap_do_send(chan
, skb
);
1836 BT_DBG("Sent txseq %u", control
->txseq
);
1838 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1839 chan
->frames_sent
++;
1843 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1845 struct sk_buff
*skb
, *tx_skb
;
1846 struct l2cap_ctrl
*control
;
1849 BT_DBG("chan %p", chan
);
1851 if (chan
->state
!= BT_CONNECTED
)
1854 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1857 if (__chan_is_moving(chan
))
1860 while (chan
->tx_send_head
&&
1861 chan
->unacked_frames
< chan
->remote_tx_win
&&
1862 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1864 skb
= chan
->tx_send_head
;
1866 bt_cb(skb
)->control
.retries
= 1;
1867 control
= &bt_cb(skb
)->control
;
1869 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1872 control
->reqseq
= chan
->buffer_seq
;
1873 chan
->last_acked_seq
= chan
->buffer_seq
;
1874 control
->txseq
= chan
->next_tx_seq
;
1876 __pack_control(chan
, control
, skb
);
1878 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1879 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1880 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1883 /* Clone after data has been modified. Data is assumed to be
1884 read-only (for locking purposes) on cloned sk_buffs.
1886 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1891 __set_retrans_timer(chan
);
1893 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1894 chan
->unacked_frames
++;
1895 chan
->frames_sent
++;
1898 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1899 chan
->tx_send_head
= NULL
;
1901 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1903 l2cap_do_send(chan
, tx_skb
);
1904 BT_DBG("Sent txseq %u", control
->txseq
);
1907 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1908 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1913 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1915 struct l2cap_ctrl control
;
1916 struct sk_buff
*skb
;
1917 struct sk_buff
*tx_skb
;
1920 BT_DBG("chan %p", chan
);
1922 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1925 if (__chan_is_moving(chan
))
1928 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1929 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1931 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1933 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1938 bt_cb(skb
)->control
.retries
++;
1939 control
= bt_cb(skb
)->control
;
1941 if (chan
->max_tx
!= 0 &&
1942 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1943 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1944 l2cap_send_disconn_req(chan
, ECONNRESET
);
1945 l2cap_seq_list_clear(&chan
->retrans_list
);
1949 control
.reqseq
= chan
->buffer_seq
;
1950 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1955 if (skb_cloned(skb
)) {
1956 /* Cloned sk_buffs are read-only, so we need a
1959 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1961 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1965 l2cap_seq_list_clear(&chan
->retrans_list
);
1969 /* Update skb contents */
1970 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1971 put_unaligned_le32(__pack_extended_control(&control
),
1972 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1974 put_unaligned_le16(__pack_enhanced_control(&control
),
1975 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1979 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1980 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
,
1981 tx_skb
->len
- L2CAP_FCS_SIZE
);
1982 put_unaligned_le16(fcs
, skb_tail_pointer(tx_skb
) -
1986 l2cap_do_send(chan
, tx_skb
);
1988 BT_DBG("Resent txseq %d", control
.txseq
);
1990 chan
->last_acked_seq
= chan
->buffer_seq
;
1994 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1995 struct l2cap_ctrl
*control
)
1997 BT_DBG("chan %p, control %p", chan
, control
);
1999 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2000 l2cap_ertm_resend(chan
);
2003 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2004 struct l2cap_ctrl
*control
)
2006 struct sk_buff
*skb
;
2008 BT_DBG("chan %p, control %p", chan
, control
);
2011 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2013 l2cap_seq_list_clear(&chan
->retrans_list
);
2015 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2018 if (chan
->unacked_frames
) {
2019 skb_queue_walk(&chan
->tx_q
, skb
) {
2020 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2021 skb
== chan
->tx_send_head
)
2025 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2026 if (skb
== chan
->tx_send_head
)
2029 l2cap_seq_list_append(&chan
->retrans_list
,
2030 bt_cb(skb
)->control
.txseq
);
2033 l2cap_ertm_resend(chan
);
2037 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2039 struct l2cap_ctrl control
;
2040 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2041 chan
->last_acked_seq
);
2044 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2045 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2047 memset(&control
, 0, sizeof(control
));
2050 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2051 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2052 __clear_ack_timer(chan
);
2053 control
.super
= L2CAP_SUPER_RNR
;
2054 control
.reqseq
= chan
->buffer_seq
;
2055 l2cap_send_sframe(chan
, &control
);
2057 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2058 l2cap_ertm_send(chan
);
2059 /* If any i-frames were sent, they included an ack */
2060 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2064 /* Ack now if the window is 3/4ths full.
2065 * Calculate without mul or div
2067 threshold
= chan
->ack_win
;
2068 threshold
+= threshold
<< 1;
2071 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2074 if (frames_to_ack
>= threshold
) {
2075 __clear_ack_timer(chan
);
2076 control
.super
= L2CAP_SUPER_RR
;
2077 control
.reqseq
= chan
->buffer_seq
;
2078 l2cap_send_sframe(chan
, &control
);
2083 __set_ack_timer(chan
);
2087 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2088 struct msghdr
*msg
, int len
,
2089 int count
, struct sk_buff
*skb
)
2091 struct l2cap_conn
*conn
= chan
->conn
;
2092 struct sk_buff
**frag
;
2095 if (chan
->ops
->memcpy_fromiovec(chan
, skb_put(skb
, count
),
2096 msg
->msg_iov
, count
))
2102 /* Continuation fragments (no L2CAP header) */
2103 frag
= &skb_shinfo(skb
)->frag_list
;
2105 struct sk_buff
*tmp
;
2107 count
= min_t(unsigned int, conn
->mtu
, len
);
2109 tmp
= chan
->ops
->alloc_skb(chan
, 0, count
,
2110 msg
->msg_flags
& MSG_DONTWAIT
);
2112 return PTR_ERR(tmp
);
2116 if (chan
->ops
->memcpy_fromiovec(chan
, skb_put(*frag
, count
),
2117 msg
->msg_iov
, count
))
2123 skb
->len
+= (*frag
)->len
;
2124 skb
->data_len
+= (*frag
)->len
;
2126 frag
= &(*frag
)->next
;
2132 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2133 struct msghdr
*msg
, size_t len
)
2135 struct l2cap_conn
*conn
= chan
->conn
;
2136 struct sk_buff
*skb
;
2137 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2138 struct l2cap_hdr
*lh
;
2140 BT_DBG("chan %p psm 0x%2.2x len %zu", chan
,
2141 __le16_to_cpu(chan
->psm
), len
);
2143 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2145 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2146 msg
->msg_flags
& MSG_DONTWAIT
);
2150 /* Create L2CAP header */
2151 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2152 lh
->cid
= cpu_to_le16(chan
->dcid
);
2153 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2154 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2156 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2157 if (unlikely(err
< 0)) {
2159 return ERR_PTR(err
);
2164 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2165 struct msghdr
*msg
, size_t len
)
2167 struct l2cap_conn
*conn
= chan
->conn
;
2168 struct sk_buff
*skb
;
2170 struct l2cap_hdr
*lh
;
2172 BT_DBG("chan %p len %zu", chan
, len
);
2174 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2176 skb
= chan
->ops
->alloc_skb(chan
, L2CAP_HDR_SIZE
, count
,
2177 msg
->msg_flags
& MSG_DONTWAIT
);
2181 /* Create L2CAP header */
2182 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2183 lh
->cid
= cpu_to_le16(chan
->dcid
);
2184 lh
->len
= cpu_to_le16(len
);
2186 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2187 if (unlikely(err
< 0)) {
2189 return ERR_PTR(err
);
2194 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2195 struct msghdr
*msg
, size_t len
,
2198 struct l2cap_conn
*conn
= chan
->conn
;
2199 struct sk_buff
*skb
;
2200 int err
, count
, hlen
;
2201 struct l2cap_hdr
*lh
;
2203 BT_DBG("chan %p len %zu", chan
, len
);
2206 return ERR_PTR(-ENOTCONN
);
2208 hlen
= __ertm_hdr_size(chan
);
2211 hlen
+= L2CAP_SDULEN_SIZE
;
2213 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2214 hlen
+= L2CAP_FCS_SIZE
;
2216 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2218 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2219 msg
->msg_flags
& MSG_DONTWAIT
);
2223 /* Create L2CAP header */
2224 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2225 lh
->cid
= cpu_to_le16(chan
->dcid
);
2226 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2228 /* Control header is populated later */
2229 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2230 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2232 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2235 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2237 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2238 if (unlikely(err
< 0)) {
2240 return ERR_PTR(err
);
2243 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2244 bt_cb(skb
)->control
.retries
= 0;
2248 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2249 struct sk_buff_head
*seg_queue
,
2250 struct msghdr
*msg
, size_t len
)
2252 struct sk_buff
*skb
;
2257 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2259 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2260 * so fragmented skbs are not used. The HCI layer's handling
2261 * of fragmented skbs is not compatible with ERTM's queueing.
2264 /* PDU size is derived from the HCI MTU */
2265 pdu_len
= chan
->conn
->mtu
;
2267 /* Constrain PDU size for BR/EDR connections */
2269 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2271 /* Adjust for largest possible L2CAP overhead. */
2273 pdu_len
-= L2CAP_FCS_SIZE
;
2275 pdu_len
-= __ertm_hdr_size(chan
);
2277 /* Remote device may have requested smaller PDUs */
2278 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2280 if (len
<= pdu_len
) {
2281 sar
= L2CAP_SAR_UNSEGMENTED
;
2285 sar
= L2CAP_SAR_START
;
2290 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2293 __skb_queue_purge(seg_queue
);
2294 return PTR_ERR(skb
);
2297 bt_cb(skb
)->control
.sar
= sar
;
2298 __skb_queue_tail(seg_queue
, skb
);
2304 if (len
<= pdu_len
) {
2305 sar
= L2CAP_SAR_END
;
2308 sar
= L2CAP_SAR_CONTINUE
;
2315 static struct sk_buff
*l2cap_create_le_flowctl_pdu(struct l2cap_chan
*chan
,
2317 size_t len
, u16 sdulen
)
2319 struct l2cap_conn
*conn
= chan
->conn
;
2320 struct sk_buff
*skb
;
2321 int err
, count
, hlen
;
2322 struct l2cap_hdr
*lh
;
2324 BT_DBG("chan %p len %zu", chan
, len
);
2327 return ERR_PTR(-ENOTCONN
);
2329 hlen
= L2CAP_HDR_SIZE
;
2332 hlen
+= L2CAP_SDULEN_SIZE
;
2334 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2336 skb
= chan
->ops
->alloc_skb(chan
, hlen
, count
,
2337 msg
->msg_flags
& MSG_DONTWAIT
);
2341 /* Create L2CAP header */
2342 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2343 lh
->cid
= cpu_to_le16(chan
->dcid
);
2344 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2347 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2349 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2350 if (unlikely(err
< 0)) {
2352 return ERR_PTR(err
);
2358 static int l2cap_segment_le_sdu(struct l2cap_chan
*chan
,
2359 struct sk_buff_head
*seg_queue
,
2360 struct msghdr
*msg
, size_t len
)
2362 struct sk_buff
*skb
;
2366 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2369 pdu_len
= chan
->remote_mps
- L2CAP_SDULEN_SIZE
;
2375 skb
= l2cap_create_le_flowctl_pdu(chan
, msg
, pdu_len
, sdu_len
);
2377 __skb_queue_purge(seg_queue
);
2378 return PTR_ERR(skb
);
2381 __skb_queue_tail(seg_queue
, skb
);
2387 pdu_len
+= L2CAP_SDULEN_SIZE
;
2394 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
2396 struct sk_buff
*skb
;
2398 struct sk_buff_head seg_queue
;
2403 /* Connectionless channel */
2404 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2405 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
2407 return PTR_ERR(skb
);
2409 /* Channel lock is released before requesting new skb and then
2410 * reacquired thus we need to recheck channel state.
2412 if (chan
->state
!= BT_CONNECTED
) {
2417 l2cap_do_send(chan
, skb
);
2421 switch (chan
->mode
) {
2422 case L2CAP_MODE_LE_FLOWCTL
:
2423 /* Check outgoing MTU */
2424 if (len
> chan
->omtu
)
2427 if (!chan
->tx_credits
)
2430 __skb_queue_head_init(&seg_queue
);
2432 err
= l2cap_segment_le_sdu(chan
, &seg_queue
, msg
, len
);
2434 if (chan
->state
!= BT_CONNECTED
) {
2435 __skb_queue_purge(&seg_queue
);
2442 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2444 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
2445 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
2449 if (!chan
->tx_credits
)
2450 chan
->ops
->suspend(chan
);
2456 case L2CAP_MODE_BASIC
:
2457 /* Check outgoing MTU */
2458 if (len
> chan
->omtu
)
2461 /* Create a basic PDU */
2462 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
2464 return PTR_ERR(skb
);
2466 /* Channel lock is released before requesting new skb and then
2467 * reacquired thus we need to recheck channel state.
2469 if (chan
->state
!= BT_CONNECTED
) {
2474 l2cap_do_send(chan
, skb
);
2478 case L2CAP_MODE_ERTM
:
2479 case L2CAP_MODE_STREAMING
:
2480 /* Check outgoing MTU */
2481 if (len
> chan
->omtu
) {
2486 __skb_queue_head_init(&seg_queue
);
2488 /* Do segmentation before calling in to the state machine,
2489 * since it's possible to block while waiting for memory
2492 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2494 /* The channel could have been closed while segmenting,
2495 * check that it is still connected.
2497 if (chan
->state
!= BT_CONNECTED
) {
2498 __skb_queue_purge(&seg_queue
);
2505 if (chan
->mode
== L2CAP_MODE_ERTM
)
2506 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2508 l2cap_streaming_send(chan
, &seg_queue
);
2512 /* If the skbs were not queued for sending, they'll still be in
2513 * seg_queue and need to be purged.
2515 __skb_queue_purge(&seg_queue
);
2519 BT_DBG("bad state %1.1x", chan
->mode
);
2525 EXPORT_SYMBOL_GPL(l2cap_chan_send
);
2527 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2529 struct l2cap_ctrl control
;
2532 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2534 memset(&control
, 0, sizeof(control
));
2536 control
.super
= L2CAP_SUPER_SREJ
;
2538 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2539 seq
= __next_seq(chan
, seq
)) {
2540 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2541 control
.reqseq
= seq
;
2542 l2cap_send_sframe(chan
, &control
);
2543 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2547 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2550 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2552 struct l2cap_ctrl control
;
2554 BT_DBG("chan %p", chan
);
2556 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2559 memset(&control
, 0, sizeof(control
));
2561 control
.super
= L2CAP_SUPER_SREJ
;
2562 control
.reqseq
= chan
->srej_list
.tail
;
2563 l2cap_send_sframe(chan
, &control
);
2566 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2568 struct l2cap_ctrl control
;
2572 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2574 memset(&control
, 0, sizeof(control
));
2576 control
.super
= L2CAP_SUPER_SREJ
;
2578 /* Capture initial list head to allow only one pass through the list. */
2579 initial_head
= chan
->srej_list
.head
;
2582 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2583 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2586 control
.reqseq
= seq
;
2587 l2cap_send_sframe(chan
, &control
);
2588 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2589 } while (chan
->srej_list
.head
!= initial_head
);
2592 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2594 struct sk_buff
*acked_skb
;
2597 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2599 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2602 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2603 chan
->expected_ack_seq
, chan
->unacked_frames
);
2605 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2606 ackseq
= __next_seq(chan
, ackseq
)) {
2608 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2610 skb_unlink(acked_skb
, &chan
->tx_q
);
2611 kfree_skb(acked_skb
);
2612 chan
->unacked_frames
--;
2616 chan
->expected_ack_seq
= reqseq
;
2618 if (chan
->unacked_frames
== 0)
2619 __clear_retrans_timer(chan
);
2621 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2624 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2626 BT_DBG("chan %p", chan
);
2628 chan
->expected_tx_seq
= chan
->buffer_seq
;
2629 l2cap_seq_list_clear(&chan
->srej_list
);
2630 skb_queue_purge(&chan
->srej_q
);
2631 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2634 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2635 struct l2cap_ctrl
*control
,
2636 struct sk_buff_head
*skbs
, u8 event
)
2638 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2642 case L2CAP_EV_DATA_REQUEST
:
2643 if (chan
->tx_send_head
== NULL
)
2644 chan
->tx_send_head
= skb_peek(skbs
);
2646 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2647 l2cap_ertm_send(chan
);
2649 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2650 BT_DBG("Enter LOCAL_BUSY");
2651 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2653 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2654 /* The SREJ_SENT state must be aborted if we are to
2655 * enter the LOCAL_BUSY state.
2657 l2cap_abort_rx_srej_sent(chan
);
2660 l2cap_send_ack(chan
);
2663 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2664 BT_DBG("Exit LOCAL_BUSY");
2665 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2667 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2668 struct l2cap_ctrl local_control
;
2670 memset(&local_control
, 0, sizeof(local_control
));
2671 local_control
.sframe
= 1;
2672 local_control
.super
= L2CAP_SUPER_RR
;
2673 local_control
.poll
= 1;
2674 local_control
.reqseq
= chan
->buffer_seq
;
2675 l2cap_send_sframe(chan
, &local_control
);
2677 chan
->retry_count
= 1;
2678 __set_monitor_timer(chan
);
2679 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2682 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2683 l2cap_process_reqseq(chan
, control
->reqseq
);
2685 case L2CAP_EV_EXPLICIT_POLL
:
2686 l2cap_send_rr_or_rnr(chan
, 1);
2687 chan
->retry_count
= 1;
2688 __set_monitor_timer(chan
);
2689 __clear_ack_timer(chan
);
2690 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2692 case L2CAP_EV_RETRANS_TO
:
2693 l2cap_send_rr_or_rnr(chan
, 1);
2694 chan
->retry_count
= 1;
2695 __set_monitor_timer(chan
);
2696 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2698 case L2CAP_EV_RECV_FBIT
:
2699 /* Nothing to process */
2706 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2707 struct l2cap_ctrl
*control
,
2708 struct sk_buff_head
*skbs
, u8 event
)
2710 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2714 case L2CAP_EV_DATA_REQUEST
:
2715 if (chan
->tx_send_head
== NULL
)
2716 chan
->tx_send_head
= skb_peek(skbs
);
2717 /* Queue data, but don't send. */
2718 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2720 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2721 BT_DBG("Enter LOCAL_BUSY");
2722 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2724 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2725 /* The SREJ_SENT state must be aborted if we are to
2726 * enter the LOCAL_BUSY state.
2728 l2cap_abort_rx_srej_sent(chan
);
2731 l2cap_send_ack(chan
);
2734 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2735 BT_DBG("Exit LOCAL_BUSY");
2736 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2738 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2739 struct l2cap_ctrl local_control
;
2740 memset(&local_control
, 0, sizeof(local_control
));
2741 local_control
.sframe
= 1;
2742 local_control
.super
= L2CAP_SUPER_RR
;
2743 local_control
.poll
= 1;
2744 local_control
.reqseq
= chan
->buffer_seq
;
2745 l2cap_send_sframe(chan
, &local_control
);
2747 chan
->retry_count
= 1;
2748 __set_monitor_timer(chan
);
2749 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2752 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2753 l2cap_process_reqseq(chan
, control
->reqseq
);
2757 case L2CAP_EV_RECV_FBIT
:
2758 if (control
&& control
->final
) {
2759 __clear_monitor_timer(chan
);
2760 if (chan
->unacked_frames
> 0)
2761 __set_retrans_timer(chan
);
2762 chan
->retry_count
= 0;
2763 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2764 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2767 case L2CAP_EV_EXPLICIT_POLL
:
2770 case L2CAP_EV_MONITOR_TO
:
2771 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2772 l2cap_send_rr_or_rnr(chan
, 1);
2773 __set_monitor_timer(chan
);
2774 chan
->retry_count
++;
2776 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2784 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2785 struct sk_buff_head
*skbs
, u8 event
)
2787 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2788 chan
, control
, skbs
, event
, chan
->tx_state
);
2790 switch (chan
->tx_state
) {
2791 case L2CAP_TX_STATE_XMIT
:
2792 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2794 case L2CAP_TX_STATE_WAIT_F
:
2795 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2803 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2804 struct l2cap_ctrl
*control
)
2806 BT_DBG("chan %p, control %p", chan
, control
);
2807 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2810 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2811 struct l2cap_ctrl
*control
)
2813 BT_DBG("chan %p, control %p", chan
, control
);
2814 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2817 /* Copy frame to all raw sockets on that connection */
2818 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2820 struct sk_buff
*nskb
;
2821 struct l2cap_chan
*chan
;
2823 BT_DBG("conn %p", conn
);
2825 mutex_lock(&conn
->chan_lock
);
2827 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2828 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2831 /* Don't send frame to the channel it came from */
2832 if (bt_cb(skb
)->chan
== chan
)
2835 nskb
= skb_clone(skb
, GFP_KERNEL
);
2838 if (chan
->ops
->recv(chan
, nskb
))
2842 mutex_unlock(&conn
->chan_lock
);
2845 /* ---- L2CAP signalling commands ---- */
2846 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2847 u8 ident
, u16 dlen
, void *data
)
2849 struct sk_buff
*skb
, **frag
;
2850 struct l2cap_cmd_hdr
*cmd
;
2851 struct l2cap_hdr
*lh
;
2854 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2855 conn
, code
, ident
, dlen
);
2857 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2860 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2861 count
= min_t(unsigned int, conn
->mtu
, len
);
2863 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2867 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2868 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2870 if (conn
->hcon
->type
== LE_LINK
)
2871 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2873 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2875 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2878 cmd
->len
= cpu_to_le16(dlen
);
2881 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2882 memcpy(skb_put(skb
, count
), data
, count
);
2888 /* Continuation fragments (no L2CAP header) */
2889 frag
= &skb_shinfo(skb
)->frag_list
;
2891 count
= min_t(unsigned int, conn
->mtu
, len
);
2893 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2897 memcpy(skb_put(*frag
, count
), data
, count
);
2902 frag
= &(*frag
)->next
;
2912 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2915 struct l2cap_conf_opt
*opt
= *ptr
;
2918 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2926 *val
= *((u8
*) opt
->val
);
2930 *val
= get_unaligned_le16(opt
->val
);
2934 *val
= get_unaligned_le32(opt
->val
);
2938 *val
= (unsigned long) opt
->val
;
2942 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2946 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2948 struct l2cap_conf_opt
*opt
= *ptr
;
2950 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2957 *((u8
*) opt
->val
) = val
;
2961 put_unaligned_le16(val
, opt
->val
);
2965 put_unaligned_le32(val
, opt
->val
);
2969 memcpy(opt
->val
, (void *) val
, len
);
2973 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2976 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2978 struct l2cap_conf_efs efs
;
2980 switch (chan
->mode
) {
2981 case L2CAP_MODE_ERTM
:
2982 efs
.id
= chan
->local_id
;
2983 efs
.stype
= chan
->local_stype
;
2984 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2985 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2986 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2987 efs
.flush_to
= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
2990 case L2CAP_MODE_STREAMING
:
2992 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2993 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2994 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3003 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3004 (unsigned long) &efs
);
3007 static void l2cap_ack_timeout(struct work_struct
*work
)
3009 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3013 BT_DBG("chan %p", chan
);
3015 l2cap_chan_lock(chan
);
3017 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3018 chan
->last_acked_seq
);
3021 l2cap_send_rr_or_rnr(chan
, 0);
3023 l2cap_chan_unlock(chan
);
3024 l2cap_chan_put(chan
);
3027 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3031 chan
->next_tx_seq
= 0;
3032 chan
->expected_tx_seq
= 0;
3033 chan
->expected_ack_seq
= 0;
3034 chan
->unacked_frames
= 0;
3035 chan
->buffer_seq
= 0;
3036 chan
->frames_sent
= 0;
3037 chan
->last_acked_seq
= 0;
3039 chan
->sdu_last_frag
= NULL
;
3042 skb_queue_head_init(&chan
->tx_q
);
3044 chan
->local_amp_id
= AMP_ID_BREDR
;
3045 chan
->move_id
= AMP_ID_BREDR
;
3046 chan
->move_state
= L2CAP_MOVE_STABLE
;
3047 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3049 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3052 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3053 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3055 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3056 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3057 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3059 skb_queue_head_init(&chan
->srej_q
);
3061 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3065 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3067 l2cap_seq_list_free(&chan
->srej_list
);
3072 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3075 case L2CAP_MODE_STREAMING
:
3076 case L2CAP_MODE_ERTM
:
3077 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3081 return L2CAP_MODE_BASIC
;
3085 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3087 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3090 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3092 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
3095 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3096 struct l2cap_conf_rfc
*rfc
)
3098 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3099 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3101 /* Class 1 devices have must have ERTM timeouts
3102 * exceeding the Link Supervision Timeout. The
3103 * default Link Supervision Timeout for AMP
3104 * controllers is 10 seconds.
3106 * Class 1 devices use 0xffffffff for their
3107 * best-effort flush timeout, so the clamping logic
3108 * will result in a timeout that meets the above
3109 * requirement. ERTM timeouts are 16-bit values, so
3110 * the maximum timeout is 65.535 seconds.
3113 /* Convert timeout to milliseconds and round */
3114 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3116 /* This is the recommended formula for class 2 devices
3117 * that start ERTM timers when packets are sent to the
3120 ertm_to
= 3 * ertm_to
+ 500;
3122 if (ertm_to
> 0xffff)
3125 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3126 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3128 rfc
->retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3129 rfc
->monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3133 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3135 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3136 __l2cap_ews_supported(chan
->conn
)) {
3137 /* use extended control field */
3138 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3139 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3141 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3142 L2CAP_DEFAULT_TX_WINDOW
);
3143 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3145 chan
->ack_win
= chan
->tx_win
;
3148 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3150 struct l2cap_conf_req
*req
= data
;
3151 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3152 void *ptr
= req
->data
;
3155 BT_DBG("chan %p", chan
);
3157 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3160 switch (chan
->mode
) {
3161 case L2CAP_MODE_STREAMING
:
3162 case L2CAP_MODE_ERTM
:
3163 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3166 if (__l2cap_efs_supported(chan
->conn
))
3167 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3171 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3176 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3177 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3179 switch (chan
->mode
) {
3180 case L2CAP_MODE_BASIC
:
3184 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3185 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3188 rfc
.mode
= L2CAP_MODE_BASIC
;
3190 rfc
.max_transmit
= 0;
3191 rfc
.retrans_timeout
= 0;
3192 rfc
.monitor_timeout
= 0;
3193 rfc
.max_pdu_size
= 0;
3195 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3196 (unsigned long) &rfc
);
3199 case L2CAP_MODE_ERTM
:
3200 rfc
.mode
= L2CAP_MODE_ERTM
;
3201 rfc
.max_transmit
= chan
->max_tx
;
3203 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3205 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3206 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3208 rfc
.max_pdu_size
= cpu_to_le16(size
);
3210 l2cap_txwin_setup(chan
);
3212 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3213 L2CAP_DEFAULT_TX_WINDOW
);
3215 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3216 (unsigned long) &rfc
);
3218 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3219 l2cap_add_opt_efs(&ptr
, chan
);
3221 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3222 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3225 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3226 if (chan
->fcs
== L2CAP_FCS_NONE
||
3227 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3228 chan
->fcs
= L2CAP_FCS_NONE
;
3229 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3234 case L2CAP_MODE_STREAMING
:
3235 l2cap_txwin_setup(chan
);
3236 rfc
.mode
= L2CAP_MODE_STREAMING
;
3238 rfc
.max_transmit
= 0;
3239 rfc
.retrans_timeout
= 0;
3240 rfc
.monitor_timeout
= 0;
3242 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3243 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3245 rfc
.max_pdu_size
= cpu_to_le16(size
);
3247 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3248 (unsigned long) &rfc
);
3250 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3251 l2cap_add_opt_efs(&ptr
, chan
);
3253 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3254 if (chan
->fcs
== L2CAP_FCS_NONE
||
3255 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3256 chan
->fcs
= L2CAP_FCS_NONE
;
3257 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3263 req
->dcid
= cpu_to_le16(chan
->dcid
);
3264 req
->flags
= cpu_to_le16(0);
3269 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
3271 struct l2cap_conf_rsp
*rsp
= data
;
3272 void *ptr
= rsp
->data
;
3273 void *req
= chan
->conf_req
;
3274 int len
= chan
->conf_len
;
3275 int type
, hint
, olen
;
3277 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3278 struct l2cap_conf_efs efs
;
3280 u16 mtu
= L2CAP_DEFAULT_MTU
;
3281 u16 result
= L2CAP_CONF_SUCCESS
;
3284 BT_DBG("chan %p", chan
);
3286 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3287 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
3289 hint
= type
& L2CAP_CONF_HINT
;
3290 type
&= L2CAP_CONF_MASK
;
3293 case L2CAP_CONF_MTU
:
3297 case L2CAP_CONF_FLUSH_TO
:
3298 chan
->flush_to
= val
;
3301 case L2CAP_CONF_QOS
:
3304 case L2CAP_CONF_RFC
:
3305 if (olen
== sizeof(rfc
))
3306 memcpy(&rfc
, (void *) val
, olen
);
3309 case L2CAP_CONF_FCS
:
3310 if (val
== L2CAP_FCS_NONE
)
3311 set_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
);
3314 case L2CAP_CONF_EFS
:
3316 if (olen
== sizeof(efs
))
3317 memcpy(&efs
, (void *) val
, olen
);
3320 case L2CAP_CONF_EWS
:
3321 if (!chan
->conn
->hs_enabled
)
3322 return -ECONNREFUSED
;
3324 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3325 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3326 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3327 chan
->remote_tx_win
= val
;
3334 result
= L2CAP_CONF_UNKNOWN
;
3335 *((u8
*) ptr
++) = type
;
3340 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3343 switch (chan
->mode
) {
3344 case L2CAP_MODE_STREAMING
:
3345 case L2CAP_MODE_ERTM
:
3346 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3347 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3348 chan
->conn
->feat_mask
);
3353 if (__l2cap_efs_supported(chan
->conn
))
3354 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3356 return -ECONNREFUSED
;
3359 if (chan
->mode
!= rfc
.mode
)
3360 return -ECONNREFUSED
;
3366 if (chan
->mode
!= rfc
.mode
) {
3367 result
= L2CAP_CONF_UNACCEPT
;
3368 rfc
.mode
= chan
->mode
;
3370 if (chan
->num_conf_rsp
== 1)
3371 return -ECONNREFUSED
;
3373 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3374 (unsigned long) &rfc
);
3377 if (result
== L2CAP_CONF_SUCCESS
) {
3378 /* Configure output options and let the other side know
3379 * which ones we don't like. */
3381 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3382 result
= L2CAP_CONF_UNACCEPT
;
3385 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3387 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3390 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3391 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3392 efs
.stype
!= chan
->local_stype
) {
3394 result
= L2CAP_CONF_UNACCEPT
;
3396 if (chan
->num_conf_req
>= 1)
3397 return -ECONNREFUSED
;
3399 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3401 (unsigned long) &efs
);
3403 /* Send PENDING Conf Rsp */
3404 result
= L2CAP_CONF_PENDING
;
3405 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3410 case L2CAP_MODE_BASIC
:
3411 chan
->fcs
= L2CAP_FCS_NONE
;
3412 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3415 case L2CAP_MODE_ERTM
:
3416 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3417 chan
->remote_tx_win
= rfc
.txwin_size
;
3419 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3421 chan
->remote_max_tx
= rfc
.max_transmit
;
3423 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3424 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3425 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3426 rfc
.max_pdu_size
= cpu_to_le16(size
);
3427 chan
->remote_mps
= size
;
3429 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3431 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3433 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3434 sizeof(rfc
), (unsigned long) &rfc
);
3436 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3437 chan
->remote_id
= efs
.id
;
3438 chan
->remote_stype
= efs
.stype
;
3439 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3440 chan
->remote_flush_to
=
3441 le32_to_cpu(efs
.flush_to
);
3442 chan
->remote_acc_lat
=
3443 le32_to_cpu(efs
.acc_lat
);
3444 chan
->remote_sdu_itime
=
3445 le32_to_cpu(efs
.sdu_itime
);
3446 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3448 (unsigned long) &efs
);
3452 case L2CAP_MODE_STREAMING
:
3453 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3454 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3455 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3456 rfc
.max_pdu_size
= cpu_to_le16(size
);
3457 chan
->remote_mps
= size
;
3459 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3461 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3462 (unsigned long) &rfc
);
3467 result
= L2CAP_CONF_UNACCEPT
;
3469 memset(&rfc
, 0, sizeof(rfc
));
3470 rfc
.mode
= chan
->mode
;
3473 if (result
== L2CAP_CONF_SUCCESS
)
3474 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3476 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3477 rsp
->result
= cpu_to_le16(result
);
3478 rsp
->flags
= cpu_to_le16(0);
3483 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3484 void *data
, u16
*result
)
3486 struct l2cap_conf_req
*req
= data
;
3487 void *ptr
= req
->data
;
3490 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3491 struct l2cap_conf_efs efs
;
3493 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3495 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3496 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3499 case L2CAP_CONF_MTU
:
3500 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3501 *result
= L2CAP_CONF_UNACCEPT
;
3502 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3505 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3508 case L2CAP_CONF_FLUSH_TO
:
3509 chan
->flush_to
= val
;
3510 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3514 case L2CAP_CONF_RFC
:
3515 if (olen
== sizeof(rfc
))
3516 memcpy(&rfc
, (void *)val
, olen
);
3518 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3519 rfc
.mode
!= chan
->mode
)
3520 return -ECONNREFUSED
;
3524 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3525 sizeof(rfc
), (unsigned long) &rfc
);
3528 case L2CAP_CONF_EWS
:
3529 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3530 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3534 case L2CAP_CONF_EFS
:
3535 if (olen
== sizeof(efs
))
3536 memcpy(&efs
, (void *)val
, olen
);
3538 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3539 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3540 efs
.stype
!= chan
->local_stype
)
3541 return -ECONNREFUSED
;
3543 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3544 (unsigned long) &efs
);
3547 case L2CAP_CONF_FCS
:
3548 if (*result
== L2CAP_CONF_PENDING
)
3549 if (val
== L2CAP_FCS_NONE
)
3550 set_bit(CONF_RECV_NO_FCS
,
3556 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3557 return -ECONNREFUSED
;
3559 chan
->mode
= rfc
.mode
;
3561 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3563 case L2CAP_MODE_ERTM
:
3564 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3565 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3566 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3567 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3568 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3571 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3572 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3573 chan
->local_sdu_itime
=
3574 le32_to_cpu(efs
.sdu_itime
);
3575 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3576 chan
->local_flush_to
=
3577 le32_to_cpu(efs
.flush_to
);
3581 case L2CAP_MODE_STREAMING
:
3582 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3586 req
->dcid
= cpu_to_le16(chan
->dcid
);
3587 req
->flags
= cpu_to_le16(0);
3592 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3593 u16 result
, u16 flags
)
3595 struct l2cap_conf_rsp
*rsp
= data
;
3596 void *ptr
= rsp
->data
;
3598 BT_DBG("chan %p", chan
);
3600 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3601 rsp
->result
= cpu_to_le16(result
);
3602 rsp
->flags
= cpu_to_le16(flags
);
3607 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3609 struct l2cap_le_conn_rsp rsp
;
3610 struct l2cap_conn
*conn
= chan
->conn
;
3612 BT_DBG("chan %p", chan
);
3614 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3615 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3616 rsp
.mps
= cpu_to_le16(chan
->mps
);
3617 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
3618 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3620 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
3624 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3626 struct l2cap_conn_rsp rsp
;
3627 struct l2cap_conn
*conn
= chan
->conn
;
3631 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3632 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3633 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3634 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3637 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3639 rsp_code
= L2CAP_CONN_RSP
;
3641 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3643 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3645 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3648 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3649 l2cap_build_conf_req(chan
, buf
), buf
);
3650 chan
->num_conf_req
++;
3653 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3657 /* Use sane default values in case a misbehaving remote device
3658 * did not send an RFC or extended window size option.
3660 u16 txwin_ext
= chan
->ack_win
;
3661 struct l2cap_conf_rfc rfc
= {
3663 .retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3664 .monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3665 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3666 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3669 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3671 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3674 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3675 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3678 case L2CAP_CONF_RFC
:
3679 if (olen
== sizeof(rfc
))
3680 memcpy(&rfc
, (void *)val
, olen
);
3682 case L2CAP_CONF_EWS
:
3689 case L2CAP_MODE_ERTM
:
3690 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3691 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3692 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3693 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3694 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3696 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3699 case L2CAP_MODE_STREAMING
:
3700 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3704 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3705 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3708 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3710 if (cmd_len
< sizeof(*rej
))
3713 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3716 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3717 cmd
->ident
== conn
->info_ident
) {
3718 cancel_delayed_work(&conn
->info_timer
);
3720 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3721 conn
->info_ident
= 0;
3723 l2cap_conn_start(conn
);
3729 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3730 struct l2cap_cmd_hdr
*cmd
,
3731 u8
*data
, u8 rsp_code
, u8 amp_id
)
3733 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3734 struct l2cap_conn_rsp rsp
;
3735 struct l2cap_chan
*chan
= NULL
, *pchan
;
3736 int result
, status
= L2CAP_CS_NO_INFO
;
3738 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3739 __le16 psm
= req
->psm
;
3741 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3743 /* Check if we have socket listening on psm */
3744 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3745 &conn
->hcon
->dst
, ACL_LINK
);
3747 result
= L2CAP_CR_BAD_PSM
;
3751 mutex_lock(&conn
->chan_lock
);
3752 l2cap_chan_lock(pchan
);
3754 /* Check if the ACL is secure enough (if not SDP) */
3755 if (psm
!= cpu_to_le16(L2CAP_PSM_SDP
) &&
3756 !hci_conn_check_link_mode(conn
->hcon
)) {
3757 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3758 result
= L2CAP_CR_SEC_BLOCK
;
3762 result
= L2CAP_CR_NO_MEM
;
3764 /* Check if we already have channel with that dcid */
3765 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3768 chan
= pchan
->ops
->new_connection(pchan
);
3772 /* For certain devices (ex: HID mouse), support for authentication,
3773 * pairing and bonding is optional. For such devices, inorder to avoid
3774 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3775 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3777 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3779 bacpy(&chan
->src
, &conn
->hcon
->src
);
3780 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3781 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
3782 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
3785 chan
->local_amp_id
= amp_id
;
3787 __l2cap_chan_add(conn
, chan
);
3791 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3793 chan
->ident
= cmd
->ident
;
3795 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3796 if (l2cap_chan_check_security(chan
, false)) {
3797 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3798 l2cap_state_change(chan
, BT_CONNECT2
);
3799 result
= L2CAP_CR_PEND
;
3800 status
= L2CAP_CS_AUTHOR_PEND
;
3801 chan
->ops
->defer(chan
);
3803 /* Force pending result for AMP controllers.
3804 * The connection will succeed after the
3805 * physical link is up.
3807 if (amp_id
== AMP_ID_BREDR
) {
3808 l2cap_state_change(chan
, BT_CONFIG
);
3809 result
= L2CAP_CR_SUCCESS
;
3811 l2cap_state_change(chan
, BT_CONNECT2
);
3812 result
= L2CAP_CR_PEND
;
3814 status
= L2CAP_CS_NO_INFO
;
3817 l2cap_state_change(chan
, BT_CONNECT2
);
3818 result
= L2CAP_CR_PEND
;
3819 status
= L2CAP_CS_AUTHEN_PEND
;
3822 l2cap_state_change(chan
, BT_CONNECT2
);
3823 result
= L2CAP_CR_PEND
;
3824 status
= L2CAP_CS_NO_INFO
;
3828 l2cap_chan_unlock(pchan
);
3829 mutex_unlock(&conn
->chan_lock
);
3830 l2cap_chan_put(pchan
);
3833 rsp
.scid
= cpu_to_le16(scid
);
3834 rsp
.dcid
= cpu_to_le16(dcid
);
3835 rsp
.result
= cpu_to_le16(result
);
3836 rsp
.status
= cpu_to_le16(status
);
3837 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3839 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3840 struct l2cap_info_req info
;
3841 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3843 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3844 conn
->info_ident
= l2cap_get_ident(conn
);
3846 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3848 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3849 sizeof(info
), &info
);
3852 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3853 result
== L2CAP_CR_SUCCESS
) {
3855 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3856 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3857 l2cap_build_conf_req(chan
, buf
), buf
);
3858 chan
->num_conf_req
++;
3864 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3865 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3867 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3868 struct hci_conn
*hcon
= conn
->hcon
;
3870 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3874 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3875 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3876 mgmt_device_connected(hdev
, hcon
, 0, NULL
, 0);
3877 hci_dev_unlock(hdev
);
3879 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3883 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3884 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3887 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3888 u16 scid
, dcid
, result
, status
;
3889 struct l2cap_chan
*chan
;
3893 if (cmd_len
< sizeof(*rsp
))
3896 scid
= __le16_to_cpu(rsp
->scid
);
3897 dcid
= __le16_to_cpu(rsp
->dcid
);
3898 result
= __le16_to_cpu(rsp
->result
);
3899 status
= __le16_to_cpu(rsp
->status
);
3901 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3902 dcid
, scid
, result
, status
);
3904 mutex_lock(&conn
->chan_lock
);
3907 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3913 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3922 l2cap_chan_lock(chan
);
3925 case L2CAP_CR_SUCCESS
:
3926 l2cap_state_change(chan
, BT_CONFIG
);
3929 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3931 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3934 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3935 l2cap_build_conf_req(chan
, req
), req
);
3936 chan
->num_conf_req
++;
3940 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3944 l2cap_chan_del(chan
, ECONNREFUSED
);
3948 l2cap_chan_unlock(chan
);
3951 mutex_unlock(&conn
->chan_lock
);
3956 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3958 /* FCS is enabled only in ERTM or streaming mode, if one or both
3961 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3962 chan
->fcs
= L2CAP_FCS_NONE
;
3963 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
3964 chan
->fcs
= L2CAP_FCS_CRC16
;
3967 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3968 u8 ident
, u16 flags
)
3970 struct l2cap_conn
*conn
= chan
->conn
;
3972 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
3975 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3976 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3978 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
3979 l2cap_build_conf_rsp(chan
, data
,
3980 L2CAP_CONF_SUCCESS
, flags
), data
);
3983 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
3986 struct l2cap_cmd_rej_cid rej
;
3988 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3989 rej
.scid
= __cpu_to_le16(scid
);
3990 rej
.dcid
= __cpu_to_le16(dcid
);
3992 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3995 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
3996 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3999 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4002 struct l2cap_chan
*chan
;
4005 if (cmd_len
< sizeof(*req
))
4008 dcid
= __le16_to_cpu(req
->dcid
);
4009 flags
= __le16_to_cpu(req
->flags
);
4011 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4013 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4015 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4019 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4020 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4025 /* Reject if config buffer is too small. */
4026 len
= cmd_len
- sizeof(*req
);
4027 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4028 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4029 l2cap_build_conf_rsp(chan
, rsp
,
4030 L2CAP_CONF_REJECT
, flags
), rsp
);
4035 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4036 chan
->conf_len
+= len
;
4038 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4039 /* Incomplete config. Send empty response. */
4040 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4041 l2cap_build_conf_rsp(chan
, rsp
,
4042 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4046 /* Complete config. */
4047 len
= l2cap_parse_conf_req(chan
, rsp
);
4049 l2cap_send_disconn_req(chan
, ECONNRESET
);
4053 chan
->ident
= cmd
->ident
;
4054 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4055 chan
->num_conf_rsp
++;
4057 /* Reset config buffer. */
4060 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4063 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4064 set_default_fcs(chan
);
4066 if (chan
->mode
== L2CAP_MODE_ERTM
||
4067 chan
->mode
== L2CAP_MODE_STREAMING
)
4068 err
= l2cap_ertm_init(chan
);
4071 l2cap_send_disconn_req(chan
, -err
);
4073 l2cap_chan_ready(chan
);
4078 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4080 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4081 l2cap_build_conf_req(chan
, buf
), buf
);
4082 chan
->num_conf_req
++;
4085 /* Got Conf Rsp PENDING from remote side and assume we sent
4086 Conf Rsp PENDING in the code above */
4087 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4088 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4090 /* check compatibility */
4092 /* Send rsp for BR/EDR channel */
4094 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4096 chan
->ident
= cmd
->ident
;
4100 l2cap_chan_unlock(chan
);
4104 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4105 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4108 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4109 u16 scid
, flags
, result
;
4110 struct l2cap_chan
*chan
;
4111 int len
= cmd_len
- sizeof(*rsp
);
4114 if (cmd_len
< sizeof(*rsp
))
4117 scid
= __le16_to_cpu(rsp
->scid
);
4118 flags
= __le16_to_cpu(rsp
->flags
);
4119 result
= __le16_to_cpu(rsp
->result
);
4121 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4124 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4129 case L2CAP_CONF_SUCCESS
:
4130 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4131 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4134 case L2CAP_CONF_PENDING
:
4135 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4137 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4140 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4143 l2cap_send_disconn_req(chan
, ECONNRESET
);
4147 if (!chan
->hs_hcon
) {
4148 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4151 if (l2cap_check_efs(chan
)) {
4152 amp_create_logical_link(chan
);
4153 chan
->ident
= cmd
->ident
;
4159 case L2CAP_CONF_UNACCEPT
:
4160 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4163 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4164 l2cap_send_disconn_req(chan
, ECONNRESET
);
4168 /* throw out any old stored conf requests */
4169 result
= L2CAP_CONF_SUCCESS
;
4170 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4173 l2cap_send_disconn_req(chan
, ECONNRESET
);
4177 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4178 L2CAP_CONF_REQ
, len
, req
);
4179 chan
->num_conf_req
++;
4180 if (result
!= L2CAP_CONF_SUCCESS
)
4186 l2cap_chan_set_err(chan
, ECONNRESET
);
4188 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4189 l2cap_send_disconn_req(chan
, ECONNRESET
);
4193 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4196 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4198 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4199 set_default_fcs(chan
);
4201 if (chan
->mode
== L2CAP_MODE_ERTM
||
4202 chan
->mode
== L2CAP_MODE_STREAMING
)
4203 err
= l2cap_ertm_init(chan
);
4206 l2cap_send_disconn_req(chan
, -err
);
4208 l2cap_chan_ready(chan
);
4212 l2cap_chan_unlock(chan
);
/* Handle an incoming L2CAP Disconnection Request (peer wants to tear down
 * a channel).  Validates the command length, looks up the channel by the
 * DCID the peer quoted, acks with a Disconnection Response, then deletes
 * the channel.
 *
 * NOTE(review): this chunk lost interior lines in extraction (the original
 * numbering skips, e.g. 4217->4220, 4246->4248); the dropped lines include
 * braces/returns, so read against the upstream file before relying on the
 * exact control flow.
 */
4216 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4217 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4220 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4221 struct l2cap_disconn_rsp rsp
;
4223 struct l2cap_chan
*chan
;
/* Disconnect requests have a fixed size; anything else is malformed. */
4225 if (cmd_len
!= sizeof(*req
))
4228 scid
= __le16_to_cpu(req
->scid
);
4229 dcid
= __le16_to_cpu(req
->dcid
);
4231 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
/* chan_lock protects the per-connection channel list during lookup. */
4233 mutex_lock(&conn
->chan_lock
);
/* The peer's dcid is our scid, hence the lookup by dcid here. */
4235 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4237 mutex_unlock(&conn
->chan_lock
);
/* Unknown CID: reject per spec instead of responding. */
4238 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4242 l2cap_chan_lock(chan
);
/* Echo the CID pair back (swapped to the peer's point of view). */
4244 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4245 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4246 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4248 chan
->ops
->set_shutdown(chan
);
/* Hold a ref across chan_del/unlock so ops->close sees a live object. */
4250 l2cap_chan_hold(chan
);
4251 l2cap_chan_del(chan
, ECONNRESET
);
4253 l2cap_chan_unlock(chan
);
4255 chan
->ops
->close(chan
);
4256 l2cap_chan_put(chan
);
4258 mutex_unlock(&conn
->chan_lock
);
/* Handle an incoming L2CAP Disconnection Response (peer acknowledged our
 * disconnect).  Finds the channel by our SCID and finishes tearing it down
 * with error 0 (clean close), mirroring l2cap_disconnect_req's hold/close/
 * put sequence.
 *
 * NOTE(review): extraction dropped interior lines here (numbering gaps,
 * e.g. 4283->4287 missing the early-out path) — verify control flow
 * against the upstream file.
 */
4263 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4264 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4267 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4269 struct l2cap_chan
*chan
;
/* Responses are fixed-size; drop malformed ones. */
4271 if (cmd_len
!= sizeof(*rsp
))
4274 scid
= __le16_to_cpu(rsp
->scid
);
4275 dcid
= __le16_to_cpu(rsp
->dcid
);
4277 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4279 mutex_lock(&conn
->chan_lock
);
4281 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4283 mutex_unlock(&conn
->chan_lock
);
4287 l2cap_chan_lock(chan
);
/* err == 0: the disconnect we initiated completed normally. */
4289 l2cap_chan_hold(chan
);
4290 l2cap_chan_del(chan
, 0);
4292 l2cap_chan_unlock(chan
);
4294 chan
->ops
->close(chan
);
4295 l2cap_chan_put(chan
);
4297 mutex_unlock(&conn
->chan_lock
);
4302 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4303 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4306 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4309 if (cmd_len
!= sizeof(*req
))
4312 type
= __le16_to_cpu(req
->type
);
4314 BT_DBG("type 0x%4.4x", type
);
4316 if (type
== L2CAP_IT_FEAT_MASK
) {
4318 u32 feat_mask
= l2cap_feat_mask
;
4319 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4320 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4321 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4323 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4325 if (conn
->hs_enabled
)
4326 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4327 | L2CAP_FEAT_EXT_WINDOW
;
4329 put_unaligned_le32(feat_mask
, rsp
->data
);
4330 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4332 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4334 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4336 if (conn
->hs_enabled
)
4337 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4339 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4341 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4342 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4343 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4344 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4347 struct l2cap_info_rsp rsp
;
4348 rsp
.type
= cpu_to_le16(type
);
4349 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
4350 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4357 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4358 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4361 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4364 if (cmd_len
< sizeof(*rsp
))
4367 type
= __le16_to_cpu(rsp
->type
);
4368 result
= __le16_to_cpu(rsp
->result
);
4370 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4372 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4373 if (cmd
->ident
!= conn
->info_ident
||
4374 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4377 cancel_delayed_work(&conn
->info_timer
);
4379 if (result
!= L2CAP_IR_SUCCESS
) {
4380 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4381 conn
->info_ident
= 0;
4383 l2cap_conn_start(conn
);
4389 case L2CAP_IT_FEAT_MASK
:
4390 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4392 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4393 struct l2cap_info_req req
;
4394 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4396 conn
->info_ident
= l2cap_get_ident(conn
);
4398 l2cap_send_cmd(conn
, conn
->info_ident
,
4399 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4401 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4402 conn
->info_ident
= 0;
4404 l2cap_conn_start(conn
);
4408 case L2CAP_IT_FIXED_CHAN
:
4409 conn
->fixed_chan_mask
= rsp
->data
[0];
4410 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4411 conn
->info_ident
= 0;
4413 l2cap_conn_start(conn
);
4420 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4421 struct l2cap_cmd_hdr
*cmd
,
4422 u16 cmd_len
, void *data
)
4424 struct l2cap_create_chan_req
*req
= data
;
4425 struct l2cap_create_chan_rsp rsp
;
4426 struct l2cap_chan
*chan
;
4427 struct hci_dev
*hdev
;
4430 if (cmd_len
!= sizeof(*req
))
4433 if (!conn
->hs_enabled
)
4436 psm
= le16_to_cpu(req
->psm
);
4437 scid
= le16_to_cpu(req
->scid
);
4439 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4441 /* For controller id 0 make BR/EDR connection */
4442 if (req
->amp_id
== AMP_ID_BREDR
) {
4443 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4448 /* Validate AMP controller id */
4449 hdev
= hci_dev_get(req
->amp_id
);
4453 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4458 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4461 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4462 struct hci_conn
*hs_hcon
;
4464 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4468 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4473 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4475 mgr
->bredr_chan
= chan
;
4476 chan
->hs_hcon
= hs_hcon
;
4477 chan
->fcs
= L2CAP_FCS_NONE
;
4478 conn
->mtu
= hdev
->block_mtu
;
4487 rsp
.scid
= cpu_to_le16(scid
);
4488 rsp
.result
= cpu_to_le16(L2CAP_CR_BAD_AMP
);
4489 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4491 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* Send an AMP Move Channel Request for @chan toward controller
 * @dest_amp_id.  Allocates a fresh signaling ident (saved in chan->ident
 * so the response can be matched) and arms the move timer.
 */
4497 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4499 struct l2cap_move_chan_req req
;
4502 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
)
4504 ident
= l2cap_get_ident(chan
->conn
);
4505 chan
->ident
= ident
;
/* icid identifies the moving channel: it is our SCID. */
4507 req
.icid
= cpu_to_le16(chan
->scid
);
4508 req
.dest_amp_id
= dest_amp_id
;
4510 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
/* Guard against a peer that never answers the move request. */
4513 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
/* Send a Move Channel Response with @result, replying with the ident the
 * peer's request carried (previously stashed in chan->ident).  The icid in
 * a response is the channel's DCID (the peer's side of the CID pair).
 */
4516 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4518 struct l2cap_move_chan_rsp rsp
;
4520 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4522 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4523 rsp
.result
= cpu_to_le16(result
);
4525 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
/* Send a Move Channel Confirmation with @result.  Unlike the response path
 * this originates a new exchange, so a fresh ident is allocated, and the
 * move timer is armed while waiting for the confirm-response.
 */
4529 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4531 struct l2cap_move_chan_cfm cfm
;
4533 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4535 chan
->ident
= l2cap_get_ident(chan
->conn
);
/* Confirmations quote our SCID as the icid. */
4537 cfm
.icid
= cpu_to_le16(chan
->scid
);
4538 cfm
.result
= cpu_to_le16(result
);
4540 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4543 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
/* Send an UNCONFIRMED Move Channel Confirmation for a bare @icid.  Used
 * when no channel object could be located, so only the icid from the
 * failed exchange is available (no chan state to update, no timer).
 */
4546 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4548 struct l2cap_move_chan_cfm cfm
;
4550 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4552 cfm
.icid
= cpu_to_le16(icid
);
4553 cfm
.result
= cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4555 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
/* Send a Move Channel Confirmation Response for @icid, echoing the peer's
 * @ident.  The spec requires this response even when the icid is unknown,
 * which is why this helper takes raw conn/ident/icid rather than a chan.
 */
4559 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4562 struct l2cap_move_chan_cfm_rsp rsp
;
4564 BT_DBG("icid 0x%4.4x", icid
);
4566 rsp
.icid
= cpu_to_le16(icid
);
4567 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
/* Drop the channel's references to its AMP logical link (hci_chan and
 * hs hci_conn).  Actual link teardown is still a placeholder per the
 * comment below — this only clears the pointers.
 */
4570 static void __release_logical_link(struct l2cap_chan
*chan
)
4572 chan
->hs_hchan
= NULL
;
4573 chan
->hs_hcon
= NULL
;
4575 /* Placeholder - release the logical link */
/* Clean up after a failed AMP logical-link setup.  If the channel never
 * reached BT_CONNECTED the whole channel creation failed, so disconnect;
 * otherwise a channel *move* failed, and the cleanup depends on which
 * side of the move we were on (chan->move_role).
 *
 * NOTE(review): extraction dropped lines here (e.g. the `return`/`break`
 * statements between cases, per the 4583->4587 and 4590->4592 numbering
 * gaps) — confirm case fallthrough behavior against the upstream file.
 */
4578 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4580 /* Logical link setup failed */
4581 if (chan
->state
!= BT_CONNECTED
) {
4582 /* Create channel failure, disconnect */
4583 l2cap_send_disconn_req(chan
, ECONNRESET
);
4587 switch (chan
->move_role
) {
4588 case L2CAP_MOVE_ROLE_RESPONDER
:
4589 l2cap_move_done(chan
);
/* We could not host the move: tell the initiator it's unsupported. */
4590 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4592 case L2CAP_MOVE_ROLE_INITIATOR
:
4593 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4594 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4595 /* Remote has only sent pending or
4596 * success responses, clean up
4598 l2cap_move_done(chan
);
4601 /* Other amp move states imply that the move
4602 * has already aborted
4604 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4609 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4610 struct hci_chan
*hchan
)
4612 struct l2cap_conf_rsp rsp
;
4614 chan
->hs_hchan
= hchan
;
4615 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4617 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4619 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4622 set_default_fcs(chan
);
4624 err
= l2cap_ertm_init(chan
);
4626 l2cap_send_disconn_req(chan
, -err
);
4628 l2cap_chan_ready(chan
);
4632 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4633 struct hci_chan
*hchan
)
4635 chan
->hs_hcon
= hchan
->conn
;
4636 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4638 BT_DBG("move_state %d", chan
->move_state
);
4640 switch (chan
->move_state
) {
4641 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4642 /* Move confirm will be sent after a success
4643 * response is received
4645 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4647 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4648 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4649 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4650 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4651 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4652 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4653 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4654 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4655 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4659 /* Move was not in expected state, free the channel */
4660 __release_logical_link(chan
);
4662 chan
->move_state
= L2CAP_MOVE_STABLE
;
4666 /* Call with chan locked */
/* AMP logical-link confirmation callback.  On failure, delegate cleanup to
 * l2cap_logical_fail() and drop the link pointers.  On success, dispatch on
 * channel state: a not-yet-connected channel finishes *creation* on the AMP
 * (unless it is actually on BR/EDR), a connected channel finishes a *move*.
 *
 * NOTE(review): the early-return after the failure branch and the status
 * check itself were dropped by extraction (4674->4678 gap) — verify
 * against the upstream file.
 */
4667 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4670 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4673 l2cap_logical_fail(chan
);
4674 __release_logical_link(chan
);
4678 if (chan
->state
!= BT_CONNECTED
) {
4679 /* Ignore logical link if channel is on BR/EDR */
4680 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4681 l2cap_logical_finish_create(chan
, hchan
);
4683 l2cap_logical_finish_move(chan
, hchan
);
/* Begin moving @chan to the other controller type.  If the channel is
 * currently on BR/EDR, a physical AMP link must be prepared first
 * (WAIT_PREPARE); if it is already on an AMP, we can immediately request a
 * move back to BR/EDR (dest_amp_id 0 == AMP_ID_BREDR) and wait for the
 * response (WAIT_RSP_SUCCESS).
 *
 * NOTE(review): the BR/EDR branch's early return (when the channel policy
 * does not prefer AMP) was dropped by extraction (4692->4694 gap).
 */
4687 void l2cap_move_start(struct l2cap_chan
*chan
)
4689 BT_DBG("chan %p", chan
);
4691 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4692 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4694 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4695 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4696 /* Placeholder - start physical link setup */
4698 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4699 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4701 l2cap_move_setup(chan
);
/* 0 == AMP_ID_BREDR: request a move back to the BR/EDR controller. */
4702 l2cap_send_move_chan_req(chan
, 0);
4706 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4707 u8 local_amp_id
, u8 remote_amp_id
)
4709 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4710 local_amp_id
, remote_amp_id
);
4712 chan
->fcs
= L2CAP_FCS_NONE
;
4714 /* Outgoing channel on AMP */
4715 if (chan
->state
== BT_CONNECT
) {
4716 if (result
== L2CAP_CR_SUCCESS
) {
4717 chan
->local_amp_id
= local_amp_id
;
4718 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4720 /* Revert to BR/EDR connect */
4721 l2cap_send_conn_req(chan
);
4727 /* Incoming channel on AMP */
4728 if (__l2cap_no_conn_pending(chan
)) {
4729 struct l2cap_conn_rsp rsp
;
4731 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4732 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4734 if (result
== L2CAP_CR_SUCCESS
) {
4735 /* Send successful response */
4736 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4737 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4739 /* Send negative response */
4740 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4741 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4744 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4747 if (result
== L2CAP_CR_SUCCESS
) {
4748 l2cap_state_change(chan
, BT_CONFIG
);
4749 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4750 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4752 l2cap_build_conf_req(chan
, buf
), buf
);
4753 chan
->num_conf_req
++;
/* Initiator side: kick off the actual channel move once the physical link
 * is ready.  Records the target controller in move_id, enters WAIT_RSP,
 * and sends the Move Channel Request toward @remote_amp_id.
 */
4758 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4761 l2cap_move_setup(chan
);
4762 chan
->move_id
= local_amp_id
;
4763 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4765 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4768 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4770 struct hci_chan
*hchan
= NULL
;
4772 /* Placeholder - get hci_chan for logical link */
4775 if (hchan
->state
== BT_CONNECTED
) {
4776 /* Logical link is ready to go */
4777 chan
->hs_hcon
= hchan
->conn
;
4778 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4779 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4780 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4782 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4784 /* Wait for logical link to be ready */
4785 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4788 /* Logical link not available */
4789 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
/* Abort an in-progress move.  When we were the responder, translate the
 * local error into a spec result code (-EINVAL -> BAD_ID, anything else ->
 * NOT_ALLOWED) and answer the peer; then return the channel to the stable
 * (no-move) state and restart ERTM transmission that was paused for the
 * move.
 */
4793 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4795 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4797 if (result
== -EINVAL
)
4798 rsp_result
= L2CAP_MR_BAD_ID
;
4800 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4802 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4805 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4806 chan
->move_state
= L2CAP_MOVE_STABLE
;
4808 /* Restart data transmission */
4809 l2cap_ertm_send(chan
);
4812 /* Invoke with locked chan */
4813 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4815 u8 local_amp_id
= chan
->local_amp_id
;
4816 u8 remote_amp_id
= chan
->remote_amp_id
;
4818 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4819 chan
, result
, local_amp_id
, remote_amp_id
);
4821 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4822 l2cap_chan_unlock(chan
);
4826 if (chan
->state
!= BT_CONNECTED
) {
4827 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4828 } else if (result
!= L2CAP_MR_SUCCESS
) {
4829 l2cap_do_move_cancel(chan
, result
);
4831 switch (chan
->move_role
) {
4832 case L2CAP_MOVE_ROLE_INITIATOR
:
4833 l2cap_do_move_initiate(chan
, local_amp_id
,
4836 case L2CAP_MOVE_ROLE_RESPONDER
:
4837 l2cap_do_move_respond(chan
, result
);
4840 l2cap_do_move_cancel(chan
, result
);
4846 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4847 struct l2cap_cmd_hdr
*cmd
,
4848 u16 cmd_len
, void *data
)
4850 struct l2cap_move_chan_req
*req
= data
;
4851 struct l2cap_move_chan_rsp rsp
;
4852 struct l2cap_chan
*chan
;
4854 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4856 if (cmd_len
!= sizeof(*req
))
4859 icid
= le16_to_cpu(req
->icid
);
4861 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4863 if (!conn
->hs_enabled
)
4866 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4868 rsp
.icid
= cpu_to_le16(icid
);
4869 rsp
.result
= cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4870 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4875 chan
->ident
= cmd
->ident
;
4877 if (chan
->scid
< L2CAP_CID_DYN_START
||
4878 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4879 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4880 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4881 result
= L2CAP_MR_NOT_ALLOWED
;
4882 goto send_move_response
;
4885 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4886 result
= L2CAP_MR_SAME_ID
;
4887 goto send_move_response
;
4890 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4891 struct hci_dev
*hdev
;
4892 hdev
= hci_dev_get(req
->dest_amp_id
);
4893 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4894 !test_bit(HCI_UP
, &hdev
->flags
)) {
4898 result
= L2CAP_MR_BAD_ID
;
4899 goto send_move_response
;
4904 /* Detect a move collision. Only send a collision response
4905 * if this side has "lost", otherwise proceed with the move.
4906 * The winner has the larger bd_addr.
4908 if ((__chan_is_moving(chan
) ||
4909 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4910 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4911 result
= L2CAP_MR_COLLISION
;
4912 goto send_move_response
;
4915 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4916 l2cap_move_setup(chan
);
4917 chan
->move_id
= req
->dest_amp_id
;
4920 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4921 /* Moving to BR/EDR */
4922 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4923 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4924 result
= L2CAP_MR_PEND
;
4926 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4927 result
= L2CAP_MR_SUCCESS
;
4930 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4931 /* Placeholder - uncomment when amp functions are available */
4932 /*amp_accept_physical(chan, req->dest_amp_id);*/
4933 result
= L2CAP_MR_PEND
;
4937 l2cap_send_move_chan_rsp(chan
, result
);
4939 l2cap_chan_unlock(chan
);
4944 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4946 struct l2cap_chan
*chan
;
4947 struct hci_chan
*hchan
= NULL
;
4949 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4951 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4955 __clear_chan_timer(chan
);
4956 if (result
== L2CAP_MR_PEND
)
4957 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
4959 switch (chan
->move_state
) {
4960 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4961 /* Move confirm will be sent when logical link
4964 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4966 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
4967 if (result
== L2CAP_MR_PEND
) {
4969 } else if (test_bit(CONN_LOCAL_BUSY
,
4970 &chan
->conn_state
)) {
4971 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4973 /* Logical link is up or moving to BR/EDR,
4976 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4977 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4980 case L2CAP_MOVE_WAIT_RSP
:
4982 if (result
== L2CAP_MR_SUCCESS
) {
4983 /* Remote is ready, send confirm immediately
4984 * after logical link is ready
4986 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4988 /* Both logical link and move success
4989 * are required to confirm
4991 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
4994 /* Placeholder - get hci_chan for logical link */
4996 /* Logical link not available */
4997 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5001 /* If the logical link is not yet connected, do not
5002 * send confirmation.
5004 if (hchan
->state
!= BT_CONNECTED
)
5007 /* Logical link is already ready to go */
5009 chan
->hs_hcon
= hchan
->conn
;
5010 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5012 if (result
== L2CAP_MR_SUCCESS
) {
5013 /* Can confirm now */
5014 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5016 /* Now only need move success
5019 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5022 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5025 /* Any other amp move state means the move failed. */
5026 chan
->move_id
= chan
->local_amp_id
;
5027 l2cap_move_done(chan
);
5028 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5031 l2cap_chan_unlock(chan
);
/* Handle a failed Move Channel Response.  Looks the channel up by the
 * signaling @ident; if it is gone, confirm UNCONFIRMED using the icid as
 * best guess.  An initiator losing a move collision switches to responder
 * role (the peer's move proceeds); any other failure cancels the move and
 * keeps the channel on the current controller.
 *
 * NOTE(review): extraction dropped lines around the branches (5042->5046,
 * 5054->5058 gaps include returns/braces) — verify control flow upstream.
 */
5034 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5037 struct l2cap_chan
*chan
;
5039 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5041 /* Could not locate channel, icid is best guess */
5042 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5046 __clear_chan_timer(chan
);
5048 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5049 if (result
== L2CAP_MR_COLLISION
) {
/* Lost the collision: let the peer drive the move instead. */
5050 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5052 /* Cleanup - cancel move */
5053 chan
->move_id
= chan
->local_amp_id
;
5054 l2cap_move_done(chan
);
5058 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5060 l2cap_chan_unlock(chan
);
/* Handle an incoming Move Channel Response.  SUCCESS and PEND advance the
 * move state machine via l2cap_move_continue(); any other result aborts it
 * via l2cap_move_fail().
 *
 * NOTE(review): the malformed-length early return after the sizeof check
 * was dropped by extraction (5070->5073 gap).
 */
5063 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5064 struct l2cap_cmd_hdr
*cmd
,
5065 u16 cmd_len
, void *data
)
5067 struct l2cap_move_chan_rsp
*rsp
= data
;
5070 if (cmd_len
!= sizeof(*rsp
))
5073 icid
= le16_to_cpu(rsp
->icid
);
5074 result
= le16_to_cpu(rsp
->result
);
5076 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5078 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5079 l2cap_move_continue(conn
, icid
, result
);
5081 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5086 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5087 struct l2cap_cmd_hdr
*cmd
,
5088 u16 cmd_len
, void *data
)
5090 struct l2cap_move_chan_cfm
*cfm
= data
;
5091 struct l2cap_chan
*chan
;
5094 if (cmd_len
!= sizeof(*cfm
))
5097 icid
= le16_to_cpu(cfm
->icid
);
5098 result
= le16_to_cpu(cfm
->result
);
5100 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5102 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5104 /* Spec requires a response even if the icid was not found */
5105 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5109 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5110 if (result
== L2CAP_MC_CONFIRMED
) {
5111 chan
->local_amp_id
= chan
->move_id
;
5112 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5113 __release_logical_link(chan
);
5115 chan
->move_id
= chan
->local_amp_id
;
5118 l2cap_move_done(chan
);
5121 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5123 l2cap_chan_unlock(chan
);
5128 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5129 struct l2cap_cmd_hdr
*cmd
,
5130 u16 cmd_len
, void *data
)
5132 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5133 struct l2cap_chan
*chan
;
5136 if (cmd_len
!= sizeof(*rsp
))
5139 icid
= le16_to_cpu(rsp
->icid
);
5141 BT_DBG("icid 0x%4.4x", icid
);
5143 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5147 __clear_chan_timer(chan
);
5149 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5150 chan
->local_amp_id
= chan
->move_id
;
5152 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5153 __release_logical_link(chan
);
5155 l2cap_move_done(chan
);
5158 l2cap_chan_unlock(chan
);
5163 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5164 struct l2cap_cmd_hdr
*cmd
,
5165 u16 cmd_len
, u8
*data
)
5167 struct hci_conn
*hcon
= conn
->hcon
;
5168 struct l2cap_conn_param_update_req
*req
;
5169 struct l2cap_conn_param_update_rsp rsp
;
5170 u16 min
, max
, latency
, to_multiplier
;
5173 if (hcon
->role
!= HCI_ROLE_MASTER
)
5176 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5179 req
= (struct l2cap_conn_param_update_req
*) data
;
5180 min
= __le16_to_cpu(req
->min
);
5181 max
= __le16_to_cpu(req
->max
);
5182 latency
= __le16_to_cpu(req
->latency
);
5183 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5185 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5186 min
, max
, latency
, to_multiplier
);
5188 memset(&rsp
, 0, sizeof(rsp
));
5190 err
= hci_check_conn_params(min
, max
, latency
, to_multiplier
);
5192 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5194 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5196 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5202 store_hint
= hci_le_conn_update(hcon
, min
, max
, latency
,
5204 mgmt_new_conn_param(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
,
5205 store_hint
, min
, max
, latency
,
5213 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5214 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5217 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5218 u16 dcid
, mtu
, mps
, credits
, result
;
5219 struct l2cap_chan
*chan
;
5222 if (cmd_len
< sizeof(*rsp
))
5225 dcid
= __le16_to_cpu(rsp
->dcid
);
5226 mtu
= __le16_to_cpu(rsp
->mtu
);
5227 mps
= __le16_to_cpu(rsp
->mps
);
5228 credits
= __le16_to_cpu(rsp
->credits
);
5229 result
= __le16_to_cpu(rsp
->result
);
5231 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23))
5234 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5235 dcid
, mtu
, mps
, credits
, result
);
5237 mutex_lock(&conn
->chan_lock
);
5239 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5247 l2cap_chan_lock(chan
);
5250 case L2CAP_CR_SUCCESS
:
5254 chan
->remote_mps
= mps
;
5255 chan
->tx_credits
= credits
;
5256 l2cap_chan_ready(chan
);
5260 l2cap_chan_del(chan
, ECONNREFUSED
);
5264 l2cap_chan_unlock(chan
);
5267 mutex_unlock(&conn
->chan_lock
);
5272 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5273 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5278 switch (cmd
->code
) {
5279 case L2CAP_COMMAND_REJ
:
5280 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5283 case L2CAP_CONN_REQ
:
5284 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5287 case L2CAP_CONN_RSP
:
5288 case L2CAP_CREATE_CHAN_RSP
:
5289 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5292 case L2CAP_CONF_REQ
:
5293 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5296 case L2CAP_CONF_RSP
:
5297 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5300 case L2CAP_DISCONN_REQ
:
5301 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5304 case L2CAP_DISCONN_RSP
:
5305 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5308 case L2CAP_ECHO_REQ
:
5309 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5312 case L2CAP_ECHO_RSP
:
5315 case L2CAP_INFO_REQ
:
5316 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5319 case L2CAP_INFO_RSP
:
5320 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5323 case L2CAP_CREATE_CHAN_REQ
:
5324 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5327 case L2CAP_MOVE_CHAN_REQ
:
5328 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5331 case L2CAP_MOVE_CHAN_RSP
:
5332 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5335 case L2CAP_MOVE_CHAN_CFM
:
5336 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5339 case L2CAP_MOVE_CHAN_CFM_RSP
:
5340 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5344 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5352 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5353 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5356 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5357 struct l2cap_le_conn_rsp rsp
;
5358 struct l2cap_chan
*chan
, *pchan
;
5359 u16 dcid
, scid
, credits
, mtu
, mps
;
5363 if (cmd_len
!= sizeof(*req
))
5366 scid
= __le16_to_cpu(req
->scid
);
5367 mtu
= __le16_to_cpu(req
->mtu
);
5368 mps
= __le16_to_cpu(req
->mps
);
5373 if (mtu
< 23 || mps
< 23)
5376 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5379 /* Check if we have socket listening on psm */
5380 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5381 &conn
->hcon
->dst
, LE_LINK
);
5383 result
= L2CAP_CR_BAD_PSM
;
5388 mutex_lock(&conn
->chan_lock
);
5389 l2cap_chan_lock(pchan
);
5391 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
)) {
5392 result
= L2CAP_CR_AUTHENTICATION
;
5394 goto response_unlock
;
5397 /* Check if we already have channel with that dcid */
5398 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5399 result
= L2CAP_CR_NO_MEM
;
5401 goto response_unlock
;
5404 chan
= pchan
->ops
->new_connection(pchan
);
5406 result
= L2CAP_CR_NO_MEM
;
5407 goto response_unlock
;
5410 l2cap_le_flowctl_init(chan
);
5412 bacpy(&chan
->src
, &conn
->hcon
->src
);
5413 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5414 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
5415 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
5419 chan
->remote_mps
= mps
;
5420 chan
->tx_credits
= __le16_to_cpu(req
->credits
);
5422 __l2cap_chan_add(conn
, chan
);
5424 credits
= chan
->rx_credits
;
5426 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5428 chan
->ident
= cmd
->ident
;
5430 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5431 l2cap_state_change(chan
, BT_CONNECT2
);
5432 /* The following result value is actually not defined
5433 * for LE CoC but we use it to let the function know
5434 * that it should bail out after doing its cleanup
5435 * instead of sending a response.
5437 result
= L2CAP_CR_PEND
;
5438 chan
->ops
->defer(chan
);
5440 l2cap_chan_ready(chan
);
5441 result
= L2CAP_CR_SUCCESS
;
5445 l2cap_chan_unlock(pchan
);
5446 mutex_unlock(&conn
->chan_lock
);
5447 l2cap_chan_put(pchan
);
5449 if (result
== L2CAP_CR_PEND
)
5454 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5455 rsp
.mps
= cpu_to_le16(chan
->mps
);
5461 rsp
.dcid
= cpu_to_le16(dcid
);
5462 rsp
.credits
= cpu_to_le16(credits
);
5463 rsp
.result
= cpu_to_le16(result
);
5465 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
5470 static inline int l2cap_le_credits(struct l2cap_conn
*conn
,
5471 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5474 struct l2cap_le_credits
*pkt
;
5475 struct l2cap_chan
*chan
;
5476 u16 cid
, credits
, max_credits
;
5478 if (cmd_len
!= sizeof(*pkt
))
5481 pkt
= (struct l2cap_le_credits
*) data
;
5482 cid
= __le16_to_cpu(pkt
->cid
);
5483 credits
= __le16_to_cpu(pkt
->credits
);
5485 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid
, credits
);
5487 chan
= l2cap_get_chan_by_dcid(conn
, cid
);
5491 max_credits
= LE_FLOWCTL_MAX_CREDITS
- chan
->tx_credits
;
5492 if (credits
> max_credits
) {
5493 BT_ERR("LE credits overflow");
5494 l2cap_send_disconn_req(chan
, ECONNRESET
);
5495 l2cap_chan_unlock(chan
);
5497 /* Return 0 so that we don't trigger an unnecessary
5498 * command reject packet.
5503 chan
->tx_credits
+= credits
;
5505 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
5506 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
5510 if (chan
->tx_credits
)
5511 chan
->ops
->resume(chan
);
5513 l2cap_chan_unlock(chan
);
5518 static inline int l2cap_le_command_rej(struct l2cap_conn
*conn
,
5519 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5522 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
5523 struct l2cap_chan
*chan
;
5525 if (cmd_len
< sizeof(*rej
))
5528 mutex_lock(&conn
->chan_lock
);
5530 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5534 l2cap_chan_lock(chan
);
5535 l2cap_chan_del(chan
, ECONNREFUSED
);
5536 l2cap_chan_unlock(chan
);
5539 mutex_unlock(&conn
->chan_lock
);
5543 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5544 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5549 switch (cmd
->code
) {
5550 case L2CAP_COMMAND_REJ
:
5551 l2cap_le_command_rej(conn
, cmd
, cmd_len
, data
);
5554 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5555 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5558 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5561 case L2CAP_LE_CONN_RSP
:
5562 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5565 case L2CAP_LE_CONN_REQ
:
5566 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5569 case L2CAP_LE_CREDITS
:
5570 err
= l2cap_le_credits(conn
, cmd
, cmd_len
, data
);
5573 case L2CAP_DISCONN_REQ
:
5574 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5577 case L2CAP_DISCONN_RSP
:
5578 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5582 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5590 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5591 struct sk_buff
*skb
)
5593 struct hci_conn
*hcon
= conn
->hcon
;
5594 struct l2cap_cmd_hdr
*cmd
;
5598 if (hcon
->type
!= LE_LINK
)
5601 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5604 cmd
= (void *) skb
->data
;
5605 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5607 len
= le16_to_cpu(cmd
->len
);
5609 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5611 if (len
!= skb
->len
|| !cmd
->ident
) {
5612 BT_DBG("corrupted command");
5616 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5618 struct l2cap_cmd_rej_unk rej
;
5620 BT_ERR("Wrong link type (%d)", err
);
5622 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5623 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5631 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5632 struct sk_buff
*skb
)
5634 struct hci_conn
*hcon
= conn
->hcon
;
5635 u8
*data
= skb
->data
;
5637 struct l2cap_cmd_hdr cmd
;
5640 l2cap_raw_recv(conn
, skb
);
5642 if (hcon
->type
!= ACL_LINK
)
5645 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5647 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5648 data
+= L2CAP_CMD_HDR_SIZE
;
5649 len
-= L2CAP_CMD_HDR_SIZE
;
5651 cmd_len
= le16_to_cpu(cmd
.len
);
5653 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5656 if (cmd_len
> len
|| !cmd
.ident
) {
5657 BT_DBG("corrupted command");
5661 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5663 struct l2cap_cmd_rej_unk rej
;
5665 BT_ERR("Wrong link type (%d)", err
);
5667 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5668 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5680 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5682 u16 our_fcs
, rcv_fcs
;
5685 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5686 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5688 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5690 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5691 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5692 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5693 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5695 if (our_fcs
!= rcv_fcs
)
5701 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5703 struct l2cap_ctrl control
;
5705 BT_DBG("chan %p", chan
);
5707 memset(&control
, 0, sizeof(control
));
5710 control
.reqseq
= chan
->buffer_seq
;
5711 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5713 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5714 control
.super
= L2CAP_SUPER_RNR
;
5715 l2cap_send_sframe(chan
, &control
);
5718 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5719 chan
->unacked_frames
> 0)
5720 __set_retrans_timer(chan
);
5722 /* Send pending iframes */
5723 l2cap_ertm_send(chan
);
5725 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5726 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5727 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5730 control
.super
= L2CAP_SUPER_RR
;
5731 l2cap_send_sframe(chan
, &control
);
5735 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5736 struct sk_buff
**last_frag
)
5738 /* skb->len reflects data in skb as well as all fragments
5739 * skb->data_len reflects only data in fragments
5741 if (!skb_has_frag_list(skb
))
5742 skb_shinfo(skb
)->frag_list
= new_frag
;
5744 new_frag
->next
= NULL
;
5746 (*last_frag
)->next
= new_frag
;
5747 *last_frag
= new_frag
;
5749 skb
->len
+= new_frag
->len
;
5750 skb
->data_len
+= new_frag
->len
;
5751 skb
->truesize
+= new_frag
->truesize
;
5754 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5755 struct l2cap_ctrl
*control
)
5759 switch (control
->sar
) {
5760 case L2CAP_SAR_UNSEGMENTED
:
5764 err
= chan
->ops
->recv(chan
, skb
);
5767 case L2CAP_SAR_START
:
5771 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5772 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5774 if (chan
->sdu_len
> chan
->imtu
) {
5779 if (skb
->len
>= chan
->sdu_len
)
5783 chan
->sdu_last_frag
= skb
;
5789 case L2CAP_SAR_CONTINUE
:
5793 append_skb_frag(chan
->sdu
, skb
,
5794 &chan
->sdu_last_frag
);
5797 if (chan
->sdu
->len
>= chan
->sdu_len
)
5807 append_skb_frag(chan
->sdu
, skb
,
5808 &chan
->sdu_last_frag
);
5811 if (chan
->sdu
->len
!= chan
->sdu_len
)
5814 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5817 /* Reassembly complete */
5819 chan
->sdu_last_frag
= NULL
;
5827 kfree_skb(chan
->sdu
);
5829 chan
->sdu_last_frag
= NULL
;
/* Re-segment queued frames after an AMP channel move changed the MTU. */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5842 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5846 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5849 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5850 l2cap_tx(chan
, NULL
, NULL
, event
);
5853 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5856 /* Pass sequential frames to l2cap_reassemble_sdu()
5857 * until a gap is encountered.
5860 BT_DBG("chan %p", chan
);
5862 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5863 struct sk_buff
*skb
;
5864 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5865 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5867 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5872 skb_unlink(skb
, &chan
->srej_q
);
5873 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5874 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
5879 if (skb_queue_empty(&chan
->srej_q
)) {
5880 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5881 l2cap_send_ack(chan
);
5887 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5888 struct l2cap_ctrl
*control
)
5890 struct sk_buff
*skb
;
5892 BT_DBG("chan %p, control %p", chan
, control
);
5894 if (control
->reqseq
== chan
->next_tx_seq
) {
5895 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5896 l2cap_send_disconn_req(chan
, ECONNRESET
);
5900 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5903 BT_DBG("Seq %d not available for retransmission",
5908 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5909 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5910 l2cap_send_disconn_req(chan
, ECONNRESET
);
5914 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5916 if (control
->poll
) {
5917 l2cap_pass_to_tx(chan
, control
);
5919 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5920 l2cap_retransmit(chan
, control
);
5921 l2cap_ertm_send(chan
);
5923 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5924 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5925 chan
->srej_save_reqseq
= control
->reqseq
;
5928 l2cap_pass_to_tx_fbit(chan
, control
);
5930 if (control
->final
) {
5931 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5932 !test_and_clear_bit(CONN_SREJ_ACT
,
5934 l2cap_retransmit(chan
, control
);
5936 l2cap_retransmit(chan
, control
);
5937 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5938 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5939 chan
->srej_save_reqseq
= control
->reqseq
;
5945 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5946 struct l2cap_ctrl
*control
)
5948 struct sk_buff
*skb
;
5950 BT_DBG("chan %p, control %p", chan
, control
);
5952 if (control
->reqseq
== chan
->next_tx_seq
) {
5953 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5954 l2cap_send_disconn_req(chan
, ECONNRESET
);
5958 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5960 if (chan
->max_tx
&& skb
&&
5961 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5962 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5963 l2cap_send_disconn_req(chan
, ECONNRESET
);
5967 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5969 l2cap_pass_to_tx(chan
, control
);
5971 if (control
->final
) {
5972 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
5973 l2cap_retransmit_all(chan
, control
);
5975 l2cap_retransmit_all(chan
, control
);
5976 l2cap_ertm_send(chan
);
5977 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
5978 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
5982 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
5984 BT_DBG("chan %p, txseq %d", chan
, txseq
);
5986 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
5987 chan
->expected_tx_seq
);
5989 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
5990 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5992 /* See notes below regarding "double poll" and
5995 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5996 BT_DBG("Invalid/Ignore - after SREJ");
5997 return L2CAP_TXSEQ_INVALID_IGNORE
;
5999 BT_DBG("Invalid - in window after SREJ sent");
6000 return L2CAP_TXSEQ_INVALID
;
6004 if (chan
->srej_list
.head
== txseq
) {
6005 BT_DBG("Expected SREJ");
6006 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6009 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6010 BT_DBG("Duplicate SREJ - txseq already stored");
6011 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6014 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6015 BT_DBG("Unexpected SREJ - not requested");
6016 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6020 if (chan
->expected_tx_seq
== txseq
) {
6021 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6023 BT_DBG("Invalid - txseq outside tx window");
6024 return L2CAP_TXSEQ_INVALID
;
6027 return L2CAP_TXSEQ_EXPECTED
;
6031 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6032 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6033 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6034 return L2CAP_TXSEQ_DUPLICATE
;
6037 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6038 /* A source of invalid packets is a "double poll" condition,
6039 * where delays cause us to send multiple poll packets. If
6040 * the remote stack receives and processes both polls,
6041 * sequence numbers can wrap around in such a way that a
6042 * resent frame has a sequence number that looks like new data
6043 * with a sequence gap. This would trigger an erroneous SREJ
6046 * Fortunately, this is impossible with a tx window that's
6047 * less than half of the maximum sequence number, which allows
6048 * invalid frames to be safely ignored.
6050 * With tx window sizes greater than half of the tx window
6051 * maximum, the frame is invalid and cannot be ignored. This
6052 * causes a disconnect.
6055 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6056 BT_DBG("Invalid/Ignore - txseq outside tx window");
6057 return L2CAP_TXSEQ_INVALID_IGNORE
;
6059 BT_DBG("Invalid - txseq outside tx window");
6060 return L2CAP_TXSEQ_INVALID
;
6063 BT_DBG("Unexpected - txseq indicates missing frames");
6064 return L2CAP_TXSEQ_UNEXPECTED
;
6068 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6069 struct l2cap_ctrl
*control
,
6070 struct sk_buff
*skb
, u8 event
)
6073 bool skb_in_use
= false;
6075 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6079 case L2CAP_EV_RECV_IFRAME
:
6080 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6081 case L2CAP_TXSEQ_EXPECTED
:
6082 l2cap_pass_to_tx(chan
, control
);
6084 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6085 BT_DBG("Busy, discarding expected seq %d",
6090 chan
->expected_tx_seq
= __next_seq(chan
,
6093 chan
->buffer_seq
= chan
->expected_tx_seq
;
6096 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6100 if (control
->final
) {
6101 if (!test_and_clear_bit(CONN_REJ_ACT
,
6102 &chan
->conn_state
)) {
6104 l2cap_retransmit_all(chan
, control
);
6105 l2cap_ertm_send(chan
);
6109 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6110 l2cap_send_ack(chan
);
6112 case L2CAP_TXSEQ_UNEXPECTED
:
6113 l2cap_pass_to_tx(chan
, control
);
6115 /* Can't issue SREJ frames in the local busy state.
6116 * Drop this frame, it will be seen as missing
6117 * when local busy is exited.
6119 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6120 BT_DBG("Busy, discarding unexpected seq %d",
6125 /* There was a gap in the sequence, so an SREJ
6126 * must be sent for each missing frame. The
6127 * current frame is stored for later use.
6129 skb_queue_tail(&chan
->srej_q
, skb
);
6131 BT_DBG("Queued %p (queue len %d)", skb
,
6132 skb_queue_len(&chan
->srej_q
));
6134 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6135 l2cap_seq_list_clear(&chan
->srej_list
);
6136 l2cap_send_srej(chan
, control
->txseq
);
6138 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6140 case L2CAP_TXSEQ_DUPLICATE
:
6141 l2cap_pass_to_tx(chan
, control
);
6143 case L2CAP_TXSEQ_INVALID_IGNORE
:
6145 case L2CAP_TXSEQ_INVALID
:
6147 l2cap_send_disconn_req(chan
, ECONNRESET
);
6151 case L2CAP_EV_RECV_RR
:
6152 l2cap_pass_to_tx(chan
, control
);
6153 if (control
->final
) {
6154 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6156 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6157 !__chan_is_moving(chan
)) {
6159 l2cap_retransmit_all(chan
, control
);
6162 l2cap_ertm_send(chan
);
6163 } else if (control
->poll
) {
6164 l2cap_send_i_or_rr_or_rnr(chan
);
6166 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6167 &chan
->conn_state
) &&
6168 chan
->unacked_frames
)
6169 __set_retrans_timer(chan
);
6171 l2cap_ertm_send(chan
);
6174 case L2CAP_EV_RECV_RNR
:
6175 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6176 l2cap_pass_to_tx(chan
, control
);
6177 if (control
&& control
->poll
) {
6178 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6179 l2cap_send_rr_or_rnr(chan
, 0);
6181 __clear_retrans_timer(chan
);
6182 l2cap_seq_list_clear(&chan
->retrans_list
);
6184 case L2CAP_EV_RECV_REJ
:
6185 l2cap_handle_rej(chan
, control
);
6187 case L2CAP_EV_RECV_SREJ
:
6188 l2cap_handle_srej(chan
, control
);
6194 if (skb
&& !skb_in_use
) {
6195 BT_DBG("Freeing %p", skb
);
6202 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6203 struct l2cap_ctrl
*control
,
6204 struct sk_buff
*skb
, u8 event
)
6207 u16 txseq
= control
->txseq
;
6208 bool skb_in_use
= false;
6210 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6214 case L2CAP_EV_RECV_IFRAME
:
6215 switch (l2cap_classify_txseq(chan
, txseq
)) {
6216 case L2CAP_TXSEQ_EXPECTED
:
6217 /* Keep frame for reassembly later */
6218 l2cap_pass_to_tx(chan
, control
);
6219 skb_queue_tail(&chan
->srej_q
, skb
);
6221 BT_DBG("Queued %p (queue len %d)", skb
,
6222 skb_queue_len(&chan
->srej_q
));
6224 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6226 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6227 l2cap_seq_list_pop(&chan
->srej_list
);
6229 l2cap_pass_to_tx(chan
, control
);
6230 skb_queue_tail(&chan
->srej_q
, skb
);
6232 BT_DBG("Queued %p (queue len %d)", skb
,
6233 skb_queue_len(&chan
->srej_q
));
6235 err
= l2cap_rx_queued_iframes(chan
);
6240 case L2CAP_TXSEQ_UNEXPECTED
:
6241 /* Got a frame that can't be reassembled yet.
6242 * Save it for later, and send SREJs to cover
6243 * the missing frames.
6245 skb_queue_tail(&chan
->srej_q
, skb
);
6247 BT_DBG("Queued %p (queue len %d)", skb
,
6248 skb_queue_len(&chan
->srej_q
));
6250 l2cap_pass_to_tx(chan
, control
);
6251 l2cap_send_srej(chan
, control
->txseq
);
6253 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6254 /* This frame was requested with an SREJ, but
6255 * some expected retransmitted frames are
6256 * missing. Request retransmission of missing
6259 skb_queue_tail(&chan
->srej_q
, skb
);
6261 BT_DBG("Queued %p (queue len %d)", skb
,
6262 skb_queue_len(&chan
->srej_q
));
6264 l2cap_pass_to_tx(chan
, control
);
6265 l2cap_send_srej_list(chan
, control
->txseq
);
6267 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6268 /* We've already queued this frame. Drop this copy. */
6269 l2cap_pass_to_tx(chan
, control
);
6271 case L2CAP_TXSEQ_DUPLICATE
:
6272 /* Expecting a later sequence number, so this frame
6273 * was already received. Ignore it completely.
6276 case L2CAP_TXSEQ_INVALID_IGNORE
:
6278 case L2CAP_TXSEQ_INVALID
:
6280 l2cap_send_disconn_req(chan
, ECONNRESET
);
6284 case L2CAP_EV_RECV_RR
:
6285 l2cap_pass_to_tx(chan
, control
);
6286 if (control
->final
) {
6287 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6289 if (!test_and_clear_bit(CONN_REJ_ACT
,
6290 &chan
->conn_state
)) {
6292 l2cap_retransmit_all(chan
, control
);
6295 l2cap_ertm_send(chan
);
6296 } else if (control
->poll
) {
6297 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6298 &chan
->conn_state
) &&
6299 chan
->unacked_frames
) {
6300 __set_retrans_timer(chan
);
6303 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6304 l2cap_send_srej_tail(chan
);
6306 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6307 &chan
->conn_state
) &&
6308 chan
->unacked_frames
)
6309 __set_retrans_timer(chan
);
6311 l2cap_send_ack(chan
);
6314 case L2CAP_EV_RECV_RNR
:
6315 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6316 l2cap_pass_to_tx(chan
, control
);
6317 if (control
->poll
) {
6318 l2cap_send_srej_tail(chan
);
6320 struct l2cap_ctrl rr_control
;
6321 memset(&rr_control
, 0, sizeof(rr_control
));
6322 rr_control
.sframe
= 1;
6323 rr_control
.super
= L2CAP_SUPER_RR
;
6324 rr_control
.reqseq
= chan
->buffer_seq
;
6325 l2cap_send_sframe(chan
, &rr_control
);
6329 case L2CAP_EV_RECV_REJ
:
6330 l2cap_handle_rej(chan
, control
);
6332 case L2CAP_EV_RECV_SREJ
:
6333 l2cap_handle_srej(chan
, control
);
6337 if (skb
&& !skb_in_use
) {
6338 BT_DBG("Freeing %p", skb
);
6345 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6347 BT_DBG("chan %p", chan
);
6349 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6352 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6354 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6356 return l2cap_resegment(chan
);
6359 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6360 struct l2cap_ctrl
*control
,
6361 struct sk_buff
*skb
, u8 event
)
6365 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6371 l2cap_process_reqseq(chan
, control
->reqseq
);
6373 if (!skb_queue_empty(&chan
->tx_q
))
6374 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6376 chan
->tx_send_head
= NULL
;
6378 /* Rewind next_tx_seq to the point expected
6381 chan
->next_tx_seq
= control
->reqseq
;
6382 chan
->unacked_frames
= 0;
6384 err
= l2cap_finish_move(chan
);
6388 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6389 l2cap_send_i_or_rr_or_rnr(chan
);
6391 if (event
== L2CAP_EV_RECV_IFRAME
)
6394 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6397 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6398 struct l2cap_ctrl
*control
,
6399 struct sk_buff
*skb
, u8 event
)
6403 if (!control
->final
)
6406 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6408 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6409 l2cap_process_reqseq(chan
, control
->reqseq
);
6411 if (!skb_queue_empty(&chan
->tx_q
))
6412 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6414 chan
->tx_send_head
= NULL
;
6416 /* Rewind next_tx_seq to the point expected
6419 chan
->next_tx_seq
= control
->reqseq
;
6420 chan
->unacked_frames
= 0;
6423 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6425 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6427 err
= l2cap_resegment(chan
);
6430 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6435 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6437 /* Make sure reqseq is for a packet that has been sent but not acked */
6440 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6441 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6444 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6445 struct sk_buff
*skb
, u8 event
)
6449 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6450 control
, skb
, event
, chan
->rx_state
);
6452 if (__valid_reqseq(chan
, control
->reqseq
)) {
6453 switch (chan
->rx_state
) {
6454 case L2CAP_RX_STATE_RECV
:
6455 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6457 case L2CAP_RX_STATE_SREJ_SENT
:
6458 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6461 case L2CAP_RX_STATE_WAIT_P
:
6462 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6464 case L2CAP_RX_STATE_WAIT_F
:
6465 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6472 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6473 control
->reqseq
, chan
->next_tx_seq
,
6474 chan
->expected_ack_seq
);
6475 l2cap_send_disconn_req(chan
, ECONNRESET
);
6481 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6482 struct sk_buff
*skb
)
6486 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6489 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6490 L2CAP_TXSEQ_EXPECTED
) {
6491 l2cap_pass_to_tx(chan
, control
);
6493 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6494 __next_seq(chan
, chan
->buffer_seq
));
6496 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6498 l2cap_reassemble_sdu(chan
, skb
, control
);
6501 kfree_skb(chan
->sdu
);
6504 chan
->sdu_last_frag
= NULL
;
6508 BT_DBG("Freeing %p", skb
);
6513 chan
->last_acked_seq
= control
->txseq
;
6514 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6519 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6521 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6525 __unpack_control(chan
, skb
);
6530 * We can just drop the corrupted I-frame here.
6531 * Receiver will miss it and start proper recovery
6532 * procedures and ask for retransmission.
6534 if (l2cap_check_fcs(chan
, skb
))
6537 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6538 len
-= L2CAP_SDULEN_SIZE
;
6540 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6541 len
-= L2CAP_FCS_SIZE
;
6543 if (len
> chan
->mps
) {
6544 l2cap_send_disconn_req(chan
, ECONNRESET
);
6548 if (!control
->sframe
) {
6551 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6552 control
->sar
, control
->reqseq
, control
->final
,
6555 /* Validate F-bit - F=0 always valid, F=1 only
6556 * valid in TX WAIT_F
6558 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6561 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6562 event
= L2CAP_EV_RECV_IFRAME
;
6563 err
= l2cap_rx(chan
, control
, skb
, event
);
6565 err
= l2cap_stream_rx(chan
, control
, skb
);
6569 l2cap_send_disconn_req(chan
, ECONNRESET
);
6571 const u8 rx_func_to_event
[4] = {
6572 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6573 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6576 /* Only I-frames are expected in streaming mode */
6577 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6580 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6581 control
->reqseq
, control
->final
, control
->poll
,
6585 BT_ERR("Trailing bytes: %d in sframe", len
);
6586 l2cap_send_disconn_req(chan
, ECONNRESET
);
6590 /* Validate F and P bits */
6591 if (control
->final
&& (control
->poll
||
6592 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6595 event
= rx_func_to_event
[control
->super
];
6596 if (l2cap_rx(chan
, control
, skb
, event
))
6597 l2cap_send_disconn_req(chan
, ECONNRESET
);
6607 static void l2cap_chan_le_send_credits(struct l2cap_chan
*chan
)
6609 struct l2cap_conn
*conn
= chan
->conn
;
6610 struct l2cap_le_credits pkt
;
6613 /* We return more credits to the sender only after the amount of
6614 * credits falls below half of the initial amount.
6616 if (chan
->rx_credits
>= (le_max_credits
+ 1) / 2)
6619 return_credits
= le_max_credits
- chan
->rx_credits
;
6621 BT_DBG("chan %p returning %u credits to sender", chan
, return_credits
);
6623 chan
->rx_credits
+= return_credits
;
6625 pkt
.cid
= cpu_to_le16(chan
->scid
);
6626 pkt
.credits
= cpu_to_le16(return_credits
);
6628 chan
->ident
= l2cap_get_ident(conn
);
6630 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CREDITS
, sizeof(pkt
), &pkt
);
6633 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6637 if (!chan
->rx_credits
) {
6638 BT_ERR("No credits to receive LE L2CAP data");
6639 l2cap_send_disconn_req(chan
, ECONNRESET
);
6643 if (chan
->imtu
< skb
->len
) {
6644 BT_ERR("Too big LE L2CAP PDU");
6649 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6651 l2cap_chan_le_send_credits(chan
);
6658 sdu_len
= get_unaligned_le16(skb
->data
);
6659 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6661 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6662 sdu_len
, skb
->len
, chan
->imtu
);
6664 if (sdu_len
> chan
->imtu
) {
6665 BT_ERR("Too big LE L2CAP SDU length received");
6670 if (skb
->len
> sdu_len
) {
6671 BT_ERR("Too much LE L2CAP data received");
6676 if (skb
->len
== sdu_len
)
6677 return chan
->ops
->recv(chan
, skb
);
6680 chan
->sdu_len
= sdu_len
;
6681 chan
->sdu_last_frag
= skb
;
6686 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6687 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6689 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6690 BT_ERR("Too much LE L2CAP data received");
6695 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6698 if (chan
->sdu
->len
== chan
->sdu_len
) {
6699 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6702 chan
->sdu_last_frag
= NULL
;
6710 kfree_skb(chan
->sdu
);
6712 chan
->sdu_last_frag
= NULL
;
6716 /* We can't return an error here since we took care of the skb
6717 * freeing internally. An error return would cause the caller to
6718 * do a double-free of the skb.
6723 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6724 struct sk_buff
*skb
)
6726 struct l2cap_chan
*chan
;
6728 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6730 if (cid
== L2CAP_CID_A2MP
) {
6731 chan
= a2mp_channel_create(conn
, skb
);
6737 l2cap_chan_lock(chan
);
6739 BT_DBG("unknown cid 0x%4.4x", cid
);
6740 /* Drop packet and return */
6746 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6748 if (chan
->state
!= BT_CONNECTED
)
6751 switch (chan
->mode
) {
6752 case L2CAP_MODE_LE_FLOWCTL
:
6753 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6758 case L2CAP_MODE_BASIC
:
6759 /* If socket recv buffers overflows we drop data here
6760 * which is *bad* because L2CAP has to be reliable.
6761 * But we don't have any other choice. L2CAP doesn't
6762 * provide flow control mechanism. */
6764 if (chan
->imtu
< skb
->len
) {
6765 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6769 if (!chan
->ops
->recv(chan
, skb
))
6773 case L2CAP_MODE_ERTM
:
6774 case L2CAP_MODE_STREAMING
:
6775 l2cap_data_rcv(chan
, skb
);
6779 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6787 l2cap_chan_unlock(chan
);
6790 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6791 struct sk_buff
*skb
)
6793 struct hci_conn
*hcon
= conn
->hcon
;
6794 struct l2cap_chan
*chan
;
6796 if (hcon
->type
!= ACL_LINK
)
6799 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
6804 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6806 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6809 if (chan
->imtu
< skb
->len
)
6812 /* Store remote BD_ADDR and PSM for msg_name */
6813 bacpy(&bt_cb(skb
)->bdaddr
, &hcon
->dst
);
6814 bt_cb(skb
)->psm
= psm
;
6816 if (!chan
->ops
->recv(chan
, skb
)) {
6817 l2cap_chan_put(chan
);
6822 l2cap_chan_put(chan
);
6827 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6829 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6830 struct hci_conn
*hcon
= conn
->hcon
;
6834 if (hcon
->state
!= BT_CONNECTED
) {
6835 BT_DBG("queueing pending rx skb");
6836 skb_queue_tail(&conn
->pending_rx
, skb
);
6840 skb_pull(skb
, L2CAP_HDR_SIZE
);
6841 cid
= __le16_to_cpu(lh
->cid
);
6842 len
= __le16_to_cpu(lh
->len
);
6844 if (len
!= skb
->len
) {
6849 /* Since we can't actively block incoming LE connections we must
6850 * at least ensure that we ignore incoming data from them.
6852 if (hcon
->type
== LE_LINK
&&
6853 hci_bdaddr_list_lookup(&hcon
->hdev
->blacklist
, &hcon
->dst
,
6854 bdaddr_type(hcon
, hcon
->dst_type
))) {
6859 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6862 case L2CAP_CID_SIGNALING
:
6863 l2cap_sig_channel(conn
, skb
);
6866 case L2CAP_CID_CONN_LESS
:
6867 psm
= get_unaligned((__le16
*) skb
->data
);
6868 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6869 l2cap_conless_channel(conn
, psm
, skb
);
6872 case L2CAP_CID_LE_SIGNALING
:
6873 l2cap_le_sig_channel(conn
, skb
);
6877 l2cap_data_channel(conn
, cid
, skb
);
6882 static void process_pending_rx(struct work_struct
*work
)
6884 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
6886 struct sk_buff
*skb
;
6890 while ((skb
= skb_dequeue(&conn
->pending_rx
)))
6891 l2cap_recv_frame(conn
, skb
);
6894 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
6896 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6897 struct hci_chan
*hchan
;
6902 hchan
= hci_chan_create(hcon
);
6906 conn
= kzalloc(sizeof(*conn
), GFP_KERNEL
);
6908 hci_chan_del(hchan
);
6912 kref_init(&conn
->ref
);
6913 hcon
->l2cap_data
= conn
;
6914 conn
->hcon
= hci_conn_get(hcon
);
6915 conn
->hchan
= hchan
;
6917 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
6919 switch (hcon
->type
) {
6921 if (hcon
->hdev
->le_mtu
) {
6922 conn
->mtu
= hcon
->hdev
->le_mtu
;
6927 conn
->mtu
= hcon
->hdev
->acl_mtu
;
6931 conn
->feat_mask
= 0;
6933 if (hcon
->type
== ACL_LINK
)
6934 conn
->hs_enabled
= test_bit(HCI_HS_ENABLED
,
6935 &hcon
->hdev
->dev_flags
);
6937 mutex_init(&conn
->ident_lock
);
6938 mutex_init(&conn
->chan_lock
);
6940 INIT_LIST_HEAD(&conn
->chan_l
);
6941 INIT_LIST_HEAD(&conn
->users
);
6943 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
6945 skb_queue_head_init(&conn
->pending_rx
);
6946 INIT_WORK(&conn
->pending_rx_work
, process_pending_rx
);
6947 INIT_WORK(&conn
->id_addr_update_work
, l2cap_conn_update_id_addr
);
6949 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
6954 static bool is_valid_psm(u16 psm
, u8 dst_type
) {
6958 if (bdaddr_type_is_le(dst_type
))
6959 return (psm
<= 0x00ff);
6961 /* PSM must be odd and lsb of upper byte must be 0 */
6962 return ((psm
& 0x0101) == 0x0001);
6965 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
6966 bdaddr_t
*dst
, u8 dst_type
)
6968 struct l2cap_conn
*conn
;
6969 struct hci_conn
*hcon
;
6970 struct hci_dev
*hdev
;
6973 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
6974 dst_type
, __le16_to_cpu(psm
));
6976 hdev
= hci_get_route(dst
, &chan
->src
);
6978 return -EHOSTUNREACH
;
6982 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
6983 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
6988 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !psm
) {
6993 if (chan
->chan_type
== L2CAP_CHAN_FIXED
&& !cid
) {
6998 switch (chan
->mode
) {
6999 case L2CAP_MODE_BASIC
:
7001 case L2CAP_MODE_LE_FLOWCTL
:
7002 l2cap_le_flowctl_init(chan
);
7004 case L2CAP_MODE_ERTM
:
7005 case L2CAP_MODE_STREAMING
:
7014 switch (chan
->state
) {
7018 /* Already connecting */
7023 /* Already connected */
7037 /* Set destination address and psm */
7038 bacpy(&chan
->dst
, dst
);
7039 chan
->dst_type
= dst_type
;
7044 if (bdaddr_type_is_le(dst_type
)) {
7047 /* Convert from L2CAP channel address type to HCI address type
7049 if (dst_type
== BDADDR_LE_PUBLIC
)
7050 dst_type
= ADDR_LE_DEV_PUBLIC
;
7052 dst_type
= ADDR_LE_DEV_RANDOM
;
7054 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
7055 role
= HCI_ROLE_SLAVE
;
7057 role
= HCI_ROLE_MASTER
;
7059 hcon
= hci_connect_le(hdev
, dst
, dst_type
, chan
->sec_level
,
7060 HCI_LE_CONN_TIMEOUT
, role
);
7062 u8 auth_type
= l2cap_get_auth_type(chan
);
7063 hcon
= hci_connect_acl(hdev
, dst
, chan
->sec_level
, auth_type
);
7067 err
= PTR_ERR(hcon
);
7071 conn
= l2cap_conn_add(hcon
);
7073 hci_conn_drop(hcon
);
7078 mutex_lock(&conn
->chan_lock
);
7079 l2cap_chan_lock(chan
);
7081 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
7082 hci_conn_drop(hcon
);
7087 /* Update source addr of the socket */
7088 bacpy(&chan
->src
, &hcon
->src
);
7089 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
7091 __l2cap_chan_add(conn
, chan
);
7093 /* l2cap_chan_add takes its own ref so we can drop this one */
7094 hci_conn_drop(hcon
);
7096 l2cap_state_change(chan
, BT_CONNECT
);
7097 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
7099 /* Release chan->sport so that it can be reused by other
7100 * sockets (as it's only used for listening sockets).
7102 write_lock(&chan_list_lock
);
7104 write_unlock(&chan_list_lock
);
7106 if (hcon
->state
== BT_CONNECTED
) {
7107 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
7108 __clear_chan_timer(chan
);
7109 if (l2cap_chan_check_security(chan
, true))
7110 l2cap_state_change(chan
, BT_CONNECTED
);
7112 l2cap_do_start(chan
);
7118 l2cap_chan_unlock(chan
);
7119 mutex_unlock(&conn
->chan_lock
);
7121 hci_dev_unlock(hdev
);
7125 EXPORT_SYMBOL_GPL(l2cap_chan_connect
);
7127 /* ---- L2CAP interface with lower layer (HCI) ---- */
7129 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
7131 int exact
= 0, lm1
= 0, lm2
= 0;
7132 struct l2cap_chan
*c
;
7134 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
7136 /* Find listening sockets and check their link_mode */
7137 read_lock(&chan_list_lock
);
7138 list_for_each_entry(c
, &chan_list
, global_l
) {
7139 if (c
->state
!= BT_LISTEN
)
7142 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
7143 lm1
|= HCI_LM_ACCEPT
;
7144 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7145 lm1
|= HCI_LM_MASTER
;
7147 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
7148 lm2
|= HCI_LM_ACCEPT
;
7149 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7150 lm2
|= HCI_LM_MASTER
;
7153 read_unlock(&chan_list_lock
);
7155 return exact
? lm1
: lm2
;
7158 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7159 * from an existing channel in the list or from the beginning of the
7160 * global list (by passing NULL as first parameter).
7162 static struct l2cap_chan
*l2cap_global_fixed_chan(struct l2cap_chan
*c
,
7163 bdaddr_t
*src
, u8 link_type
)
7165 read_lock(&chan_list_lock
);
7168 c
= list_next_entry(c
, global_l
);
7170 c
= list_entry(chan_list
.next
, typeof(*c
), global_l
);
7172 list_for_each_entry_from(c
, &chan_list
, global_l
) {
7173 if (c
->chan_type
!= L2CAP_CHAN_FIXED
)
7175 if (c
->state
!= BT_LISTEN
)
7177 if (bacmp(&c
->src
, src
) && bacmp(&c
->src
, BDADDR_ANY
))
7179 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
7181 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
7185 read_unlock(&chan_list_lock
);
7189 read_unlock(&chan_list_lock
);
7194 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7196 struct hci_dev
*hdev
= hcon
->hdev
;
7197 struct l2cap_conn
*conn
;
7198 struct l2cap_chan
*pchan
;
7201 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7204 l2cap_conn_del(hcon
, bt_to_errno(status
));
7208 conn
= l2cap_conn_add(hcon
);
7212 dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
7214 /* If device is blocked, do not create channels for it */
7215 if (hci_bdaddr_list_lookup(&hdev
->blacklist
, &hcon
->dst
, dst_type
))
7218 /* Find fixed channels and notify them of the new connection. We
7219 * use multiple individual lookups, continuing each time where
7220 * we left off, because the list lock would prevent calling the
7221 * potentially sleeping l2cap_chan_lock() function.
7223 pchan
= l2cap_global_fixed_chan(NULL
, &hdev
->bdaddr
, hcon
->type
);
7225 struct l2cap_chan
*chan
, *next
;
7227 /* Client fixed channels should override server ones */
7228 if (__l2cap_get_chan_by_dcid(conn
, pchan
->scid
))
7231 l2cap_chan_lock(pchan
);
7232 chan
= pchan
->ops
->new_connection(pchan
);
7234 bacpy(&chan
->src
, &hcon
->src
);
7235 bacpy(&chan
->dst
, &hcon
->dst
);
7236 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
7237 chan
->dst_type
= dst_type
;
7239 __l2cap_chan_add(conn
, chan
);
7242 l2cap_chan_unlock(pchan
);
7244 next
= l2cap_global_fixed_chan(pchan
, &hdev
->bdaddr
,
7246 l2cap_chan_put(pchan
);
7250 l2cap_conn_ready(conn
);
7253 int l2cap_disconn_ind(struct hci_conn
*hcon
)
7255 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7257 BT_DBG("hcon %p", hcon
);
7260 return HCI_ERROR_REMOTE_USER_TERM
;
7261 return conn
->disc_reason
;
7264 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
7266 BT_DBG("hcon %p reason %d", hcon
, reason
);
7268 l2cap_conn_del(hcon
, bt_to_errno(reason
));
7271 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
7273 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
7276 if (encrypt
== 0x00) {
7277 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
7278 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
7279 } else if (chan
->sec_level
== BT_SECURITY_HIGH
||
7280 chan
->sec_level
== BT_SECURITY_FIPS
)
7281 l2cap_chan_close(chan
, ECONNREFUSED
);
7283 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
7284 __clear_chan_timer(chan
);
7288 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
7290 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7291 struct l2cap_chan
*chan
;
7296 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
7298 mutex_lock(&conn
->chan_lock
);
7300 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
7301 l2cap_chan_lock(chan
);
7303 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
7304 state_to_string(chan
->state
));
7306 if (chan
->scid
== L2CAP_CID_A2MP
) {
7307 l2cap_chan_unlock(chan
);
7311 if (!status
&& encrypt
)
7312 chan
->sec_level
= hcon
->sec_level
;
7314 if (!__l2cap_no_conn_pending(chan
)) {
7315 l2cap_chan_unlock(chan
);
7319 if (!status
&& (chan
->state
== BT_CONNECTED
||
7320 chan
->state
== BT_CONFIG
)) {
7321 chan
->ops
->resume(chan
);
7322 l2cap_check_encryption(chan
, encrypt
);
7323 l2cap_chan_unlock(chan
);
7327 if (chan
->state
== BT_CONNECT
) {
7329 l2cap_start_connection(chan
);
7331 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7332 } else if (chan
->state
== BT_CONNECT2
) {
7333 struct l2cap_conn_rsp rsp
;
7337 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
7338 res
= L2CAP_CR_PEND
;
7339 stat
= L2CAP_CS_AUTHOR_PEND
;
7340 chan
->ops
->defer(chan
);
7342 l2cap_state_change(chan
, BT_CONFIG
);
7343 res
= L2CAP_CR_SUCCESS
;
7344 stat
= L2CAP_CS_NO_INFO
;
7347 l2cap_state_change(chan
, BT_DISCONN
);
7348 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7349 res
= L2CAP_CR_SEC_BLOCK
;
7350 stat
= L2CAP_CS_NO_INFO
;
7353 rsp
.scid
= cpu_to_le16(chan
->dcid
);
7354 rsp
.dcid
= cpu_to_le16(chan
->scid
);
7355 rsp
.result
= cpu_to_le16(res
);
7356 rsp
.status
= cpu_to_le16(stat
);
7357 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
7360 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
7361 res
== L2CAP_CR_SUCCESS
) {
7363 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
7364 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
7366 l2cap_build_conf_req(chan
, buf
),
7368 chan
->num_conf_req
++;
7372 l2cap_chan_unlock(chan
);
7375 mutex_unlock(&conn
->chan_lock
);
7380 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
7382 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7383 struct l2cap_hdr
*hdr
;
7386 /* For AMP controller do not create l2cap conn */
7387 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
7391 conn
= l2cap_conn_add(hcon
);
7396 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
7400 case ACL_START_NO_FLUSH
:
7403 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
7404 kfree_skb(conn
->rx_skb
);
7405 conn
->rx_skb
= NULL
;
7407 l2cap_conn_unreliable(conn
, ECOMM
);
7410 /* Start fragment always begin with Basic L2CAP header */
7411 if (skb
->len
< L2CAP_HDR_SIZE
) {
7412 BT_ERR("Frame is too short (len %d)", skb
->len
);
7413 l2cap_conn_unreliable(conn
, ECOMM
);
7417 hdr
= (struct l2cap_hdr
*) skb
->data
;
7418 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7420 if (len
== skb
->len
) {
7421 /* Complete frame received */
7422 l2cap_recv_frame(conn
, skb
);
7426 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7428 if (skb
->len
> len
) {
7429 BT_ERR("Frame is too long (len %d, expected len %d)",
7431 l2cap_conn_unreliable(conn
, ECOMM
);
7435 /* Allocate skb for the complete frame (with header) */
7436 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7440 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7442 conn
->rx_len
= len
- skb
->len
;
7446 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7448 if (!conn
->rx_len
) {
7449 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7450 l2cap_conn_unreliable(conn
, ECOMM
);
7454 if (skb
->len
> conn
->rx_len
) {
7455 BT_ERR("Fragment is too long (len %d, expected %d)",
7456 skb
->len
, conn
->rx_len
);
7457 kfree_skb(conn
->rx_skb
);
7458 conn
->rx_skb
= NULL
;
7460 l2cap_conn_unreliable(conn
, ECOMM
);
7464 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7466 conn
->rx_len
-= skb
->len
;
7468 if (!conn
->rx_len
) {
7469 /* Complete frame received. l2cap_recv_frame
7470 * takes ownership of the skb so set the global
7471 * rx_skb pointer to NULL first.
7473 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7474 conn
->rx_skb
= NULL
;
7475 l2cap_recv_frame(conn
, rx_skb
);
7485 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
7487 struct l2cap_chan
*c
;
7489 read_lock(&chan_list_lock
);
7491 list_for_each_entry(c
, &chan_list
, global_l
) {
7492 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7494 c
->state
, __le16_to_cpu(c
->psm
),
7495 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
7496 c
->sec_level
, c
->mode
);
7499 read_unlock(&chan_list_lock
);
7504 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
7506 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
7509 static const struct file_operations l2cap_debugfs_fops
= {
7510 .open
= l2cap_debugfs_open
,
7512 .llseek
= seq_lseek
,
7513 .release
= single_release
,
7516 static struct dentry
*l2cap_debugfs
;
7518 int __init
l2cap_init(void)
7522 err
= l2cap_init_sockets();
7526 if (IS_ERR_OR_NULL(bt_debugfs
))
7529 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
7530 NULL
, &l2cap_debugfs_fops
);
7532 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs
,
7534 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs
,
7540 void l2cap_exit(void)
7542 debugfs_remove(l2cap_debugfs
);
7543 l2cap_cleanup_sockets();
7546 module_param(disable_ertm
, bool, 0644);
7547 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");