2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
47 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
| L2CAP_FC_CONNLESS
, };
49 static LIST_HEAD(chan_list
);
50 static DEFINE_RWLOCK(chan_list_lock
);
52 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
53 u8 code
, u8 ident
, u16 dlen
, void *data
);
54 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
56 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
57 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
59 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
60 struct sk_buff_head
*skbs
, u8 event
);
62 static inline __u8
bdaddr_type(struct hci_conn
*hcon
, __u8 type
)
64 if (hcon
->type
== LE_LINK
) {
65 if (type
== ADDR_LE_DEV_PUBLIC
)
66 return BDADDR_LE_PUBLIC
;
68 return BDADDR_LE_RANDOM
;
74 /* ---- L2CAP channels ---- */
76 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
81 list_for_each_entry(c
, &conn
->chan_l
, list
) {
88 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
93 list_for_each_entry(c
, &conn
->chan_l
, list
) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
105 struct l2cap_chan
*c
;
107 mutex_lock(&conn
->chan_lock
);
108 c
= __l2cap_get_chan_by_scid(conn
, cid
);
111 mutex_unlock(&conn
->chan_lock
);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
122 struct l2cap_chan
*c
;
124 mutex_lock(&conn
->chan_lock
);
125 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
128 mutex_unlock(&conn
->chan_lock
);
133 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
136 struct l2cap_chan
*c
;
138 list_for_each_entry(c
, &conn
->chan_l
, list
) {
139 if (c
->ident
== ident
)
145 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
148 struct l2cap_chan
*c
;
150 mutex_lock(&conn
->chan_lock
);
151 c
= __l2cap_get_chan_by_ident(conn
, ident
);
154 mutex_unlock(&conn
->chan_lock
);
159 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
161 struct l2cap_chan
*c
;
163 list_for_each_entry(c
, &chan_list
, global_l
) {
164 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
170 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
174 write_lock(&chan_list_lock
);
176 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
189 for (p
= 0x1001; p
< 0x1100; p
+= 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
191 chan
->psm
= cpu_to_le16(p
);
192 chan
->sport
= cpu_to_le16(p
);
199 write_unlock(&chan_list_lock
);
203 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
205 write_lock(&chan_list_lock
);
209 write_unlock(&chan_list_lock
);
214 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
216 u16 cid
= L2CAP_CID_DYN_START
;
218 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
219 if (!__l2cap_get_chan_by_scid(conn
, cid
))
226 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
228 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
229 state_to_string(state
));
232 chan
->ops
->state_change(chan
, state
, 0);
235 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
237 struct sock
*sk
= chan
->sk
;
240 __l2cap_state_change(chan
, state
);
244 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
247 struct sock
*sk
= chan
->sk
;
251 chan
->ops
->state_change(chan
, chan
->state
, err
);
255 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
257 struct sock
*sk
= chan
->sk
;
260 chan
->ops
->state_change(chan
, chan
->state
, err
);
264 static void __set_retrans_timer(struct l2cap_chan
*chan
)
266 if (!delayed_work_pending(&chan
->monitor_timer
) &&
267 chan
->retrans_timeout
) {
268 l2cap_set_timer(chan
, &chan
->retrans_timer
,
269 msecs_to_jiffies(chan
->retrans_timeout
));
273 static void __set_monitor_timer(struct l2cap_chan
*chan
)
275 __clear_retrans_timer(chan
);
276 if (chan
->monitor_timeout
) {
277 l2cap_set_timer(chan
, &chan
->monitor_timer
,
278 msecs_to_jiffies(chan
->monitor_timeout
));
282 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
287 skb_queue_walk(head
, skb
) {
288 if (bt_cb(skb
)->control
.txseq
== seq
)
295 /* ---- L2CAP sequence number lists ---- */
297 /* For ERTM, ordered lists of sequence numbers must be tracked for
298 * SREJ requests that are received and for frames that are to be
299 * retransmitted. These seq_list functions implement a singly-linked
300 * list in an array, where membership in the list can also be checked
301 * in constant time. Items can also be added to the tail of the list
302 * and removed from the head in constant time, without further memory
306 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
308 size_t alloc_size
, i
;
310 /* Allocated size is a power of 2 to map sequence numbers
311 * (which may be up to 14 bits) in to a smaller array that is
312 * sized for the negotiated ERTM transmit windows.
314 alloc_size
= roundup_pow_of_two(size
);
316 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
320 seq_list
->mask
= alloc_size
- 1;
321 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
322 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
323 for (i
= 0; i
< alloc_size
; i
++)
324 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
329 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
331 kfree(seq_list
->list
);
334 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
337 /* Constant-time check for list membership */
338 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
341 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
343 u16 mask
= seq_list
->mask
;
345 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
346 /* In case someone tries to pop the head of an empty list */
347 return L2CAP_SEQ_LIST_CLEAR
;
348 } else if (seq_list
->head
== seq
) {
349 /* Head can be removed in constant time */
350 seq_list
->head
= seq_list
->list
[seq
& mask
];
351 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
353 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
354 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
355 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
358 /* Walk the list to find the sequence number */
359 u16 prev
= seq_list
->head
;
360 while (seq_list
->list
[prev
& mask
] != seq
) {
361 prev
= seq_list
->list
[prev
& mask
];
362 if (prev
== L2CAP_SEQ_LIST_TAIL
)
363 return L2CAP_SEQ_LIST_CLEAR
;
366 /* Unlink the number from the list and clear it */
367 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
368 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
369 if (seq_list
->tail
== seq
)
370 seq_list
->tail
= prev
;
375 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
377 /* Remove the head in constant time */
378 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
381 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
385 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
388 for (i
= 0; i
<= seq_list
->mask
; i
++)
389 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
391 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
392 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
395 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
397 u16 mask
= seq_list
->mask
;
399 /* All appends happen in constant time */
401 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
404 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
405 seq_list
->head
= seq
;
407 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
409 seq_list
->tail
= seq
;
410 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state, then drop the timer's channel
 * reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
443 struct l2cap_chan
*l2cap_chan_create(void)
445 struct l2cap_chan
*chan
;
447 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
451 mutex_init(&chan
->lock
);
453 write_lock(&chan_list_lock
);
454 list_add(&chan
->global_l
, &chan_list
);
455 write_unlock(&chan_list_lock
);
457 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
459 chan
->state
= BT_OPEN
;
461 kref_init(&chan
->kref
);
463 /* This flag is cleared in l2cap_chan_ready() */
464 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
466 BT_DBG("chan %p", chan
);
471 static void l2cap_chan_destroy(struct kref
*kref
)
473 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
475 BT_DBG("chan %p", chan
);
477 write_lock(&chan_list_lock
);
478 list_del(&chan
->global_l
);
479 write_unlock(&chan_list_lock
);
484 void l2cap_chan_hold(struct l2cap_chan
*c
)
486 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
491 void l2cap_chan_put(struct l2cap_chan
*c
)
493 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
495 kref_put(&c
->kref
, l2cap_chan_destroy
);
498 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
500 chan
->fcs
= L2CAP_FCS_CRC16
;
501 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
502 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
503 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
504 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
505 chan
->sec_level
= BT_SECURITY_LOW
;
507 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
510 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
512 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
513 __le16_to_cpu(chan
->psm
), chan
->dcid
);
515 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
519 switch (chan
->chan_type
) {
520 case L2CAP_CHAN_CONN_ORIENTED
:
521 if (conn
->hcon
->type
== LE_LINK
) {
523 chan
->omtu
= L2CAP_DEFAULT_MTU
;
524 if (chan
->dcid
== L2CAP_CID_ATT
)
525 chan
->scid
= L2CAP_CID_ATT
;
527 chan
->scid
= l2cap_alloc_cid(conn
);
529 /* Alloc CID for connection-oriented socket */
530 chan
->scid
= l2cap_alloc_cid(conn
);
531 chan
->omtu
= L2CAP_DEFAULT_MTU
;
535 case L2CAP_CHAN_CONN_LESS
:
536 /* Connectionless socket */
537 chan
->scid
= L2CAP_CID_CONN_LESS
;
538 chan
->dcid
= L2CAP_CID_CONN_LESS
;
539 chan
->omtu
= L2CAP_DEFAULT_MTU
;
542 case L2CAP_CHAN_CONN_FIX_A2MP
:
543 chan
->scid
= L2CAP_CID_A2MP
;
544 chan
->dcid
= L2CAP_CID_A2MP
;
545 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
546 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
550 /* Raw socket can send/recv signalling messages only */
551 chan
->scid
= L2CAP_CID_SIGNALING
;
552 chan
->dcid
= L2CAP_CID_SIGNALING
;
553 chan
->omtu
= L2CAP_DEFAULT_MTU
;
556 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
557 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
558 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
559 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
560 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
561 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
563 l2cap_chan_hold(chan
);
565 hci_conn_hold(conn
->hcon
);
567 list_add(&chan
->list
, &conn
->chan_l
);
570 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
572 mutex_lock(&conn
->chan_lock
);
573 __l2cap_chan_add(conn
, chan
);
574 mutex_unlock(&conn
->chan_lock
);
577 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
579 struct l2cap_conn
*conn
= chan
->conn
;
581 __clear_chan_timer(chan
);
583 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
586 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
587 /* Delete from channel list */
588 list_del(&chan
->list
);
590 l2cap_chan_put(chan
);
594 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
595 hci_conn_drop(conn
->hcon
);
597 if (mgr
&& mgr
->bredr_chan
== chan
)
598 mgr
->bredr_chan
= NULL
;
601 if (chan
->hs_hchan
) {
602 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
604 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
605 amp_disconnect_logical_link(hs_hchan
);
608 chan
->ops
->teardown(chan
, err
);
610 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
614 case L2CAP_MODE_BASIC
:
617 case L2CAP_MODE_ERTM
:
618 __clear_retrans_timer(chan
);
619 __clear_monitor_timer(chan
);
620 __clear_ack_timer(chan
);
622 skb_queue_purge(&chan
->srej_q
);
624 l2cap_seq_list_free(&chan
->srej_list
);
625 l2cap_seq_list_free(&chan
->retrans_list
);
629 case L2CAP_MODE_STREAMING
:
630 skb_queue_purge(&chan
->tx_q
);
637 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
639 struct l2cap_conn
*conn
= chan
->conn
;
641 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
643 switch (chan
->state
) {
645 chan
->ops
->teardown(chan
, 0);
650 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
651 conn
->hcon
->type
== ACL_LINK
) {
652 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
653 l2cap_send_disconn_req(chan
, reason
);
655 l2cap_chan_del(chan
, reason
);
659 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
660 conn
->hcon
->type
== ACL_LINK
) {
661 struct l2cap_conn_rsp rsp
;
664 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
665 result
= L2CAP_CR_SEC_BLOCK
;
667 result
= L2CAP_CR_BAD_PSM
;
669 l2cap_state_change(chan
, BT_DISCONN
);
671 rsp
.scid
= cpu_to_le16(chan
->dcid
);
672 rsp
.dcid
= cpu_to_le16(chan
->scid
);
673 rsp
.result
= cpu_to_le16(result
);
674 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
675 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
679 l2cap_chan_del(chan
, reason
);
684 l2cap_chan_del(chan
, reason
);
688 chan
->ops
->teardown(chan
, 0);
693 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
695 switch (chan
->chan_type
) {
697 switch (chan
->sec_level
) {
698 case BT_SECURITY_HIGH
:
699 return HCI_AT_DEDICATED_BONDING_MITM
;
700 case BT_SECURITY_MEDIUM
:
701 return HCI_AT_DEDICATED_BONDING
;
703 return HCI_AT_NO_BONDING
;
706 case L2CAP_CHAN_CONN_LESS
:
707 if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_3DSP
)) {
708 if (chan
->sec_level
== BT_SECURITY_LOW
)
709 chan
->sec_level
= BT_SECURITY_SDP
;
711 if (chan
->sec_level
== BT_SECURITY_HIGH
)
712 return HCI_AT_NO_BONDING_MITM
;
714 return HCI_AT_NO_BONDING
;
716 case L2CAP_CHAN_CONN_ORIENTED
:
717 if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
718 if (chan
->sec_level
== BT_SECURITY_LOW
)
719 chan
->sec_level
= BT_SECURITY_SDP
;
721 if (chan
->sec_level
== BT_SECURITY_HIGH
)
722 return HCI_AT_NO_BONDING_MITM
;
724 return HCI_AT_NO_BONDING
;
728 switch (chan
->sec_level
) {
729 case BT_SECURITY_HIGH
:
730 return HCI_AT_GENERAL_BONDING_MITM
;
731 case BT_SECURITY_MEDIUM
:
732 return HCI_AT_GENERAL_BONDING
;
734 return HCI_AT_NO_BONDING
;
740 /* Service level security */
741 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
743 struct l2cap_conn
*conn
= chan
->conn
;
746 auth_type
= l2cap_get_auth_type(chan
);
748 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
751 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
755 /* Get next available identificator.
756 * 1 - 128 are used by kernel.
757 * 129 - 199 are reserved.
758 * 200 - 254 are used by utilities like l2ping, etc.
761 spin_lock(&conn
->lock
);
763 if (++conn
->tx_ident
> 128)
768 spin_unlock(&conn
->lock
);
773 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
776 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
779 BT_DBG("code 0x%2.2x", code
);
784 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
785 flags
= ACL_START_NO_FLUSH
;
789 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
790 skb
->priority
= HCI_PRIO_MAX
;
792 hci_send_acl(conn
->hchan
, skb
, flags
);
795 static bool __chan_is_moving(struct l2cap_chan
*chan
)
797 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
798 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
801 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
803 struct hci_conn
*hcon
= chan
->conn
->hcon
;
806 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
809 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
811 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
818 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
819 lmp_no_flush_capable(hcon
->hdev
))
820 flags
= ACL_START_NO_FLUSH
;
824 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
825 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
828 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
830 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
831 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
833 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
836 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
837 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
844 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
845 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
852 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
854 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
855 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
857 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
860 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
861 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
868 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
869 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
876 static inline void __unpack_control(struct l2cap_chan
*chan
,
879 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
880 __unpack_extended_control(get_unaligned_le32(skb
->data
),
881 &bt_cb(skb
)->control
);
882 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
884 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
885 &bt_cb(skb
)->control
);
886 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
890 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
894 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
895 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
897 if (control
->sframe
) {
898 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
899 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
900 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
902 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
903 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
909 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
913 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
914 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
916 if (control
->sframe
) {
917 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
918 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
919 packed
|= L2CAP_CTRL_FRAME_TYPE
;
921 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
922 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
928 static inline void __pack_control(struct l2cap_chan
*chan
,
929 struct l2cap_ctrl
*control
,
932 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
933 put_unaligned_le32(__pack_extended_control(control
),
934 skb
->data
+ L2CAP_HDR_SIZE
);
936 put_unaligned_le16(__pack_enhanced_control(control
),
937 skb
->data
+ L2CAP_HDR_SIZE
);
941 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
943 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
944 return L2CAP_EXT_HDR_SIZE
;
946 return L2CAP_ENH_HDR_SIZE
;
949 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
953 struct l2cap_hdr
*lh
;
954 int hlen
= __ertm_hdr_size(chan
);
956 if (chan
->fcs
== L2CAP_FCS_CRC16
)
957 hlen
+= L2CAP_FCS_SIZE
;
959 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
962 return ERR_PTR(-ENOMEM
);
964 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
965 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
966 lh
->cid
= cpu_to_le16(chan
->dcid
);
968 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
969 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
971 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
973 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
974 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
975 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
978 skb
->priority
= HCI_PRIO_MAX
;
982 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
983 struct l2cap_ctrl
*control
)
988 BT_DBG("chan %p, control %p", chan
, control
);
990 if (!control
->sframe
)
993 if (__chan_is_moving(chan
))
996 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1000 if (control
->super
== L2CAP_SUPER_RR
)
1001 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1002 else if (control
->super
== L2CAP_SUPER_RNR
)
1003 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1005 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1006 chan
->last_acked_seq
= control
->reqseq
;
1007 __clear_ack_timer(chan
);
1010 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1011 control
->final
, control
->poll
, control
->super
);
1013 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1014 control_field
= __pack_extended_control(control
);
1016 control_field
= __pack_enhanced_control(control
);
1018 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1020 l2cap_do_send(chan
, skb
);
1023 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1025 struct l2cap_ctrl control
;
1027 BT_DBG("chan %p, poll %d", chan
, poll
);
1029 memset(&control
, 0, sizeof(control
));
1031 control
.poll
= poll
;
1033 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1034 control
.super
= L2CAP_SUPER_RNR
;
1036 control
.super
= L2CAP_SUPER_RR
;
1038 control
.reqseq
= chan
->buffer_seq
;
1039 l2cap_send_sframe(chan
, &control
);
1042 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1044 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1047 static bool __amp_capable(struct l2cap_chan
*chan
)
1049 struct l2cap_conn
*conn
= chan
->conn
;
1050 struct hci_dev
*hdev
;
1051 bool amp_available
= false;
1053 if (!conn
->hs_enabled
)
1056 if (!(conn
->fixed_chan_mask
& L2CAP_FC_A2MP
))
1059 read_lock(&hci_dev_list_lock
);
1060 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1061 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1062 test_bit(HCI_UP
, &hdev
->flags
)) {
1063 amp_available
= true;
1067 read_unlock(&hci_dev_list_lock
);
1069 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1070 return amp_available
;
1075 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1077 /* Check EFS parameters */
1081 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1083 struct l2cap_conn
*conn
= chan
->conn
;
1084 struct l2cap_conn_req req
;
1086 req
.scid
= cpu_to_le16(chan
->scid
);
1087 req
.psm
= chan
->psm
;
1089 chan
->ident
= l2cap_get_ident(conn
);
1091 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1093 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1096 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1098 struct l2cap_create_chan_req req
;
1099 req
.scid
= cpu_to_le16(chan
->scid
);
1100 req
.psm
= chan
->psm
;
1101 req
.amp_id
= amp_id
;
1103 chan
->ident
= l2cap_get_ident(chan
->conn
);
1105 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1109 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1111 struct sk_buff
*skb
;
1113 BT_DBG("chan %p", chan
);
1115 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1118 __clear_retrans_timer(chan
);
1119 __clear_monitor_timer(chan
);
1120 __clear_ack_timer(chan
);
1122 chan
->retry_count
= 0;
1123 skb_queue_walk(&chan
->tx_q
, skb
) {
1124 if (bt_cb(skb
)->control
.retries
)
1125 bt_cb(skb
)->control
.retries
= 1;
1130 chan
->expected_tx_seq
= chan
->buffer_seq
;
1132 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1133 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1134 l2cap_seq_list_clear(&chan
->retrans_list
);
1135 l2cap_seq_list_clear(&chan
->srej_list
);
1136 skb_queue_purge(&chan
->srej_q
);
1138 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1139 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1141 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1144 static void l2cap_move_done(struct l2cap_chan
*chan
)
1146 u8 move_role
= chan
->move_role
;
1147 BT_DBG("chan %p", chan
);
1149 chan
->move_state
= L2CAP_MOVE_STABLE
;
1150 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1152 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1155 switch (move_role
) {
1156 case L2CAP_MOVE_ROLE_INITIATOR
:
1157 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1158 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1160 case L2CAP_MOVE_ROLE_RESPONDER
:
1161 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1166 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1168 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1169 chan
->conf_state
= 0;
1170 __clear_chan_timer(chan
);
1172 chan
->state
= BT_CONNECTED
;
1174 chan
->ops
->ready(chan
);
/* Kick off channel establishment: discover AMP controllers first when
 * the channel is AMP capable, otherwise send a plain connect request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1187 static void l2cap_do_start(struct l2cap_chan
*chan
)
1189 struct l2cap_conn
*conn
= chan
->conn
;
1191 if (conn
->hcon
->type
== LE_LINK
) {
1192 l2cap_chan_ready(chan
);
1196 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1197 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1200 if (l2cap_chan_check_security(chan
) &&
1201 __l2cap_no_conn_pending(chan
)) {
1202 l2cap_start_connection(chan
);
1205 struct l2cap_info_req req
;
1206 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1208 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1209 conn
->info_ident
= l2cap_get_ident(conn
);
1211 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1213 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1218 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1220 u32 local_feat_mask
= l2cap_feat_mask
;
1222 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1225 case L2CAP_MODE_ERTM
:
1226 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1227 case L2CAP_MODE_STREAMING
:
1228 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1234 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1236 struct l2cap_conn
*conn
= chan
->conn
;
1237 struct l2cap_disconn_req req
;
1242 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1243 __clear_retrans_timer(chan
);
1244 __clear_monitor_timer(chan
);
1245 __clear_ack_timer(chan
);
1248 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1249 l2cap_state_change(chan
, BT_DISCONN
);
1253 req
.dcid
= cpu_to_le16(chan
->dcid
);
1254 req
.scid
= cpu_to_le16(chan
->scid
);
1255 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1258 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1261 /* ---- L2CAP connections ---- */
1262 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1264 struct l2cap_chan
*chan
, *tmp
;
1266 BT_DBG("conn %p", conn
);
1268 mutex_lock(&conn
->chan_lock
);
1270 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1271 l2cap_chan_lock(chan
);
1273 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1274 l2cap_chan_unlock(chan
);
1278 if (chan
->state
== BT_CONNECT
) {
1279 if (!l2cap_chan_check_security(chan
) ||
1280 !__l2cap_no_conn_pending(chan
)) {
1281 l2cap_chan_unlock(chan
);
1285 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1286 && test_bit(CONF_STATE2_DEVICE
,
1287 &chan
->conf_state
)) {
1288 l2cap_chan_close(chan
, ECONNRESET
);
1289 l2cap_chan_unlock(chan
);
1293 l2cap_start_connection(chan
);
1295 } else if (chan
->state
== BT_CONNECT2
) {
1296 struct l2cap_conn_rsp rsp
;
1298 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1299 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1301 if (l2cap_chan_check_security(chan
)) {
1302 struct sock
*sk
= chan
->sk
;
1305 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1306 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1307 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1308 chan
->ops
->defer(chan
);
1311 __l2cap_state_change(chan
, BT_CONFIG
);
1312 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1313 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1317 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1318 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1321 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1324 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1325 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1326 l2cap_chan_unlock(chan
);
1330 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1331 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1332 l2cap_build_conf_req(chan
, buf
), buf
);
1333 chan
->num_conf_req
++;
1336 l2cap_chan_unlock(chan
);
1339 mutex_unlock(&conn
->chan_lock
);
1342 /* Find socket with cid and source/destination bdaddr.
1343 * Returns closest match, locked.
1345 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1349 struct l2cap_chan
*c
, *c1
= NULL
;
1351 read_lock(&chan_list_lock
);
1353 list_for_each_entry(c
, &chan_list
, global_l
) {
1354 if (state
&& c
->state
!= state
)
1357 if (c
->scid
== cid
) {
1358 int src_match
, dst_match
;
1359 int src_any
, dst_any
;
1362 src_match
= !bacmp(&c
->src
, src
);
1363 dst_match
= !bacmp(&c
->dst
, dst
);
1364 if (src_match
&& dst_match
) {
1365 read_unlock(&chan_list_lock
);
1370 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1371 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1372 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1373 (src_any
&& dst_any
))
1378 read_unlock(&chan_list_lock
);
1383 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1385 struct sock
*parent
;
1386 struct l2cap_chan
*chan
, *pchan
;
1390 /* Check if we have socket listening on cid */
1391 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_ATT
,
1392 &conn
->hcon
->src
, &conn
->hcon
->dst
);
1396 /* Client ATT sockets should override the server one */
1397 if (__l2cap_get_chan_by_dcid(conn
, L2CAP_CID_ATT
))
1404 chan
= pchan
->ops
->new_connection(pchan
);
1408 chan
->dcid
= L2CAP_CID_ATT
;
1410 bacpy(&chan
->src
, &conn
->hcon
->src
);
1411 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
1412 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
1413 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
1415 __l2cap_chan_add(conn
, chan
);
1418 release_sock(parent
);
1421 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1423 struct l2cap_chan
*chan
;
1424 struct hci_conn
*hcon
= conn
->hcon
;
1426 BT_DBG("conn %p", conn
);
1428 /* For outgoing pairing which doesn't necessarily have an
1429 * associated socket (e.g. mgmt_pair_device).
1431 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1432 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1434 mutex_lock(&conn
->chan_lock
);
1436 if (hcon
->type
== LE_LINK
)
1437 l2cap_le_conn_ready(conn
);
1439 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1441 l2cap_chan_lock(chan
);
1443 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1444 l2cap_chan_unlock(chan
);
1448 if (hcon
->type
== LE_LINK
) {
1449 if (smp_conn_security(hcon
, chan
->sec_level
))
1450 l2cap_chan_ready(chan
);
1452 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1453 struct sock
*sk
= chan
->sk
;
1454 __clear_chan_timer(chan
);
1456 __l2cap_state_change(chan
, BT_CONNECTED
);
1457 sk
->sk_state_change(sk
);
1460 } else if (chan
->state
== BT_CONNECT
) {
1461 l2cap_do_start(chan
);
1464 l2cap_chan_unlock(chan
);
1467 mutex_unlock(&conn
->chan_lock
);
1470 /* Notify sockets that we cannot guaranty reliability anymore */
1471 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1473 struct l2cap_chan
*chan
;
1475 BT_DBG("conn %p", conn
);
1477 mutex_lock(&conn
->chan_lock
);
1479 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1480 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1481 l2cap_chan_set_err(chan
, err
);
1484 mutex_unlock(&conn
->chan_lock
);
1487 static void l2cap_info_timeout(struct work_struct
*work
)
1489 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1492 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1493 conn
->info_ident
= 0;
1495 l2cap_conn_start(conn
);
1500 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1501 * callback is called during registration. The ->remove callback is called
1502 * during unregistration.
1503 * An l2cap_user object can either be explicitly unregistered or when the
1504 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1505 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1506 * External modules must own a reference to the l2cap_conn object if they intend
1507 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1508 * any time if they don't.
1511 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1513 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1516 /* We need to check whether l2cap_conn is registered. If it is not, we
1517 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1518 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1519 * relies on the parent hci_conn object to be locked. This itself relies
1520 * on the hci_dev object to be locked. So we must lock the hci device
1525 if (user
->list
.next
|| user
->list
.prev
) {
1530 /* conn->hchan is NULL after l2cap_conn_del() was called */
1536 ret
= user
->probe(conn
, user
);
1540 list_add(&user
->list
, &conn
->users
);
1544 hci_dev_unlock(hdev
);
1547 EXPORT_SYMBOL(l2cap_register_user
);
1549 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1551 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1555 if (!user
->list
.next
|| !user
->list
.prev
)
1558 list_del(&user
->list
);
1559 user
->list
.next
= NULL
;
1560 user
->list
.prev
= NULL
;
1561 user
->remove(conn
, user
);
1564 hci_dev_unlock(hdev
);
1566 EXPORT_SYMBOL(l2cap_unregister_user
);
1568 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1570 struct l2cap_user
*user
;
1572 while (!list_empty(&conn
->users
)) {
1573 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1574 list_del(&user
->list
);
1575 user
->list
.next
= NULL
;
1576 user
->list
.prev
= NULL
;
1577 user
->remove(conn
, user
);
1581 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1583 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1584 struct l2cap_chan
*chan
, *l
;
1589 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1591 kfree_skb(conn
->rx_skb
);
1593 l2cap_unregister_all_users(conn
);
1595 mutex_lock(&conn
->chan_lock
);
1598 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1599 l2cap_chan_hold(chan
);
1600 l2cap_chan_lock(chan
);
1602 l2cap_chan_del(chan
, err
);
1604 l2cap_chan_unlock(chan
);
1606 chan
->ops
->close(chan
);
1607 l2cap_chan_put(chan
);
1610 mutex_unlock(&conn
->chan_lock
);
1612 hci_chan_del(conn
->hchan
);
1614 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1615 cancel_delayed_work_sync(&conn
->info_timer
);
1617 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1618 cancel_delayed_work_sync(&conn
->security_timer
);
1619 smp_chan_destroy(conn
);
1622 hcon
->l2cap_data
= NULL
;
1624 l2cap_conn_put(conn
);
1627 static void security_timeout(struct work_struct
*work
)
1629 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1630 security_timer
.work
);
1632 BT_DBG("conn %p", conn
);
1634 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1635 smp_chan_destroy(conn
);
1636 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1640 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
1642 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1643 struct hci_chan
*hchan
;
1648 hchan
= hci_chan_create(hcon
);
1652 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1654 hci_chan_del(hchan
);
1658 kref_init(&conn
->ref
);
1659 hcon
->l2cap_data
= conn
;
1661 hci_conn_get(conn
->hcon
);
1662 conn
->hchan
= hchan
;
1664 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1666 switch (hcon
->type
) {
1668 if (hcon
->hdev
->le_mtu
) {
1669 conn
->mtu
= hcon
->hdev
->le_mtu
;
1674 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1678 conn
->feat_mask
= 0;
1680 if (hcon
->type
== ACL_LINK
)
1681 conn
->hs_enabled
= test_bit(HCI_HS_ENABLED
,
1682 &hcon
->hdev
->dev_flags
);
1684 spin_lock_init(&conn
->lock
);
1685 mutex_init(&conn
->chan_lock
);
1687 INIT_LIST_HEAD(&conn
->chan_l
);
1688 INIT_LIST_HEAD(&conn
->users
);
1690 if (hcon
->type
== LE_LINK
)
1691 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1693 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1695 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1700 static void l2cap_conn_free(struct kref
*ref
)
1702 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1704 hci_conn_put(conn
->hcon
);
1708 void l2cap_conn_get(struct l2cap_conn
*conn
)
1710 kref_get(&conn
->ref
);
1712 EXPORT_SYMBOL(l2cap_conn_get
);
1714 void l2cap_conn_put(struct l2cap_conn
*conn
)
1716 kref_put(&conn
->ref
, l2cap_conn_free
);
1718 EXPORT_SYMBOL(l2cap_conn_put
);
1720 /* ---- Socket interface ---- */
1722 /* Find socket with psm and source / destination bdaddr.
1723 * Returns closest match.
1725 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1729 struct l2cap_chan
*c
, *c1
= NULL
;
1731 read_lock(&chan_list_lock
);
1733 list_for_each_entry(c
, &chan_list
, global_l
) {
1734 if (state
&& c
->state
!= state
)
1737 if (c
->psm
== psm
) {
1738 int src_match
, dst_match
;
1739 int src_any
, dst_any
;
1742 src_match
= !bacmp(&c
->src
, src
);
1743 dst_match
= !bacmp(&c
->dst
, dst
);
1744 if (src_match
&& dst_match
) {
1745 read_unlock(&chan_list_lock
);
1750 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1751 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1752 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1753 (src_any
&& dst_any
))
1758 read_unlock(&chan_list_lock
);
1763 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1764 bdaddr_t
*dst
, u8 dst_type
)
1766 struct l2cap_conn
*conn
;
1767 struct hci_conn
*hcon
;
1768 struct hci_dev
*hdev
;
1772 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
1773 dst_type
, __le16_to_cpu(psm
));
1775 hdev
= hci_get_route(dst
, &chan
->src
);
1777 return -EHOSTUNREACH
;
1781 l2cap_chan_lock(chan
);
1783 /* PSM must be odd and lsb of upper byte must be 0 */
1784 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1785 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1790 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1795 switch (chan
->mode
) {
1796 case L2CAP_MODE_BASIC
:
1798 case L2CAP_MODE_ERTM
:
1799 case L2CAP_MODE_STREAMING
:
1808 switch (chan
->state
) {
1812 /* Already connecting */
1817 /* Already connected */
1831 /* Set destination address and psm */
1832 bacpy(&chan
->dst
, dst
);
1833 chan
->dst_type
= dst_type
;
1838 auth_type
= l2cap_get_auth_type(chan
);
1840 if (bdaddr_type_is_le(dst_type
))
1841 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1842 chan
->sec_level
, auth_type
);
1844 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1845 chan
->sec_level
, auth_type
);
1848 err
= PTR_ERR(hcon
);
1852 conn
= l2cap_conn_add(hcon
);
1854 hci_conn_drop(hcon
);
1859 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
1860 hci_conn_drop(hcon
);
1865 /* Update source addr of the socket */
1866 bacpy(&chan
->src
, &hcon
->src
);
1867 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1869 l2cap_chan_unlock(chan
);
1870 l2cap_chan_add(conn
, chan
);
1871 l2cap_chan_lock(chan
);
1873 /* l2cap_chan_add takes its own ref so we can drop this one */
1874 hci_conn_drop(hcon
);
1876 l2cap_state_change(chan
, BT_CONNECT
);
1877 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
1879 if (hcon
->state
== BT_CONNECTED
) {
1880 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1881 __clear_chan_timer(chan
);
1882 if (l2cap_chan_check_security(chan
))
1883 l2cap_state_change(chan
, BT_CONNECTED
);
1885 l2cap_do_start(chan
);
1891 l2cap_chan_unlock(chan
);
1892 hci_dev_unlock(hdev
);
1897 int __l2cap_wait_ack(struct sock
*sk
)
1899 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1900 DECLARE_WAITQUEUE(wait
, current
);
1904 add_wait_queue(sk_sleep(sk
), &wait
);
1905 set_current_state(TASK_INTERRUPTIBLE
);
1906 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1910 if (signal_pending(current
)) {
1911 err
= sock_intr_errno(timeo
);
1916 timeo
= schedule_timeout(timeo
);
1918 set_current_state(TASK_INTERRUPTIBLE
);
1920 err
= sock_error(sk
);
1924 set_current_state(TASK_RUNNING
);
1925 remove_wait_queue(sk_sleep(sk
), &wait
);
1929 static void l2cap_monitor_timeout(struct work_struct
*work
)
1931 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1932 monitor_timer
.work
);
1934 BT_DBG("chan %p", chan
);
1936 l2cap_chan_lock(chan
);
1939 l2cap_chan_unlock(chan
);
1940 l2cap_chan_put(chan
);
1944 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1946 l2cap_chan_unlock(chan
);
1947 l2cap_chan_put(chan
);
1950 static void l2cap_retrans_timeout(struct work_struct
*work
)
1952 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1953 retrans_timer
.work
);
1955 BT_DBG("chan %p", chan
);
1957 l2cap_chan_lock(chan
);
1960 l2cap_chan_unlock(chan
);
1961 l2cap_chan_put(chan
);
1965 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1966 l2cap_chan_unlock(chan
);
1967 l2cap_chan_put(chan
);
1970 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1971 struct sk_buff_head
*skbs
)
1973 struct sk_buff
*skb
;
1974 struct l2cap_ctrl
*control
;
1976 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1978 if (__chan_is_moving(chan
))
1981 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1983 while (!skb_queue_empty(&chan
->tx_q
)) {
1985 skb
= skb_dequeue(&chan
->tx_q
);
1987 bt_cb(skb
)->control
.retries
= 1;
1988 control
= &bt_cb(skb
)->control
;
1990 control
->reqseq
= 0;
1991 control
->txseq
= chan
->next_tx_seq
;
1993 __pack_control(chan
, control
, skb
);
1995 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1996 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1997 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
2000 l2cap_do_send(chan
, skb
);
2002 BT_DBG("Sent txseq %u", control
->txseq
);
2004 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
2005 chan
->frames_sent
++;
2009 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
2011 struct sk_buff
*skb
, *tx_skb
;
2012 struct l2cap_ctrl
*control
;
2015 BT_DBG("chan %p", chan
);
2017 if (chan
->state
!= BT_CONNECTED
)
2020 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2023 if (__chan_is_moving(chan
))
2026 while (chan
->tx_send_head
&&
2027 chan
->unacked_frames
< chan
->remote_tx_win
&&
2028 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
2030 skb
= chan
->tx_send_head
;
2032 bt_cb(skb
)->control
.retries
= 1;
2033 control
= &bt_cb(skb
)->control
;
2035 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2038 control
->reqseq
= chan
->buffer_seq
;
2039 chan
->last_acked_seq
= chan
->buffer_seq
;
2040 control
->txseq
= chan
->next_tx_seq
;
2042 __pack_control(chan
, control
, skb
);
2044 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2045 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
2046 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
2049 /* Clone after data has been modified. Data is assumed to be
2050 read-only (for locking purposes) on cloned sk_buffs.
2052 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2057 __set_retrans_timer(chan
);
2059 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
2060 chan
->unacked_frames
++;
2061 chan
->frames_sent
++;
2064 if (skb_queue_is_last(&chan
->tx_q
, skb
))
2065 chan
->tx_send_head
= NULL
;
2067 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
2069 l2cap_do_send(chan
, tx_skb
);
2070 BT_DBG("Sent txseq %u", control
->txseq
);
2073 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
2074 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
2079 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
2081 struct l2cap_ctrl control
;
2082 struct sk_buff
*skb
;
2083 struct sk_buff
*tx_skb
;
2086 BT_DBG("chan %p", chan
);
2088 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2091 if (__chan_is_moving(chan
))
2094 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
2095 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
2097 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
2099 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2104 bt_cb(skb
)->control
.retries
++;
2105 control
= bt_cb(skb
)->control
;
2107 if (chan
->max_tx
!= 0 &&
2108 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
2109 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
2110 l2cap_send_disconn_req(chan
, ECONNRESET
);
2111 l2cap_seq_list_clear(&chan
->retrans_list
);
2115 control
.reqseq
= chan
->buffer_seq
;
2116 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2121 if (skb_cloned(skb
)) {
2122 /* Cloned sk_buffs are read-only, so we need a
2125 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
2127 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2131 l2cap_seq_list_clear(&chan
->retrans_list
);
2135 /* Update skb contents */
2136 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
2137 put_unaligned_le32(__pack_extended_control(&control
),
2138 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2140 put_unaligned_le16(__pack_enhanced_control(&control
),
2141 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2144 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2145 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
2146 put_unaligned_le16(fcs
, skb_put(tx_skb
,
2150 l2cap_do_send(chan
, tx_skb
);
2152 BT_DBG("Resent txseq %d", control
.txseq
);
2154 chan
->last_acked_seq
= chan
->buffer_seq
;
2158 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2159 struct l2cap_ctrl
*control
)
2161 BT_DBG("chan %p, control %p", chan
, control
);
2163 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2164 l2cap_ertm_resend(chan
);
2167 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2168 struct l2cap_ctrl
*control
)
2170 struct sk_buff
*skb
;
2172 BT_DBG("chan %p, control %p", chan
, control
);
2175 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2177 l2cap_seq_list_clear(&chan
->retrans_list
);
2179 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2182 if (chan
->unacked_frames
) {
2183 skb_queue_walk(&chan
->tx_q
, skb
) {
2184 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2185 skb
== chan
->tx_send_head
)
2189 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2190 if (skb
== chan
->tx_send_head
)
2193 l2cap_seq_list_append(&chan
->retrans_list
,
2194 bt_cb(skb
)->control
.txseq
);
2197 l2cap_ertm_resend(chan
);
2201 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2203 struct l2cap_ctrl control
;
2204 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2205 chan
->last_acked_seq
);
2208 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2209 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2211 memset(&control
, 0, sizeof(control
));
2214 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2215 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2216 __clear_ack_timer(chan
);
2217 control
.super
= L2CAP_SUPER_RNR
;
2218 control
.reqseq
= chan
->buffer_seq
;
2219 l2cap_send_sframe(chan
, &control
);
2221 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2222 l2cap_ertm_send(chan
);
2223 /* If any i-frames were sent, they included an ack */
2224 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2228 /* Ack now if the window is 3/4ths full.
2229 * Calculate without mul or div
2231 threshold
= chan
->ack_win
;
2232 threshold
+= threshold
<< 1;
2235 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2238 if (frames_to_ack
>= threshold
) {
2239 __clear_ack_timer(chan
);
2240 control
.super
= L2CAP_SUPER_RR
;
2241 control
.reqseq
= chan
->buffer_seq
;
2242 l2cap_send_sframe(chan
, &control
);
2247 __set_ack_timer(chan
);
2251 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2252 struct msghdr
*msg
, int len
,
2253 int count
, struct sk_buff
*skb
)
2255 struct l2cap_conn
*conn
= chan
->conn
;
2256 struct sk_buff
**frag
;
2259 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
2265 /* Continuation fragments (no L2CAP header) */
2266 frag
= &skb_shinfo(skb
)->frag_list
;
2268 struct sk_buff
*tmp
;
2270 count
= min_t(unsigned int, conn
->mtu
, len
);
2272 tmp
= chan
->ops
->alloc_skb(chan
, count
,
2273 msg
->msg_flags
& MSG_DONTWAIT
);
2275 return PTR_ERR(tmp
);
2279 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
2282 (*frag
)->priority
= skb
->priority
;
2287 skb
->len
+= (*frag
)->len
;
2288 skb
->data_len
+= (*frag
)->len
;
2290 frag
= &(*frag
)->next
;
2296 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2297 struct msghdr
*msg
, size_t len
,
2300 struct l2cap_conn
*conn
= chan
->conn
;
2301 struct sk_buff
*skb
;
2302 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2303 struct l2cap_hdr
*lh
;
2305 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan
,
2306 __le16_to_cpu(chan
->psm
), len
, priority
);
2308 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2310 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2311 msg
->msg_flags
& MSG_DONTWAIT
);
2315 skb
->priority
= priority
;
2317 /* Create L2CAP header */
2318 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2319 lh
->cid
= cpu_to_le16(chan
->dcid
);
2320 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2321 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2323 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2324 if (unlikely(err
< 0)) {
2326 return ERR_PTR(err
);
2331 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2332 struct msghdr
*msg
, size_t len
,
2335 struct l2cap_conn
*conn
= chan
->conn
;
2336 struct sk_buff
*skb
;
2338 struct l2cap_hdr
*lh
;
2340 BT_DBG("chan %p len %zu", chan
, len
);
2342 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2344 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2345 msg
->msg_flags
& MSG_DONTWAIT
);
2349 skb
->priority
= priority
;
2351 /* Create L2CAP header */
2352 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2353 lh
->cid
= cpu_to_le16(chan
->dcid
);
2354 lh
->len
= cpu_to_le16(len
);
2356 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2357 if (unlikely(err
< 0)) {
2359 return ERR_PTR(err
);
2364 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2365 struct msghdr
*msg
, size_t len
,
2368 struct l2cap_conn
*conn
= chan
->conn
;
2369 struct sk_buff
*skb
;
2370 int err
, count
, hlen
;
2371 struct l2cap_hdr
*lh
;
2373 BT_DBG("chan %p len %zu", chan
, len
);
2376 return ERR_PTR(-ENOTCONN
);
2378 hlen
= __ertm_hdr_size(chan
);
2381 hlen
+= L2CAP_SDULEN_SIZE
;
2383 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2384 hlen
+= L2CAP_FCS_SIZE
;
2386 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2388 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2389 msg
->msg_flags
& MSG_DONTWAIT
);
2393 /* Create L2CAP header */
2394 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2395 lh
->cid
= cpu_to_le16(chan
->dcid
);
2396 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2398 /* Control header is populated later */
2399 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2400 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2402 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2405 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2407 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2408 if (unlikely(err
< 0)) {
2410 return ERR_PTR(err
);
2413 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2414 bt_cb(skb
)->control
.retries
= 0;
2418 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2419 struct sk_buff_head
*seg_queue
,
2420 struct msghdr
*msg
, size_t len
)
2422 struct sk_buff
*skb
;
2427 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2429 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2430 * so fragmented skbs are not used. The HCI layer's handling
2431 * of fragmented skbs is not compatible with ERTM's queueing.
2434 /* PDU size is derived from the HCI MTU */
2435 pdu_len
= chan
->conn
->mtu
;
2437 /* Constrain PDU size for BR/EDR connections */
2439 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2441 /* Adjust for largest possible L2CAP overhead. */
2443 pdu_len
-= L2CAP_FCS_SIZE
;
2445 pdu_len
-= __ertm_hdr_size(chan
);
2447 /* Remote device may have requested smaller PDUs */
2448 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2450 if (len
<= pdu_len
) {
2451 sar
= L2CAP_SAR_UNSEGMENTED
;
2455 sar
= L2CAP_SAR_START
;
2457 pdu_len
-= L2CAP_SDULEN_SIZE
;
2461 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2464 __skb_queue_purge(seg_queue
);
2465 return PTR_ERR(skb
);
2468 bt_cb(skb
)->control
.sar
= sar
;
2469 __skb_queue_tail(seg_queue
, skb
);
2474 pdu_len
+= L2CAP_SDULEN_SIZE
;
2477 if (len
<= pdu_len
) {
2478 sar
= L2CAP_SAR_END
;
2481 sar
= L2CAP_SAR_CONTINUE
;
2488 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2491 struct sk_buff
*skb
;
2493 struct sk_buff_head seg_queue
;
2495 /* Connectionless channel */
2496 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2497 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2499 return PTR_ERR(skb
);
2501 l2cap_do_send(chan
, skb
);
2505 switch (chan
->mode
) {
2506 case L2CAP_MODE_BASIC
:
2507 /* Check outgoing MTU */
2508 if (len
> chan
->omtu
)
2511 /* Create a basic PDU */
2512 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2514 return PTR_ERR(skb
);
2516 l2cap_do_send(chan
, skb
);
2520 case L2CAP_MODE_ERTM
:
2521 case L2CAP_MODE_STREAMING
:
2522 /* Check outgoing MTU */
2523 if (len
> chan
->omtu
) {
2528 __skb_queue_head_init(&seg_queue
);
2530 /* Do segmentation before calling in to the state machine,
2531 * since it's possible to block while waiting for memory
2534 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2536 /* The channel could have been closed while segmenting,
2537 * check that it is still connected.
2539 if (chan
->state
!= BT_CONNECTED
) {
2540 __skb_queue_purge(&seg_queue
);
2547 if (chan
->mode
== L2CAP_MODE_ERTM
)
2548 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2550 l2cap_streaming_send(chan
, &seg_queue
);
2554 /* If the skbs were not queued for sending, they'll still be in
2555 * seg_queue and need to be purged.
2557 __skb_queue_purge(&seg_queue
);
2561 BT_DBG("bad state %1.1x", chan
->mode
);
2568 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2570 struct l2cap_ctrl control
;
2573 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2575 memset(&control
, 0, sizeof(control
));
2577 control
.super
= L2CAP_SUPER_SREJ
;
2579 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2580 seq
= __next_seq(chan
, seq
)) {
2581 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2582 control
.reqseq
= seq
;
2583 l2cap_send_sframe(chan
, &control
);
2584 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2588 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2591 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2593 struct l2cap_ctrl control
;
2595 BT_DBG("chan %p", chan
);
2597 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2600 memset(&control
, 0, sizeof(control
));
2602 control
.super
= L2CAP_SUPER_SREJ
;
2603 control
.reqseq
= chan
->srej_list
.tail
;
2604 l2cap_send_sframe(chan
, &control
);
2607 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2609 struct l2cap_ctrl control
;
2613 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2615 memset(&control
, 0, sizeof(control
));
2617 control
.super
= L2CAP_SUPER_SREJ
;
2619 /* Capture initial list head to allow only one pass through the list. */
2620 initial_head
= chan
->srej_list
.head
;
2623 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2624 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2627 control
.reqseq
= seq
;
2628 l2cap_send_sframe(chan
, &control
);
2629 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2630 } while (chan
->srej_list
.head
!= initial_head
);
2633 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2635 struct sk_buff
*acked_skb
;
2638 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2640 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2643 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2644 chan
->expected_ack_seq
, chan
->unacked_frames
);
2646 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2647 ackseq
= __next_seq(chan
, ackseq
)) {
2649 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2651 skb_unlink(acked_skb
, &chan
->tx_q
);
2652 kfree_skb(acked_skb
);
2653 chan
->unacked_frames
--;
2657 chan
->expected_ack_seq
= reqseq
;
2659 if (chan
->unacked_frames
== 0)
2660 __clear_retrans_timer(chan
);
2662 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2665 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2667 BT_DBG("chan %p", chan
);
2669 chan
->expected_tx_seq
= chan
->buffer_seq
;
2670 l2cap_seq_list_clear(&chan
->srej_list
);
2671 skb_queue_purge(&chan
->srej_q
);
2672 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2675 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2676 struct l2cap_ctrl
*control
,
2677 struct sk_buff_head
*skbs
, u8 event
)
2679 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2683 case L2CAP_EV_DATA_REQUEST
:
2684 if (chan
->tx_send_head
== NULL
)
2685 chan
->tx_send_head
= skb_peek(skbs
);
2687 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2688 l2cap_ertm_send(chan
);
2690 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2691 BT_DBG("Enter LOCAL_BUSY");
2692 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2694 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2695 /* The SREJ_SENT state must be aborted if we are to
2696 * enter the LOCAL_BUSY state.
2698 l2cap_abort_rx_srej_sent(chan
);
2701 l2cap_send_ack(chan
);
2704 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2705 BT_DBG("Exit LOCAL_BUSY");
2706 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2708 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2709 struct l2cap_ctrl local_control
;
2711 memset(&local_control
, 0, sizeof(local_control
));
2712 local_control
.sframe
= 1;
2713 local_control
.super
= L2CAP_SUPER_RR
;
2714 local_control
.poll
= 1;
2715 local_control
.reqseq
= chan
->buffer_seq
;
2716 l2cap_send_sframe(chan
, &local_control
);
2718 chan
->retry_count
= 1;
2719 __set_monitor_timer(chan
);
2720 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2723 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2724 l2cap_process_reqseq(chan
, control
->reqseq
);
2726 case L2CAP_EV_EXPLICIT_POLL
:
2727 l2cap_send_rr_or_rnr(chan
, 1);
2728 chan
->retry_count
= 1;
2729 __set_monitor_timer(chan
);
2730 __clear_ack_timer(chan
);
2731 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2733 case L2CAP_EV_RETRANS_TO
:
2734 l2cap_send_rr_or_rnr(chan
, 1);
2735 chan
->retry_count
= 1;
2736 __set_monitor_timer(chan
);
2737 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2739 case L2CAP_EV_RECV_FBIT
:
2740 /* Nothing to process */
2747 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2748 struct l2cap_ctrl
*control
,
2749 struct sk_buff_head
*skbs
, u8 event
)
2751 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2755 case L2CAP_EV_DATA_REQUEST
:
2756 if (chan
->tx_send_head
== NULL
)
2757 chan
->tx_send_head
= skb_peek(skbs
);
2758 /* Queue data, but don't send. */
2759 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2761 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2762 BT_DBG("Enter LOCAL_BUSY");
2763 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2765 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2766 /* The SREJ_SENT state must be aborted if we are to
2767 * enter the LOCAL_BUSY state.
2769 l2cap_abort_rx_srej_sent(chan
);
2772 l2cap_send_ack(chan
);
2775 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2776 BT_DBG("Exit LOCAL_BUSY");
2777 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2779 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2780 struct l2cap_ctrl local_control
;
2781 memset(&local_control
, 0, sizeof(local_control
));
2782 local_control
.sframe
= 1;
2783 local_control
.super
= L2CAP_SUPER_RR
;
2784 local_control
.poll
= 1;
2785 local_control
.reqseq
= chan
->buffer_seq
;
2786 l2cap_send_sframe(chan
, &local_control
);
2788 chan
->retry_count
= 1;
2789 __set_monitor_timer(chan
);
2790 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2793 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2794 l2cap_process_reqseq(chan
, control
->reqseq
);
2798 case L2CAP_EV_RECV_FBIT
:
2799 if (control
&& control
->final
) {
2800 __clear_monitor_timer(chan
);
2801 if (chan
->unacked_frames
> 0)
2802 __set_retrans_timer(chan
);
2803 chan
->retry_count
= 0;
2804 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2805 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2808 case L2CAP_EV_EXPLICIT_POLL
:
2811 case L2CAP_EV_MONITOR_TO
:
2812 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2813 l2cap_send_rr_or_rnr(chan
, 1);
2814 __set_monitor_timer(chan
);
2815 chan
->retry_count
++;
2817 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2825 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2826 struct sk_buff_head
*skbs
, u8 event
)
2828 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2829 chan
, control
, skbs
, event
, chan
->tx_state
);
2831 switch (chan
->tx_state
) {
2832 case L2CAP_TX_STATE_XMIT
:
2833 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2835 case L2CAP_TX_STATE_WAIT_F
:
2836 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2844 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2845 struct l2cap_ctrl
*control
)
2847 BT_DBG("chan %p, control %p", chan
, control
);
2848 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2851 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2852 struct l2cap_ctrl
*control
)
2854 BT_DBG("chan %p, control %p", chan
, control
);
2855 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2858 /* Copy frame to all raw sockets on that connection */
2859 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2861 struct sk_buff
*nskb
;
2862 struct l2cap_chan
*chan
;
2864 BT_DBG("conn %p", conn
);
2866 mutex_lock(&conn
->chan_lock
);
2868 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2869 struct sock
*sk
= chan
->sk
;
2870 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2873 /* Don't send frame to the socket it came from */
2876 nskb
= skb_clone(skb
, GFP_KERNEL
);
2880 if (chan
->ops
->recv(chan
, nskb
))
2884 mutex_unlock(&conn
->chan_lock
);
2887 /* ---- L2CAP signalling commands ---- */
2888 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2889 u8 ident
, u16 dlen
, void *data
)
2891 struct sk_buff
*skb
, **frag
;
2892 struct l2cap_cmd_hdr
*cmd
;
2893 struct l2cap_hdr
*lh
;
2896 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2897 conn
, code
, ident
, dlen
);
2899 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2902 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2903 count
= min_t(unsigned int, conn
->mtu
, len
);
2905 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2909 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2910 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2912 if (conn
->hcon
->type
== LE_LINK
)
2913 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2915 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2917 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2920 cmd
->len
= cpu_to_le16(dlen
);
2923 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2924 memcpy(skb_put(skb
, count
), data
, count
);
2930 /* Continuation fragments (no L2CAP header) */
2931 frag
= &skb_shinfo(skb
)->frag_list
;
2933 count
= min_t(unsigned int, conn
->mtu
, len
);
2935 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2939 memcpy(skb_put(*frag
, count
), data
, count
);
2944 frag
= &(*frag
)->next
;
2954 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2957 struct l2cap_conf_opt
*opt
= *ptr
;
2960 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2968 *val
= *((u8
*) opt
->val
);
2972 *val
= get_unaligned_le16(opt
->val
);
2976 *val
= get_unaligned_le32(opt
->val
);
2980 *val
= (unsigned long) opt
->val
;
2984 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2988 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2990 struct l2cap_conf_opt
*opt
= *ptr
;
2992 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2999 *((u8
*) opt
->val
) = val
;
3003 put_unaligned_le16(val
, opt
->val
);
3007 put_unaligned_le32(val
, opt
->val
);
3011 memcpy(opt
->val
, (void *) val
, len
);
3015 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
3018 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
3020 struct l2cap_conf_efs efs
;
3022 switch (chan
->mode
) {
3023 case L2CAP_MODE_ERTM
:
3024 efs
.id
= chan
->local_id
;
3025 efs
.stype
= chan
->local_stype
;
3026 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3027 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3028 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
3029 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
3032 case L2CAP_MODE_STREAMING
:
3034 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
3035 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3036 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3045 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3046 (unsigned long) &efs
);
3049 static void l2cap_ack_timeout(struct work_struct
*work
)
3051 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3055 BT_DBG("chan %p", chan
);
3057 l2cap_chan_lock(chan
);
3059 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3060 chan
->last_acked_seq
);
3063 l2cap_send_rr_or_rnr(chan
, 0);
3065 l2cap_chan_unlock(chan
);
3066 l2cap_chan_put(chan
);
3069 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3073 chan
->next_tx_seq
= 0;
3074 chan
->expected_tx_seq
= 0;
3075 chan
->expected_ack_seq
= 0;
3076 chan
->unacked_frames
= 0;
3077 chan
->buffer_seq
= 0;
3078 chan
->frames_sent
= 0;
3079 chan
->last_acked_seq
= 0;
3081 chan
->sdu_last_frag
= NULL
;
3084 skb_queue_head_init(&chan
->tx_q
);
3086 chan
->local_amp_id
= AMP_ID_BREDR
;
3087 chan
->move_id
= AMP_ID_BREDR
;
3088 chan
->move_state
= L2CAP_MOVE_STABLE
;
3089 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3091 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3094 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3095 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3097 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3098 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3099 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3101 skb_queue_head_init(&chan
->srej_q
);
3103 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3107 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3109 l2cap_seq_list_free(&chan
->srej_list
);
3114 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3117 case L2CAP_MODE_STREAMING
:
3118 case L2CAP_MODE_ERTM
:
3119 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3123 return L2CAP_MODE_BASIC
;
3127 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3129 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3132 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3134 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
3137 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3138 struct l2cap_conf_rfc
*rfc
)
3140 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3141 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3143 /* Class 1 devices have must have ERTM timeouts
3144 * exceeding the Link Supervision Timeout. The
3145 * default Link Supervision Timeout for AMP
3146 * controllers is 10 seconds.
3148 * Class 1 devices use 0xffffffff for their
3149 * best-effort flush timeout, so the clamping logic
3150 * will result in a timeout that meets the above
3151 * requirement. ERTM timeouts are 16-bit values, so
3152 * the maximum timeout is 65.535 seconds.
3155 /* Convert timeout to milliseconds and round */
3156 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3158 /* This is the recommended formula for class 2 devices
3159 * that start ERTM timers when packets are sent to the
3162 ertm_to
= 3 * ertm_to
+ 500;
3164 if (ertm_to
> 0xffff)
3167 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3168 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3170 rfc
->retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3171 rfc
->monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3175 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3177 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3178 __l2cap_ews_supported(chan
->conn
)) {
3179 /* use extended control field */
3180 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3181 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3183 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3184 L2CAP_DEFAULT_TX_WINDOW
);
3185 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3187 chan
->ack_win
= chan
->tx_win
;
3190 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3192 struct l2cap_conf_req
*req
= data
;
3193 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3194 void *ptr
= req
->data
;
3197 BT_DBG("chan %p", chan
);
3199 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3202 switch (chan
->mode
) {
3203 case L2CAP_MODE_STREAMING
:
3204 case L2CAP_MODE_ERTM
:
3205 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3208 if (__l2cap_efs_supported(chan
->conn
))
3209 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3213 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3218 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3219 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3221 switch (chan
->mode
) {
3222 case L2CAP_MODE_BASIC
:
3223 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3224 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3227 rfc
.mode
= L2CAP_MODE_BASIC
;
3229 rfc
.max_transmit
= 0;
3230 rfc
.retrans_timeout
= 0;
3231 rfc
.monitor_timeout
= 0;
3232 rfc
.max_pdu_size
= 0;
3234 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3235 (unsigned long) &rfc
);
3238 case L2CAP_MODE_ERTM
:
3239 rfc
.mode
= L2CAP_MODE_ERTM
;
3240 rfc
.max_transmit
= chan
->max_tx
;
3242 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3244 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3245 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3247 rfc
.max_pdu_size
= cpu_to_le16(size
);
3249 l2cap_txwin_setup(chan
);
3251 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3252 L2CAP_DEFAULT_TX_WINDOW
);
3254 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3255 (unsigned long) &rfc
);
3257 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3258 l2cap_add_opt_efs(&ptr
, chan
);
3260 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3261 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3264 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3265 if (chan
->fcs
== L2CAP_FCS_NONE
||
3266 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3267 chan
->fcs
= L2CAP_FCS_NONE
;
3268 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3273 case L2CAP_MODE_STREAMING
:
3274 l2cap_txwin_setup(chan
);
3275 rfc
.mode
= L2CAP_MODE_STREAMING
;
3277 rfc
.max_transmit
= 0;
3278 rfc
.retrans_timeout
= 0;
3279 rfc
.monitor_timeout
= 0;
3281 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3282 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3284 rfc
.max_pdu_size
= cpu_to_le16(size
);
3286 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3287 (unsigned long) &rfc
);
3289 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3290 l2cap_add_opt_efs(&ptr
, chan
);
3292 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3293 if (chan
->fcs
== L2CAP_FCS_NONE
||
3294 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3295 chan
->fcs
= L2CAP_FCS_NONE
;
3296 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3302 req
->dcid
= cpu_to_le16(chan
->dcid
);
3303 req
->flags
= __constant_cpu_to_le16(0);
/* Parse the remote's accumulated Configure Request (chan->conf_req) and
 * build our Configure Response into @data.  Returns the response length,
 * or -ECONNREFUSED when the configuration cannot be negotiated.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown non-hint option types back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3522 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3523 void *data
, u16
*result
)
3525 struct l2cap_conf_req
*req
= data
;
3526 void *ptr
= req
->data
;
3529 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3530 struct l2cap_conf_efs efs
;
3532 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3534 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3535 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3538 case L2CAP_CONF_MTU
:
3539 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3540 *result
= L2CAP_CONF_UNACCEPT
;
3541 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3544 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3547 case L2CAP_CONF_FLUSH_TO
:
3548 chan
->flush_to
= val
;
3549 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3553 case L2CAP_CONF_RFC
:
3554 if (olen
== sizeof(rfc
))
3555 memcpy(&rfc
, (void *)val
, olen
);
3557 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3558 rfc
.mode
!= chan
->mode
)
3559 return -ECONNREFUSED
;
3563 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3564 sizeof(rfc
), (unsigned long) &rfc
);
3567 case L2CAP_CONF_EWS
:
3568 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3569 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3573 case L2CAP_CONF_EFS
:
3574 if (olen
== sizeof(efs
))
3575 memcpy(&efs
, (void *)val
, olen
);
3577 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3578 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3579 efs
.stype
!= chan
->local_stype
)
3580 return -ECONNREFUSED
;
3582 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3583 (unsigned long) &efs
);
3586 case L2CAP_CONF_FCS
:
3587 if (*result
== L2CAP_CONF_PENDING
)
3588 if (val
== L2CAP_FCS_NONE
)
3589 set_bit(CONF_RECV_NO_FCS
,
3595 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3596 return -ECONNREFUSED
;
3598 chan
->mode
= rfc
.mode
;
3600 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3602 case L2CAP_MODE_ERTM
:
3603 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3604 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3605 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3606 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3607 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3610 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3611 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3612 chan
->local_sdu_itime
=
3613 le32_to_cpu(efs
.sdu_itime
);
3614 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3615 chan
->local_flush_to
=
3616 le32_to_cpu(efs
.flush_to
);
3620 case L2CAP_MODE_STREAMING
:
3621 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3625 req
->dcid
= cpu_to_le16(chan
->dcid
);
3626 req
->flags
= __constant_cpu_to_le16(0);
3631 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3632 u16 result
, u16 flags
)
3634 struct l2cap_conf_rsp
*rsp
= data
;
3635 void *ptr
= rsp
->data
;
3637 BT_DBG("chan %p", chan
);
3639 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3640 rsp
->result
= cpu_to_le16(result
);
3641 rsp
->flags
= cpu_to_le16(flags
);
3646 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3648 struct l2cap_conn_rsp rsp
;
3649 struct l2cap_conn
*conn
= chan
->conn
;
3653 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3654 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3655 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3656 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3659 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3661 rsp_code
= L2CAP_CONN_RSP
;
3663 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3665 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3667 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3670 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3671 l2cap_build_conf_req(chan
, buf
), buf
);
3672 chan
->num_conf_req
++;
3675 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3679 /* Use sane default values in case a misbehaving remote device
3680 * did not send an RFC or extended window size option.
3682 u16 txwin_ext
= chan
->ack_win
;
3683 struct l2cap_conf_rfc rfc
= {
3685 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3686 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3687 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3688 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3691 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3693 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3696 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3697 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3700 case L2CAP_CONF_RFC
:
3701 if (olen
== sizeof(rfc
))
3702 memcpy(&rfc
, (void *)val
, olen
);
3704 case L2CAP_CONF_EWS
:
3711 case L2CAP_MODE_ERTM
:
3712 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3713 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3714 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3715 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3716 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3718 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3721 case L2CAP_MODE_STREAMING
:
3722 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3726 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3727 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3730 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3732 if (cmd_len
< sizeof(*rej
))
3735 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3738 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3739 cmd
->ident
== conn
->info_ident
) {
3740 cancel_delayed_work(&conn
->info_timer
);
3742 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3743 conn
->info_ident
= 0;
3745 l2cap_conn_start(conn
);
3751 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3752 struct l2cap_cmd_hdr
*cmd
,
3753 u8
*data
, u8 rsp_code
, u8 amp_id
)
3755 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3756 struct l2cap_conn_rsp rsp
;
3757 struct l2cap_chan
*chan
= NULL
, *pchan
;
3758 struct sock
*parent
, *sk
= NULL
;
3759 int result
, status
= L2CAP_CS_NO_INFO
;
3761 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3762 __le16 psm
= req
->psm
;
3764 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3766 /* Check if we have socket listening on psm */
3767 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3770 result
= L2CAP_CR_BAD_PSM
;
3776 mutex_lock(&conn
->chan_lock
);
3779 /* Check if the ACL is secure enough (if not SDP) */
3780 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3781 !hci_conn_check_link_mode(conn
->hcon
)) {
3782 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3783 result
= L2CAP_CR_SEC_BLOCK
;
3787 result
= L2CAP_CR_NO_MEM
;
3789 /* Check if we already have channel with that dcid */
3790 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3793 chan
= pchan
->ops
->new_connection(pchan
);
3799 /* For certain devices (ex: HID mouse), support for authentication,
3800 * pairing and bonding is optional. For such devices, inorder to avoid
3801 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3802 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3804 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3806 bacpy(&chan
->src
, &conn
->hcon
->src
);
3807 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3808 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
3809 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
3812 chan
->local_amp_id
= amp_id
;
3814 __l2cap_chan_add(conn
, chan
);
3818 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3820 chan
->ident
= cmd
->ident
;
3822 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3823 if (l2cap_chan_check_security(chan
)) {
3824 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3825 __l2cap_state_change(chan
, BT_CONNECT2
);
3826 result
= L2CAP_CR_PEND
;
3827 status
= L2CAP_CS_AUTHOR_PEND
;
3828 chan
->ops
->defer(chan
);
3830 /* Force pending result for AMP controllers.
3831 * The connection will succeed after the
3832 * physical link is up.
3834 if (amp_id
== AMP_ID_BREDR
) {
3835 __l2cap_state_change(chan
, BT_CONFIG
);
3836 result
= L2CAP_CR_SUCCESS
;
3838 __l2cap_state_change(chan
, BT_CONNECT2
);
3839 result
= L2CAP_CR_PEND
;
3841 status
= L2CAP_CS_NO_INFO
;
3844 __l2cap_state_change(chan
, BT_CONNECT2
);
3845 result
= L2CAP_CR_PEND
;
3846 status
= L2CAP_CS_AUTHEN_PEND
;
3849 __l2cap_state_change(chan
, BT_CONNECT2
);
3850 result
= L2CAP_CR_PEND
;
3851 status
= L2CAP_CS_NO_INFO
;
3855 release_sock(parent
);
3856 mutex_unlock(&conn
->chan_lock
);
3859 rsp
.scid
= cpu_to_le16(scid
);
3860 rsp
.dcid
= cpu_to_le16(dcid
);
3861 rsp
.result
= cpu_to_le16(result
);
3862 rsp
.status
= cpu_to_le16(status
);
3863 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3865 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3866 struct l2cap_info_req info
;
3867 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3869 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3870 conn
->info_ident
= l2cap_get_ident(conn
);
3872 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3874 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3875 sizeof(info
), &info
);
3878 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3879 result
== L2CAP_CR_SUCCESS
) {
3881 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3882 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3883 l2cap_build_conf_req(chan
, buf
), buf
);
3884 chan
->num_conf_req
++;
3890 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3891 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3893 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3894 struct hci_conn
*hcon
= conn
->hcon
;
3896 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3900 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3901 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3902 mgmt_device_connected(hdev
, &hcon
->dst
, hcon
->type
,
3903 hcon
->dst_type
, 0, NULL
, 0,
3905 hci_dev_unlock(hdev
);
3907 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3911 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3912 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3915 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3916 u16 scid
, dcid
, result
, status
;
3917 struct l2cap_chan
*chan
;
3921 if (cmd_len
< sizeof(*rsp
))
3924 scid
= __le16_to_cpu(rsp
->scid
);
3925 dcid
= __le16_to_cpu(rsp
->dcid
);
3926 result
= __le16_to_cpu(rsp
->result
);
3927 status
= __le16_to_cpu(rsp
->status
);
3929 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3930 dcid
, scid
, result
, status
);
3932 mutex_lock(&conn
->chan_lock
);
3935 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3941 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3950 l2cap_chan_lock(chan
);
3953 case L2CAP_CR_SUCCESS
:
3954 l2cap_state_change(chan
, BT_CONFIG
);
3957 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3959 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3962 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3963 l2cap_build_conf_req(chan
, req
), req
);
3964 chan
->num_conf_req
++;
3968 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3972 l2cap_chan_del(chan
, ECONNREFUSED
);
3976 l2cap_chan_unlock(chan
);
3979 mutex_unlock(&conn
->chan_lock
);
3984 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3986 /* FCS is enabled only in ERTM or streaming mode, if one or both
3989 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3990 chan
->fcs
= L2CAP_FCS_NONE
;
3991 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
3992 chan
->fcs
= L2CAP_FCS_CRC16
;
3995 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3996 u8 ident
, u16 flags
)
3998 struct l2cap_conn
*conn
= chan
->conn
;
4000 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
4003 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
4004 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
4006 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
4007 l2cap_build_conf_rsp(chan
, data
,
4008 L2CAP_CONF_SUCCESS
, flags
), data
);
4011 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4012 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4015 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4018 struct l2cap_chan
*chan
;
4021 if (cmd_len
< sizeof(*req
))
4024 dcid
= __le16_to_cpu(req
->dcid
);
4025 flags
= __le16_to_cpu(req
->flags
);
4027 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4029 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4033 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4034 struct l2cap_cmd_rej_cid rej
;
4036 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
4037 rej
.scid
= cpu_to_le16(chan
->scid
);
4038 rej
.dcid
= cpu_to_le16(chan
->dcid
);
4040 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
4045 /* Reject if config buffer is too small. */
4046 len
= cmd_len
- sizeof(*req
);
4047 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4048 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4049 l2cap_build_conf_rsp(chan
, rsp
,
4050 L2CAP_CONF_REJECT
, flags
), rsp
);
4055 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4056 chan
->conf_len
+= len
;
4058 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4059 /* Incomplete config. Send empty response. */
4060 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4061 l2cap_build_conf_rsp(chan
, rsp
,
4062 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4066 /* Complete config. */
4067 len
= l2cap_parse_conf_req(chan
, rsp
);
4069 l2cap_send_disconn_req(chan
, ECONNRESET
);
4073 chan
->ident
= cmd
->ident
;
4074 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4075 chan
->num_conf_rsp
++;
4077 /* Reset config buffer. */
4080 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4083 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4084 set_default_fcs(chan
);
4086 if (chan
->mode
== L2CAP_MODE_ERTM
||
4087 chan
->mode
== L2CAP_MODE_STREAMING
)
4088 err
= l2cap_ertm_init(chan
);
4091 l2cap_send_disconn_req(chan
, -err
);
4093 l2cap_chan_ready(chan
);
4098 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4100 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4101 l2cap_build_conf_req(chan
, buf
), buf
);
4102 chan
->num_conf_req
++;
4105 /* Got Conf Rsp PENDING from remote side and asume we sent
4106 Conf Rsp PENDING in the code above */
4107 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4108 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4110 /* check compatibility */
4112 /* Send rsp for BR/EDR channel */
4114 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4116 chan
->ident
= cmd
->ident
;
4120 l2cap_chan_unlock(chan
);
4124 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4125 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4128 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4129 u16 scid
, flags
, result
;
4130 struct l2cap_chan
*chan
;
4131 int len
= cmd_len
- sizeof(*rsp
);
4134 if (cmd_len
< sizeof(*rsp
))
4137 scid
= __le16_to_cpu(rsp
->scid
);
4138 flags
= __le16_to_cpu(rsp
->flags
);
4139 result
= __le16_to_cpu(rsp
->result
);
4141 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4144 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4149 case L2CAP_CONF_SUCCESS
:
4150 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4151 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4154 case L2CAP_CONF_PENDING
:
4155 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4157 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4160 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4163 l2cap_send_disconn_req(chan
, ECONNRESET
);
4167 if (!chan
->hs_hcon
) {
4168 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4171 if (l2cap_check_efs(chan
)) {
4172 amp_create_logical_link(chan
);
4173 chan
->ident
= cmd
->ident
;
4179 case L2CAP_CONF_UNACCEPT
:
4180 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4183 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4184 l2cap_send_disconn_req(chan
, ECONNRESET
);
4188 /* throw out any old stored conf requests */
4189 result
= L2CAP_CONF_SUCCESS
;
4190 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4193 l2cap_send_disconn_req(chan
, ECONNRESET
);
4197 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4198 L2CAP_CONF_REQ
, len
, req
);
4199 chan
->num_conf_req
++;
4200 if (result
!= L2CAP_CONF_SUCCESS
)
4206 l2cap_chan_set_err(chan
, ECONNRESET
);
4208 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4209 l2cap_send_disconn_req(chan
, ECONNRESET
);
4213 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4216 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4218 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4219 set_default_fcs(chan
);
4221 if (chan
->mode
== L2CAP_MODE_ERTM
||
4222 chan
->mode
== L2CAP_MODE_STREAMING
)
4223 err
= l2cap_ertm_init(chan
);
4226 l2cap_send_disconn_req(chan
, -err
);
4228 l2cap_chan_ready(chan
);
4232 l2cap_chan_unlock(chan
);
4236 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4237 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4240 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4241 struct l2cap_disconn_rsp rsp
;
4243 struct l2cap_chan
*chan
;
4246 if (cmd_len
!= sizeof(*req
))
4249 scid
= __le16_to_cpu(req
->scid
);
4250 dcid
= __le16_to_cpu(req
->dcid
);
4252 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4254 mutex_lock(&conn
->chan_lock
);
4256 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4258 mutex_unlock(&conn
->chan_lock
);
4262 l2cap_chan_lock(chan
);
4266 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4267 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4268 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4271 sk
->sk_shutdown
= SHUTDOWN_MASK
;
4274 l2cap_chan_hold(chan
);
4275 l2cap_chan_del(chan
, ECONNRESET
);
4277 l2cap_chan_unlock(chan
);
4279 chan
->ops
->close(chan
);
4280 l2cap_chan_put(chan
);
4282 mutex_unlock(&conn
->chan_lock
);
4287 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4288 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4291 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4293 struct l2cap_chan
*chan
;
4295 if (cmd_len
!= sizeof(*rsp
))
4298 scid
= __le16_to_cpu(rsp
->scid
);
4299 dcid
= __le16_to_cpu(rsp
->dcid
);
4301 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4303 mutex_lock(&conn
->chan_lock
);
4305 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4307 mutex_unlock(&conn
->chan_lock
);
4311 l2cap_chan_lock(chan
);
4313 l2cap_chan_hold(chan
);
4314 l2cap_chan_del(chan
, 0);
4316 l2cap_chan_unlock(chan
);
4318 chan
->ops
->close(chan
);
4319 l2cap_chan_put(chan
);
4321 mutex_unlock(&conn
->chan_lock
);
4326 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4327 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4330 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4333 if (cmd_len
!= sizeof(*req
))
4336 type
= __le16_to_cpu(req
->type
);
4338 BT_DBG("type 0x%4.4x", type
);
4340 if (type
== L2CAP_IT_FEAT_MASK
) {
4342 u32 feat_mask
= l2cap_feat_mask
;
4343 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4344 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4345 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4347 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4349 if (conn
->hs_enabled
)
4350 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4351 | L2CAP_FEAT_EXT_WINDOW
;
4353 put_unaligned_le32(feat_mask
, rsp
->data
);
4354 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4356 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4358 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4360 if (conn
->hs_enabled
)
4361 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4363 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4365 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4366 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4367 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4368 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4371 struct l2cap_info_rsp rsp
;
4372 rsp
.type
= cpu_to_le16(type
);
4373 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
4374 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4381 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4382 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4385 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4388 if (cmd_len
< sizeof(*rsp
))
4391 type
= __le16_to_cpu(rsp
->type
);
4392 result
= __le16_to_cpu(rsp
->result
);
4394 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4396 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4397 if (cmd
->ident
!= conn
->info_ident
||
4398 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4401 cancel_delayed_work(&conn
->info_timer
);
4403 if (result
!= L2CAP_IR_SUCCESS
) {
4404 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4405 conn
->info_ident
= 0;
4407 l2cap_conn_start(conn
);
4413 case L2CAP_IT_FEAT_MASK
:
4414 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4416 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4417 struct l2cap_info_req req
;
4418 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4420 conn
->info_ident
= l2cap_get_ident(conn
);
4422 l2cap_send_cmd(conn
, conn
->info_ident
,
4423 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4425 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4426 conn
->info_ident
= 0;
4428 l2cap_conn_start(conn
);
4432 case L2CAP_IT_FIXED_CHAN
:
4433 conn
->fixed_chan_mask
= rsp
->data
[0];
4434 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4435 conn
->info_ident
= 0;
4437 l2cap_conn_start(conn
);
4444 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4445 struct l2cap_cmd_hdr
*cmd
,
4446 u16 cmd_len
, void *data
)
4448 struct l2cap_create_chan_req
*req
= data
;
4449 struct l2cap_create_chan_rsp rsp
;
4450 struct l2cap_chan
*chan
;
4451 struct hci_dev
*hdev
;
4454 if (cmd_len
!= sizeof(*req
))
4457 if (!conn
->hs_enabled
)
4460 psm
= le16_to_cpu(req
->psm
);
4461 scid
= le16_to_cpu(req
->scid
);
4463 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4465 /* For controller id 0 make BR/EDR connection */
4466 if (req
->amp_id
== AMP_ID_BREDR
) {
4467 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4472 /* Validate AMP controller id */
4473 hdev
= hci_dev_get(req
->amp_id
);
4477 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4482 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4485 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4486 struct hci_conn
*hs_hcon
;
4488 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4495 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4497 mgr
->bredr_chan
= chan
;
4498 chan
->hs_hcon
= hs_hcon
;
4499 chan
->fcs
= L2CAP_FCS_NONE
;
4500 conn
->mtu
= hdev
->block_mtu
;
4509 rsp
.scid
= cpu_to_le16(scid
);
4510 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_BAD_AMP
);
4511 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4513 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4519 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4521 struct l2cap_move_chan_req req
;
4524 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4526 ident
= l2cap_get_ident(chan
->conn
);
4527 chan
->ident
= ident
;
4529 req
.icid
= cpu_to_le16(chan
->scid
);
4530 req
.dest_amp_id
= dest_amp_id
;
4532 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4535 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4538 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4540 struct l2cap_move_chan_rsp rsp
;
4542 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4544 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4545 rsp
.result
= cpu_to_le16(result
);
4547 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4551 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4553 struct l2cap_move_chan_cfm cfm
;
4555 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4557 chan
->ident
= l2cap_get_ident(chan
->conn
);
4559 cfm
.icid
= cpu_to_le16(chan
->scid
);
4560 cfm
.result
= cpu_to_le16(result
);
4562 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4565 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4568 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4570 struct l2cap_move_chan_cfm cfm
;
4572 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4574 cfm
.icid
= cpu_to_le16(icid
);
4575 cfm
.result
= __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4577 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4581 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4584 struct l2cap_move_chan_cfm_rsp rsp
;
4586 BT_DBG("icid 0x%4.4x", icid
);
4588 rsp
.icid
= cpu_to_le16(icid
);
4589 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4592 static void __release_logical_link(struct l2cap_chan
*chan
)
4594 chan
->hs_hchan
= NULL
;
4595 chan
->hs_hcon
= NULL
;
4597 /* Placeholder - release the logical link */
4600 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4602 /* Logical link setup failed */
4603 if (chan
->state
!= BT_CONNECTED
) {
4604 /* Create channel failure, disconnect */
4605 l2cap_send_disconn_req(chan
, ECONNRESET
);
4609 switch (chan
->move_role
) {
4610 case L2CAP_MOVE_ROLE_RESPONDER
:
4611 l2cap_move_done(chan
);
4612 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4614 case L2CAP_MOVE_ROLE_INITIATOR
:
4615 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4616 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4617 /* Remote has only sent pending or
4618 * success responses, clean up
4620 l2cap_move_done(chan
);
4623 /* Other amp move states imply that the move
4624 * has already aborted
4626 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4631 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4632 struct hci_chan
*hchan
)
4634 struct l2cap_conf_rsp rsp
;
4636 chan
->hs_hchan
= hchan
;
4637 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4639 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4641 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4644 set_default_fcs(chan
);
4646 err
= l2cap_ertm_init(chan
);
4648 l2cap_send_disconn_req(chan
, -err
);
4650 l2cap_chan_ready(chan
);
4654 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4655 struct hci_chan
*hchan
)
4657 chan
->hs_hcon
= hchan
->conn
;
4658 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4660 BT_DBG("move_state %d", chan
->move_state
);
4662 switch (chan
->move_state
) {
4663 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4664 /* Move confirm will be sent after a success
4665 * response is received
4667 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4669 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4670 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4671 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4672 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4673 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4674 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4675 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4676 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4677 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4681 /* Move was not in expected state, free the channel */
4682 __release_logical_link(chan
);
4684 chan
->move_state
= L2CAP_MOVE_STABLE
;
4688 /* Call with chan locked */
4689 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4692 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4695 l2cap_logical_fail(chan
);
4696 __release_logical_link(chan
);
4700 if (chan
->state
!= BT_CONNECTED
) {
4701 /* Ignore logical link if channel is on BR/EDR */
4702 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4703 l2cap_logical_finish_create(chan
, hchan
);
4705 l2cap_logical_finish_move(chan
, hchan
);
4709 void l2cap_move_start(struct l2cap_chan
*chan
)
4711 BT_DBG("chan %p", chan
);
4713 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4714 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4716 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4717 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4718 /* Placeholder - start physical link setup */
4720 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4721 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4723 l2cap_move_setup(chan
);
4724 l2cap_send_move_chan_req(chan
, 0);
4728 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4729 u8 local_amp_id
, u8 remote_amp_id
)
4731 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4732 local_amp_id
, remote_amp_id
);
4734 chan
->fcs
= L2CAP_FCS_NONE
;
4736 /* Outgoing channel on AMP */
4737 if (chan
->state
== BT_CONNECT
) {
4738 if (result
== L2CAP_CR_SUCCESS
) {
4739 chan
->local_amp_id
= local_amp_id
;
4740 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4742 /* Revert to BR/EDR connect */
4743 l2cap_send_conn_req(chan
);
4749 /* Incoming channel on AMP */
4750 if (__l2cap_no_conn_pending(chan
)) {
4751 struct l2cap_conn_rsp rsp
;
4753 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4754 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4756 if (result
== L2CAP_CR_SUCCESS
) {
4757 /* Send successful response */
4758 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
4759 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4761 /* Send negative response */
4762 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4763 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4766 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4769 if (result
== L2CAP_CR_SUCCESS
) {
4770 __l2cap_state_change(chan
, BT_CONFIG
);
4771 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4772 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4774 l2cap_build_conf_req(chan
, buf
), buf
);
4775 chan
->num_conf_req
++;
4780 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4783 l2cap_move_setup(chan
);
4784 chan
->move_id
= local_amp_id
;
4785 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4787 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4790 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4792 struct hci_chan
*hchan
= NULL
;
4794 /* Placeholder - get hci_chan for logical link */
4797 if (hchan
->state
== BT_CONNECTED
) {
4798 /* Logical link is ready to go */
4799 chan
->hs_hcon
= hchan
->conn
;
4800 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4801 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4802 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4804 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4806 /* Wait for logical link to be ready */
4807 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4810 /* Logical link not available */
4811 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4815 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4817 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4819 if (result
== -EINVAL
)
4820 rsp_result
= L2CAP_MR_BAD_ID
;
4822 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4824 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4827 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4828 chan
->move_state
= L2CAP_MOVE_STABLE
;
4830 /* Restart data transmission */
4831 l2cap_ertm_send(chan
);
4834 /* Invoke with locked chan */
4835 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4837 u8 local_amp_id
= chan
->local_amp_id
;
4838 u8 remote_amp_id
= chan
->remote_amp_id
;
4840 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4841 chan
, result
, local_amp_id
, remote_amp_id
);
4843 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4844 l2cap_chan_unlock(chan
);
4848 if (chan
->state
!= BT_CONNECTED
) {
4849 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4850 } else if (result
!= L2CAP_MR_SUCCESS
) {
4851 l2cap_do_move_cancel(chan
, result
);
4853 switch (chan
->move_role
) {
4854 case L2CAP_MOVE_ROLE_INITIATOR
:
4855 l2cap_do_move_initiate(chan
, local_amp_id
,
4858 case L2CAP_MOVE_ROLE_RESPONDER
:
4859 l2cap_do_move_respond(chan
, result
);
4862 l2cap_do_move_cancel(chan
, result
);
4868 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4869 struct l2cap_cmd_hdr
*cmd
,
4870 u16 cmd_len
, void *data
)
4872 struct l2cap_move_chan_req
*req
= data
;
4873 struct l2cap_move_chan_rsp rsp
;
4874 struct l2cap_chan
*chan
;
4876 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4878 if (cmd_len
!= sizeof(*req
))
4881 icid
= le16_to_cpu(req
->icid
);
4883 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4885 if (!conn
->hs_enabled
)
4888 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4890 rsp
.icid
= cpu_to_le16(icid
);
4891 rsp
.result
= __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4892 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4897 chan
->ident
= cmd
->ident
;
4899 if (chan
->scid
< L2CAP_CID_DYN_START
||
4900 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4901 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4902 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4903 result
= L2CAP_MR_NOT_ALLOWED
;
4904 goto send_move_response
;
4907 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4908 result
= L2CAP_MR_SAME_ID
;
4909 goto send_move_response
;
4912 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4913 struct hci_dev
*hdev
;
4914 hdev
= hci_dev_get(req
->dest_amp_id
);
4915 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4916 !test_bit(HCI_UP
, &hdev
->flags
)) {
4920 result
= L2CAP_MR_BAD_ID
;
4921 goto send_move_response
;
4926 /* Detect a move collision. Only send a collision response
4927 * if this side has "lost", otherwise proceed with the move.
4928 * The winner has the larger bd_addr.
4930 if ((__chan_is_moving(chan
) ||
4931 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4932 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4933 result
= L2CAP_MR_COLLISION
;
4934 goto send_move_response
;
4937 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4938 l2cap_move_setup(chan
);
4939 chan
->move_id
= req
->dest_amp_id
;
4942 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4943 /* Moving to BR/EDR */
4944 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4945 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4946 result
= L2CAP_MR_PEND
;
4948 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4949 result
= L2CAP_MR_SUCCESS
;
4952 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4953 /* Placeholder - uncomment when amp functions are available */
4954 /*amp_accept_physical(chan, req->dest_amp_id);*/
4955 result
= L2CAP_MR_PEND
;
4959 l2cap_send_move_chan_rsp(chan
, result
);
4961 l2cap_chan_unlock(chan
);
4966 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4968 struct l2cap_chan
*chan
;
4969 struct hci_chan
*hchan
= NULL
;
4971 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4973 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4977 __clear_chan_timer(chan
);
4978 if (result
== L2CAP_MR_PEND
)
4979 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
4981 switch (chan
->move_state
) {
4982 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4983 /* Move confirm will be sent when logical link
4986 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4988 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
4989 if (result
== L2CAP_MR_PEND
) {
4991 } else if (test_bit(CONN_LOCAL_BUSY
,
4992 &chan
->conn_state
)) {
4993 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4995 /* Logical link is up or moving to BR/EDR,
4998 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4999 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5002 case L2CAP_MOVE_WAIT_RSP
:
5004 if (result
== L2CAP_MR_SUCCESS
) {
5005 /* Remote is ready, send confirm immediately
5006 * after logical link is ready
5008 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5010 /* Both logical link and move success
5011 * are required to confirm
5013 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5016 /* Placeholder - get hci_chan for logical link */
5018 /* Logical link not available */
5019 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5023 /* If the logical link is not yet connected, do not
5024 * send confirmation.
5026 if (hchan
->state
!= BT_CONNECTED
)
5029 /* Logical link is already ready to go */
5031 chan
->hs_hcon
= hchan
->conn
;
5032 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5034 if (result
== L2CAP_MR_SUCCESS
) {
5035 /* Can confirm now */
5036 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5038 /* Now only need move success
5041 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5044 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5047 /* Any other amp move state means the move failed. */
5048 chan
->move_id
= chan
->local_amp_id
;
5049 l2cap_move_done(chan
);
5050 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5053 l2cap_chan_unlock(chan
);
5056 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5059 struct l2cap_chan
*chan
;
5061 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5063 /* Could not locate channel, icid is best guess */
5064 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5068 __clear_chan_timer(chan
);
5070 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5071 if (result
== L2CAP_MR_COLLISION
) {
5072 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5074 /* Cleanup - cancel move */
5075 chan
->move_id
= chan
->local_amp_id
;
5076 l2cap_move_done(chan
);
5080 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5082 l2cap_chan_unlock(chan
);
5085 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5086 struct l2cap_cmd_hdr
*cmd
,
5087 u16 cmd_len
, void *data
)
5089 struct l2cap_move_chan_rsp
*rsp
= data
;
5092 if (cmd_len
!= sizeof(*rsp
))
5095 icid
= le16_to_cpu(rsp
->icid
);
5096 result
= le16_to_cpu(rsp
->result
);
5098 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5100 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5101 l2cap_move_continue(conn
, icid
, result
);
5103 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5108 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5109 struct l2cap_cmd_hdr
*cmd
,
5110 u16 cmd_len
, void *data
)
5112 struct l2cap_move_chan_cfm
*cfm
= data
;
5113 struct l2cap_chan
*chan
;
5116 if (cmd_len
!= sizeof(*cfm
))
5119 icid
= le16_to_cpu(cfm
->icid
);
5120 result
= le16_to_cpu(cfm
->result
);
5122 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5124 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5126 /* Spec requires a response even if the icid was not found */
5127 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5131 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5132 if (result
== L2CAP_MC_CONFIRMED
) {
5133 chan
->local_amp_id
= chan
->move_id
;
5134 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5135 __release_logical_link(chan
);
5137 chan
->move_id
= chan
->local_amp_id
;
5140 l2cap_move_done(chan
);
5143 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5145 l2cap_chan_unlock(chan
);
5150 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5151 struct l2cap_cmd_hdr
*cmd
,
5152 u16 cmd_len
, void *data
)
5154 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5155 struct l2cap_chan
*chan
;
5158 if (cmd_len
!= sizeof(*rsp
))
5161 icid
= le16_to_cpu(rsp
->icid
);
5163 BT_DBG("icid 0x%4.4x", icid
);
5165 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5169 __clear_chan_timer(chan
);
5171 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5172 chan
->local_amp_id
= chan
->move_id
;
5174 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5175 __release_logical_link(chan
);
5177 l2cap_move_done(chan
);
5180 l2cap_chan_unlock(chan
);
5185 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
5190 if (min
> max
|| min
< 6 || max
> 3200)
5193 if (to_multiplier
< 10 || to_multiplier
> 3200)
5196 if (max
>= to_multiplier
* 8)
5199 max_latency
= (to_multiplier
* 8 / max
) - 1;
5200 if (latency
> 499 || latency
> max_latency
)
5206 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5207 struct l2cap_cmd_hdr
*cmd
,
5210 struct hci_conn
*hcon
= conn
->hcon
;
5211 struct l2cap_conn_param_update_req
*req
;
5212 struct l2cap_conn_param_update_rsp rsp
;
5213 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
5216 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
5219 cmd_len
= __le16_to_cpu(cmd
->len
);
5220 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5223 req
= (struct l2cap_conn_param_update_req
*) data
;
5224 min
= __le16_to_cpu(req
->min
);
5225 max
= __le16_to_cpu(req
->max
);
5226 latency
= __le16_to_cpu(req
->latency
);
5227 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5229 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5230 min
, max
, latency
, to_multiplier
);
5232 memset(&rsp
, 0, sizeof(rsp
));
5234 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
5236 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5238 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5240 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5244 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
5249 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5250 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5255 switch (cmd
->code
) {
5256 case L2CAP_COMMAND_REJ
:
5257 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5260 case L2CAP_CONN_REQ
:
5261 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5264 case L2CAP_CONN_RSP
:
5265 case L2CAP_CREATE_CHAN_RSP
:
5266 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5269 case L2CAP_CONF_REQ
:
5270 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5273 case L2CAP_CONF_RSP
:
5274 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5277 case L2CAP_DISCONN_REQ
:
5278 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5281 case L2CAP_DISCONN_RSP
:
5282 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5285 case L2CAP_ECHO_REQ
:
5286 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5289 case L2CAP_ECHO_RSP
:
5292 case L2CAP_INFO_REQ
:
5293 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5296 case L2CAP_INFO_RSP
:
5297 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5300 case L2CAP_CREATE_CHAN_REQ
:
5301 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5304 case L2CAP_MOVE_CHAN_REQ
:
5305 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5308 case L2CAP_MOVE_CHAN_RSP
:
5309 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5312 case L2CAP_MOVE_CHAN_CFM
:
5313 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5316 case L2CAP_MOVE_CHAN_CFM_RSP
:
5317 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5321 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5329 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5330 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
5332 switch (cmd
->code
) {
5333 case L2CAP_COMMAND_REJ
:
5336 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5337 return l2cap_conn_param_update_req(conn
, cmd
, data
);
5339 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5343 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5348 static __le16
l2cap_err_to_reason(int err
)
5352 return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
5354 return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED
);
5358 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5362 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5363 struct sk_buff
*skb
)
5365 struct hci_conn
*hcon
= conn
->hcon
;
5366 struct l2cap_cmd_hdr
*cmd
;
5370 if (hcon
->type
!= LE_LINK
)
5373 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5376 cmd
= (void *) skb
->data
;
5377 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5379 len
= le16_to_cpu(cmd
->len
);
5381 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5383 if (len
!= skb
->len
|| !cmd
->ident
) {
5384 BT_DBG("corrupted command");
5388 err
= l2cap_le_sig_cmd(conn
, cmd
, skb
->data
);
5390 struct l2cap_cmd_rej_unk rej
;
5392 BT_ERR("Wrong link type (%d)", err
);
5394 rej
.reason
= l2cap_err_to_reason(err
);
5395 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5403 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5404 struct sk_buff
*skb
)
5406 struct hci_conn
*hcon
= conn
->hcon
;
5407 u8
*data
= skb
->data
;
5409 struct l2cap_cmd_hdr cmd
;
5412 l2cap_raw_recv(conn
, skb
);
5414 if (hcon
->type
!= ACL_LINK
)
5417 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5419 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5420 data
+= L2CAP_CMD_HDR_SIZE
;
5421 len
-= L2CAP_CMD_HDR_SIZE
;
5423 cmd_len
= le16_to_cpu(cmd
.len
);
5425 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5428 if (cmd_len
> len
|| !cmd
.ident
) {
5429 BT_DBG("corrupted command");
5433 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5435 struct l2cap_cmd_rej_unk rej
;
5437 BT_ERR("Wrong link type (%d)", err
);
5439 rej
.reason
= l2cap_err_to_reason(err
);
5440 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5452 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5454 u16 our_fcs
, rcv_fcs
;
5457 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5458 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5460 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5462 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5463 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5464 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5465 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5467 if (our_fcs
!= rcv_fcs
)
5473 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5475 struct l2cap_ctrl control
;
5477 BT_DBG("chan %p", chan
);
5479 memset(&control
, 0, sizeof(control
));
5482 control
.reqseq
= chan
->buffer_seq
;
5483 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5485 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5486 control
.super
= L2CAP_SUPER_RNR
;
5487 l2cap_send_sframe(chan
, &control
);
5490 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5491 chan
->unacked_frames
> 0)
5492 __set_retrans_timer(chan
);
5494 /* Send pending iframes */
5495 l2cap_ertm_send(chan
);
5497 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5498 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5499 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5502 control
.super
= L2CAP_SUPER_RR
;
5503 l2cap_send_sframe(chan
, &control
);
5507 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5508 struct sk_buff
**last_frag
)
5510 /* skb->len reflects data in skb as well as all fragments
5511 * skb->data_len reflects only data in fragments
5513 if (!skb_has_frag_list(skb
))
5514 skb_shinfo(skb
)->frag_list
= new_frag
;
5516 new_frag
->next
= NULL
;
5518 (*last_frag
)->next
= new_frag
;
5519 *last_frag
= new_frag
;
5521 skb
->len
+= new_frag
->len
;
5522 skb
->data_len
+= new_frag
->len
;
5523 skb
->truesize
+= new_frag
->truesize
;
5526 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5527 struct l2cap_ctrl
*control
)
5531 switch (control
->sar
) {
5532 case L2CAP_SAR_UNSEGMENTED
:
5536 err
= chan
->ops
->recv(chan
, skb
);
5539 case L2CAP_SAR_START
:
5543 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5544 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5546 if (chan
->sdu_len
> chan
->imtu
) {
5551 if (skb
->len
>= chan
->sdu_len
)
5555 chan
->sdu_last_frag
= skb
;
5561 case L2CAP_SAR_CONTINUE
:
5565 append_skb_frag(chan
->sdu
, skb
,
5566 &chan
->sdu_last_frag
);
5569 if (chan
->sdu
->len
>= chan
->sdu_len
)
5579 append_skb_frag(chan
->sdu
, skb
,
5580 &chan
->sdu_last_frag
);
5583 if (chan
->sdu
->len
!= chan
->sdu_len
)
5586 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5589 /* Reassembly complete */
5591 chan
->sdu_last_frag
= NULL
;
5599 kfree_skb(chan
->sdu
);
5601 chan
->sdu_last_frag
= NULL
;
/* Placeholder: re-segment queued data after a channel move. */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5614 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5618 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5621 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5622 l2cap_tx(chan
, NULL
, NULL
, event
);
5625 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5628 /* Pass sequential frames to l2cap_reassemble_sdu()
5629 * until a gap is encountered.
5632 BT_DBG("chan %p", chan
);
5634 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5635 struct sk_buff
*skb
;
5636 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5637 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5639 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5644 skb_unlink(skb
, &chan
->srej_q
);
5645 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5646 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
5651 if (skb_queue_empty(&chan
->srej_q
)) {
5652 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5653 l2cap_send_ack(chan
);
5659 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5660 struct l2cap_ctrl
*control
)
5662 struct sk_buff
*skb
;
5664 BT_DBG("chan %p, control %p", chan
, control
);
5666 if (control
->reqseq
== chan
->next_tx_seq
) {
5667 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5668 l2cap_send_disconn_req(chan
, ECONNRESET
);
5672 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5675 BT_DBG("Seq %d not available for retransmission",
5680 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5681 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5682 l2cap_send_disconn_req(chan
, ECONNRESET
);
5686 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5688 if (control
->poll
) {
5689 l2cap_pass_to_tx(chan
, control
);
5691 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5692 l2cap_retransmit(chan
, control
);
5693 l2cap_ertm_send(chan
);
5695 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5696 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5697 chan
->srej_save_reqseq
= control
->reqseq
;
5700 l2cap_pass_to_tx_fbit(chan
, control
);
5702 if (control
->final
) {
5703 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5704 !test_and_clear_bit(CONN_SREJ_ACT
,
5706 l2cap_retransmit(chan
, control
);
5708 l2cap_retransmit(chan
, control
);
5709 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5710 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5711 chan
->srej_save_reqseq
= control
->reqseq
;
5717 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5718 struct l2cap_ctrl
*control
)
5720 struct sk_buff
*skb
;
5722 BT_DBG("chan %p, control %p", chan
, control
);
5724 if (control
->reqseq
== chan
->next_tx_seq
) {
5725 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5726 l2cap_send_disconn_req(chan
, ECONNRESET
);
5730 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5732 if (chan
->max_tx
&& skb
&&
5733 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5734 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5735 l2cap_send_disconn_req(chan
, ECONNRESET
);
5739 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5741 l2cap_pass_to_tx(chan
, control
);
5743 if (control
->final
) {
5744 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
5745 l2cap_retransmit_all(chan
, control
);
5747 l2cap_retransmit_all(chan
, control
);
5748 l2cap_ertm_send(chan
);
5749 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
5750 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
5754 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
5756 BT_DBG("chan %p, txseq %d", chan
, txseq
);
5758 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
5759 chan
->expected_tx_seq
);
5761 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
5762 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5764 /* See notes below regarding "double poll" and
5767 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5768 BT_DBG("Invalid/Ignore - after SREJ");
5769 return L2CAP_TXSEQ_INVALID_IGNORE
;
5771 BT_DBG("Invalid - in window after SREJ sent");
5772 return L2CAP_TXSEQ_INVALID
;
5776 if (chan
->srej_list
.head
== txseq
) {
5777 BT_DBG("Expected SREJ");
5778 return L2CAP_TXSEQ_EXPECTED_SREJ
;
5781 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
5782 BT_DBG("Duplicate SREJ - txseq already stored");
5783 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
5786 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
5787 BT_DBG("Unexpected SREJ - not requested");
5788 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
5792 if (chan
->expected_tx_seq
== txseq
) {
5793 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5795 BT_DBG("Invalid - txseq outside tx window");
5796 return L2CAP_TXSEQ_INVALID
;
5799 return L2CAP_TXSEQ_EXPECTED
;
5803 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
5804 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
5805 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5806 return L2CAP_TXSEQ_DUPLICATE
;
5809 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
5810 /* A source of invalid packets is a "double poll" condition,
5811 * where delays cause us to send multiple poll packets. If
5812 * the remote stack receives and processes both polls,
5813 * sequence numbers can wrap around in such a way that a
5814 * resent frame has a sequence number that looks like new data
5815 * with a sequence gap. This would trigger an erroneous SREJ
5818 * Fortunately, this is impossible with a tx window that's
5819 * less than half of the maximum sequence number, which allows
5820 * invalid frames to be safely ignored.
5822 * With tx window sizes greater than half of the tx window
5823 * maximum, the frame is invalid and cannot be ignored. This
5824 * causes a disconnect.
5827 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5828 BT_DBG("Invalid/Ignore - txseq outside tx window");
5829 return L2CAP_TXSEQ_INVALID_IGNORE
;
5831 BT_DBG("Invalid - txseq outside tx window");
5832 return L2CAP_TXSEQ_INVALID
;
5835 BT_DBG("Unexpected - txseq indicates missing frames");
5836 return L2CAP_TXSEQ_UNEXPECTED
;
5840 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
5841 struct l2cap_ctrl
*control
,
5842 struct sk_buff
*skb
, u8 event
)
5845 bool skb_in_use
= false;
5847 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
5851 case L2CAP_EV_RECV_IFRAME
:
5852 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
5853 case L2CAP_TXSEQ_EXPECTED
:
5854 l2cap_pass_to_tx(chan
, control
);
5856 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5857 BT_DBG("Busy, discarding expected seq %d",
5862 chan
->expected_tx_seq
= __next_seq(chan
,
5865 chan
->buffer_seq
= chan
->expected_tx_seq
;
5868 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
5872 if (control
->final
) {
5873 if (!test_and_clear_bit(CONN_REJ_ACT
,
5874 &chan
->conn_state
)) {
5876 l2cap_retransmit_all(chan
, control
);
5877 l2cap_ertm_send(chan
);
5881 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
5882 l2cap_send_ack(chan
);
5884 case L2CAP_TXSEQ_UNEXPECTED
:
5885 l2cap_pass_to_tx(chan
, control
);
5887 /* Can't issue SREJ frames in the local busy state.
5888 * Drop this frame, it will be seen as missing
5889 * when local busy is exited.
5891 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5892 BT_DBG("Busy, discarding unexpected seq %d",
5897 /* There was a gap in the sequence, so an SREJ
5898 * must be sent for each missing frame. The
5899 * current frame is stored for later use.
5901 skb_queue_tail(&chan
->srej_q
, skb
);
5903 BT_DBG("Queued %p (queue len %d)", skb
,
5904 skb_queue_len(&chan
->srej_q
));
5906 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5907 l2cap_seq_list_clear(&chan
->srej_list
);
5908 l2cap_send_srej(chan
, control
->txseq
);
5910 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
5912 case L2CAP_TXSEQ_DUPLICATE
:
5913 l2cap_pass_to_tx(chan
, control
);
5915 case L2CAP_TXSEQ_INVALID_IGNORE
:
5917 case L2CAP_TXSEQ_INVALID
:
5919 l2cap_send_disconn_req(chan
, ECONNRESET
);
5923 case L2CAP_EV_RECV_RR
:
5924 l2cap_pass_to_tx(chan
, control
);
5925 if (control
->final
) {
5926 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5928 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
5929 !__chan_is_moving(chan
)) {
5931 l2cap_retransmit_all(chan
, control
);
5934 l2cap_ertm_send(chan
);
5935 } else if (control
->poll
) {
5936 l2cap_send_i_or_rr_or_rnr(chan
);
5938 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5939 &chan
->conn_state
) &&
5940 chan
->unacked_frames
)
5941 __set_retrans_timer(chan
);
5943 l2cap_ertm_send(chan
);
5946 case L2CAP_EV_RECV_RNR
:
5947 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5948 l2cap_pass_to_tx(chan
, control
);
5949 if (control
&& control
->poll
) {
5950 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5951 l2cap_send_rr_or_rnr(chan
, 0);
5953 __clear_retrans_timer(chan
);
5954 l2cap_seq_list_clear(&chan
->retrans_list
);
5956 case L2CAP_EV_RECV_REJ
:
5957 l2cap_handle_rej(chan
, control
);
5959 case L2CAP_EV_RECV_SREJ
:
5960 l2cap_handle_srej(chan
, control
);
5966 if (skb
&& !skb_in_use
) {
5967 BT_DBG("Freeing %p", skb
);
5974 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
5975 struct l2cap_ctrl
*control
,
5976 struct sk_buff
*skb
, u8 event
)
5979 u16 txseq
= control
->txseq
;
5980 bool skb_in_use
= false;
5982 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
5986 case L2CAP_EV_RECV_IFRAME
:
5987 switch (l2cap_classify_txseq(chan
, txseq
)) {
5988 case L2CAP_TXSEQ_EXPECTED
:
5989 /* Keep frame for reassembly later */
5990 l2cap_pass_to_tx(chan
, control
);
5991 skb_queue_tail(&chan
->srej_q
, skb
);
5993 BT_DBG("Queued %p (queue len %d)", skb
,
5994 skb_queue_len(&chan
->srej_q
));
5996 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
5998 case L2CAP_TXSEQ_EXPECTED_SREJ
:
5999 l2cap_seq_list_pop(&chan
->srej_list
);
6001 l2cap_pass_to_tx(chan
, control
);
6002 skb_queue_tail(&chan
->srej_q
, skb
);
6004 BT_DBG("Queued %p (queue len %d)", skb
,
6005 skb_queue_len(&chan
->srej_q
));
6007 err
= l2cap_rx_queued_iframes(chan
);
6012 case L2CAP_TXSEQ_UNEXPECTED
:
6013 /* Got a frame that can't be reassembled yet.
6014 * Save it for later, and send SREJs to cover
6015 * the missing frames.
6017 skb_queue_tail(&chan
->srej_q
, skb
);
6019 BT_DBG("Queued %p (queue len %d)", skb
,
6020 skb_queue_len(&chan
->srej_q
));
6022 l2cap_pass_to_tx(chan
, control
);
6023 l2cap_send_srej(chan
, control
->txseq
);
6025 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6026 /* This frame was requested with an SREJ, but
6027 * some expected retransmitted frames are
6028 * missing. Request retransmission of missing
6031 skb_queue_tail(&chan
->srej_q
, skb
);
6033 BT_DBG("Queued %p (queue len %d)", skb
,
6034 skb_queue_len(&chan
->srej_q
));
6036 l2cap_pass_to_tx(chan
, control
);
6037 l2cap_send_srej_list(chan
, control
->txseq
);
6039 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6040 /* We've already queued this frame. Drop this copy. */
6041 l2cap_pass_to_tx(chan
, control
);
6043 case L2CAP_TXSEQ_DUPLICATE
:
6044 /* Expecting a later sequence number, so this frame
6045 * was already received. Ignore it completely.
6048 case L2CAP_TXSEQ_INVALID_IGNORE
:
6050 case L2CAP_TXSEQ_INVALID
:
6052 l2cap_send_disconn_req(chan
, ECONNRESET
);
6056 case L2CAP_EV_RECV_RR
:
6057 l2cap_pass_to_tx(chan
, control
);
6058 if (control
->final
) {
6059 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6061 if (!test_and_clear_bit(CONN_REJ_ACT
,
6062 &chan
->conn_state
)) {
6064 l2cap_retransmit_all(chan
, control
);
6067 l2cap_ertm_send(chan
);
6068 } else if (control
->poll
) {
6069 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6070 &chan
->conn_state
) &&
6071 chan
->unacked_frames
) {
6072 __set_retrans_timer(chan
);
6075 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6076 l2cap_send_srej_tail(chan
);
6078 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6079 &chan
->conn_state
) &&
6080 chan
->unacked_frames
)
6081 __set_retrans_timer(chan
);
6083 l2cap_send_ack(chan
);
6086 case L2CAP_EV_RECV_RNR
:
6087 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6088 l2cap_pass_to_tx(chan
, control
);
6089 if (control
->poll
) {
6090 l2cap_send_srej_tail(chan
);
6092 struct l2cap_ctrl rr_control
;
6093 memset(&rr_control
, 0, sizeof(rr_control
));
6094 rr_control
.sframe
= 1;
6095 rr_control
.super
= L2CAP_SUPER_RR
;
6096 rr_control
.reqseq
= chan
->buffer_seq
;
6097 l2cap_send_sframe(chan
, &rr_control
);
6101 case L2CAP_EV_RECV_REJ
:
6102 l2cap_handle_rej(chan
, control
);
6104 case L2CAP_EV_RECV_SREJ
:
6105 l2cap_handle_srej(chan
, control
);
6109 if (skb
&& !skb_in_use
) {
6110 BT_DBG("Freeing %p", skb
);
6117 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6119 BT_DBG("chan %p", chan
);
6121 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6124 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6126 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6128 return l2cap_resegment(chan
);
6131 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6132 struct l2cap_ctrl
*control
,
6133 struct sk_buff
*skb
, u8 event
)
6137 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6143 l2cap_process_reqseq(chan
, control
->reqseq
);
6145 if (!skb_queue_empty(&chan
->tx_q
))
6146 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6148 chan
->tx_send_head
= NULL
;
6150 /* Rewind next_tx_seq to the point expected
6153 chan
->next_tx_seq
= control
->reqseq
;
6154 chan
->unacked_frames
= 0;
6156 err
= l2cap_finish_move(chan
);
6160 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6161 l2cap_send_i_or_rr_or_rnr(chan
);
6163 if (event
== L2CAP_EV_RECV_IFRAME
)
6166 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6169 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6170 struct l2cap_ctrl
*control
,
6171 struct sk_buff
*skb
, u8 event
)
6175 if (!control
->final
)
6178 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6180 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6181 l2cap_process_reqseq(chan
, control
->reqseq
);
6183 if (!skb_queue_empty(&chan
->tx_q
))
6184 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6186 chan
->tx_send_head
= NULL
;
6188 /* Rewind next_tx_seq to the point expected
6191 chan
->next_tx_seq
= control
->reqseq
;
6192 chan
->unacked_frames
= 0;
6195 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6197 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6199 err
= l2cap_resegment(chan
);
6202 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6207 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6209 /* Make sure reqseq is for a packet that has been sent but not acked */
6212 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6213 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6216 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6217 struct sk_buff
*skb
, u8 event
)
6221 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6222 control
, skb
, event
, chan
->rx_state
);
6224 if (__valid_reqseq(chan
, control
->reqseq
)) {
6225 switch (chan
->rx_state
) {
6226 case L2CAP_RX_STATE_RECV
:
6227 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6229 case L2CAP_RX_STATE_SREJ_SENT
:
6230 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6233 case L2CAP_RX_STATE_WAIT_P
:
6234 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6236 case L2CAP_RX_STATE_WAIT_F
:
6237 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6244 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6245 control
->reqseq
, chan
->next_tx_seq
,
6246 chan
->expected_ack_seq
);
6247 l2cap_send_disconn_req(chan
, ECONNRESET
);
6253 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6254 struct sk_buff
*skb
)
6258 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6261 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6262 L2CAP_TXSEQ_EXPECTED
) {
6263 l2cap_pass_to_tx(chan
, control
);
6265 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6266 __next_seq(chan
, chan
->buffer_seq
));
6268 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6270 l2cap_reassemble_sdu(chan
, skb
, control
);
6273 kfree_skb(chan
->sdu
);
6276 chan
->sdu_last_frag
= NULL
;
6280 BT_DBG("Freeing %p", skb
);
6285 chan
->last_acked_seq
= control
->txseq
;
6286 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6291 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6293 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6297 __unpack_control(chan
, skb
);
6302 * We can just drop the corrupted I-frame here.
6303 * Receiver will miss it and start proper recovery
6304 * procedures and ask for retransmission.
6306 if (l2cap_check_fcs(chan
, skb
))
6309 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6310 len
-= L2CAP_SDULEN_SIZE
;
6312 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6313 len
-= L2CAP_FCS_SIZE
;
6315 if (len
> chan
->mps
) {
6316 l2cap_send_disconn_req(chan
, ECONNRESET
);
6320 if (!control
->sframe
) {
6323 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6324 control
->sar
, control
->reqseq
, control
->final
,
6327 /* Validate F-bit - F=0 always valid, F=1 only
6328 * valid in TX WAIT_F
6330 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6333 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6334 event
= L2CAP_EV_RECV_IFRAME
;
6335 err
= l2cap_rx(chan
, control
, skb
, event
);
6337 err
= l2cap_stream_rx(chan
, control
, skb
);
6341 l2cap_send_disconn_req(chan
, ECONNRESET
);
6343 const u8 rx_func_to_event
[4] = {
6344 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6345 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6348 /* Only I-frames are expected in streaming mode */
6349 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6352 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6353 control
->reqseq
, control
->final
, control
->poll
,
6357 BT_ERR("Trailing bytes: %d in sframe", len
);
6358 l2cap_send_disconn_req(chan
, ECONNRESET
);
6362 /* Validate F and P bits */
6363 if (control
->final
&& (control
->poll
||
6364 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6367 event
= rx_func_to_event
[control
->super
];
6368 if (l2cap_rx(chan
, control
, skb
, event
))
6369 l2cap_send_disconn_req(chan
, ECONNRESET
);
6379 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6380 struct sk_buff
*skb
)
6382 struct l2cap_chan
*chan
;
6384 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6386 if (cid
== L2CAP_CID_A2MP
) {
6387 chan
= a2mp_channel_create(conn
, skb
);
6393 l2cap_chan_lock(chan
);
6395 BT_DBG("unknown cid 0x%4.4x", cid
);
6396 /* Drop packet and return */
6402 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6404 if (chan
->state
!= BT_CONNECTED
)
6407 switch (chan
->mode
) {
6408 case L2CAP_MODE_BASIC
:
6409 /* If socket recv buffers overflows we drop data here
6410 * which is *bad* because L2CAP has to be reliable.
6411 * But we don't have any other choice. L2CAP doesn't
6412 * provide flow control mechanism. */
6414 if (chan
->imtu
< skb
->len
)
6417 if (!chan
->ops
->recv(chan
, skb
))
6421 case L2CAP_MODE_ERTM
:
6422 case L2CAP_MODE_STREAMING
:
6423 l2cap_data_rcv(chan
, skb
);
6427 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6435 l2cap_chan_unlock(chan
);
6438 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6439 struct sk_buff
*skb
)
6441 struct hci_conn
*hcon
= conn
->hcon
;
6442 struct l2cap_chan
*chan
;
6444 if (hcon
->type
!= ACL_LINK
)
6447 chan
= l2cap_global_chan_by_psm(0, psm
, &conn
->hcon
->src
,
6452 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6454 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6457 if (chan
->imtu
< skb
->len
)
6460 /* Store remote BD_ADDR and PSM for msg_name */
6461 bacpy(&bt_cb(skb
)->bdaddr
, &conn
->hcon
->dst
);
6462 bt_cb(skb
)->psm
= psm
;
6464 if (!chan
->ops
->recv(chan
, skb
))
6471 static void l2cap_att_channel(struct l2cap_conn
*conn
,
6472 struct sk_buff
*skb
)
6474 struct hci_conn
*hcon
= conn
->hcon
;
6475 struct l2cap_chan
*chan
;
6477 if (hcon
->type
!= LE_LINK
)
6480 chan
= l2cap_global_chan_by_scid(BT_CONNECTED
, L2CAP_CID_ATT
,
6481 &conn
->hcon
->src
, &conn
->hcon
->dst
);
6485 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6487 if (chan
->imtu
< skb
->len
)
6490 if (!chan
->ops
->recv(chan
, skb
))
6497 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6499 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6503 skb_pull(skb
, L2CAP_HDR_SIZE
);
6504 cid
= __le16_to_cpu(lh
->cid
);
6505 len
= __le16_to_cpu(lh
->len
);
6507 if (len
!= skb
->len
) {
6512 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6515 case L2CAP_CID_SIGNALING
:
6516 l2cap_sig_channel(conn
, skb
);
6519 case L2CAP_CID_CONN_LESS
:
6520 psm
= get_unaligned((__le16
*) skb
->data
);
6521 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6522 l2cap_conless_channel(conn
, psm
, skb
);
6526 l2cap_att_channel(conn
, skb
);
6529 case L2CAP_CID_LE_SIGNALING
:
6530 l2cap_le_sig_channel(conn
, skb
);
6534 if (smp_sig_channel(conn
, skb
))
6535 l2cap_conn_del(conn
->hcon
, EACCES
);
6539 l2cap_data_channel(conn
, cid
, skb
);
6544 /* ---- L2CAP interface with lower layer (HCI) ---- */
6546 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
6548 int exact
= 0, lm1
= 0, lm2
= 0;
6549 struct l2cap_chan
*c
;
6551 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
6553 /* Find listening sockets and check their link_mode */
6554 read_lock(&chan_list_lock
);
6555 list_for_each_entry(c
, &chan_list
, global_l
) {
6556 if (c
->state
!= BT_LISTEN
)
6559 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
6560 lm1
|= HCI_LM_ACCEPT
;
6561 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
6562 lm1
|= HCI_LM_MASTER
;
6564 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
6565 lm2
|= HCI_LM_ACCEPT
;
6566 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
6567 lm2
|= HCI_LM_MASTER
;
6570 read_unlock(&chan_list_lock
);
6572 return exact
? lm1
: lm2
;
6575 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
6577 struct l2cap_conn
*conn
;
6579 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
6582 conn
= l2cap_conn_add(hcon
);
6584 l2cap_conn_ready(conn
);
6586 l2cap_conn_del(hcon
, bt_to_errno(status
));
6590 int l2cap_disconn_ind(struct hci_conn
*hcon
)
6592 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6594 BT_DBG("hcon %p", hcon
);
6597 return HCI_ERROR_REMOTE_USER_TERM
;
6598 return conn
->disc_reason
;
6601 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
6603 BT_DBG("hcon %p reason %d", hcon
, reason
);
6605 l2cap_conn_del(hcon
, bt_to_errno(reason
));
6608 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
6610 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
6613 if (encrypt
== 0x00) {
6614 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
6615 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
6616 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
6617 l2cap_chan_close(chan
, ECONNREFUSED
);
6619 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
6620 __clear_chan_timer(chan
);
6624 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
6626 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6627 struct l2cap_chan
*chan
;
6632 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
6634 if (hcon
->type
== LE_LINK
) {
6635 if (!status
&& encrypt
)
6636 smp_distribute_keys(conn
, 0);
6637 cancel_delayed_work(&conn
->security_timer
);
6640 mutex_lock(&conn
->chan_lock
);
6642 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
6643 l2cap_chan_lock(chan
);
6645 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
6646 state_to_string(chan
->state
));
6648 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
6649 l2cap_chan_unlock(chan
);
6653 if (chan
->scid
== L2CAP_CID_ATT
) {
6654 if (!status
&& encrypt
) {
6655 chan
->sec_level
= hcon
->sec_level
;
6656 l2cap_chan_ready(chan
);
6659 l2cap_chan_unlock(chan
);
6663 if (!__l2cap_no_conn_pending(chan
)) {
6664 l2cap_chan_unlock(chan
);
6668 if (!status
&& (chan
->state
== BT_CONNECTED
||
6669 chan
->state
== BT_CONFIG
)) {
6670 chan
->ops
->resume(chan
);
6671 l2cap_check_encryption(chan
, encrypt
);
6672 l2cap_chan_unlock(chan
);
6676 if (chan
->state
== BT_CONNECT
) {
6678 l2cap_start_connection(chan
);
6680 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
6682 } else if (chan
->state
== BT_CONNECT2
) {
6683 struct sock
*sk
= chan
->sk
;
6684 struct l2cap_conn_rsp rsp
;
6690 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
6691 res
= L2CAP_CR_PEND
;
6692 stat
= L2CAP_CS_AUTHOR_PEND
;
6693 chan
->ops
->defer(chan
);
6695 __l2cap_state_change(chan
, BT_CONFIG
);
6696 res
= L2CAP_CR_SUCCESS
;
6697 stat
= L2CAP_CS_NO_INFO
;
6700 __l2cap_state_change(chan
, BT_DISCONN
);
6701 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
6702 res
= L2CAP_CR_SEC_BLOCK
;
6703 stat
= L2CAP_CS_NO_INFO
;
6708 rsp
.scid
= cpu_to_le16(chan
->dcid
);
6709 rsp
.dcid
= cpu_to_le16(chan
->scid
);
6710 rsp
.result
= cpu_to_le16(res
);
6711 rsp
.status
= cpu_to_le16(stat
);
6712 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
6715 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
6716 res
== L2CAP_CR_SUCCESS
) {
6718 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
6719 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
6721 l2cap_build_conf_req(chan
, buf
),
6723 chan
->num_conf_req
++;
6727 l2cap_chan_unlock(chan
);
6730 mutex_unlock(&conn
->chan_lock
);
6735 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
6737 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6738 struct l2cap_hdr
*hdr
;
6741 /* For AMP controller do not create l2cap conn */
6742 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
6746 conn
= l2cap_conn_add(hcon
);
6751 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
6755 case ACL_START_NO_FLUSH
:
6758 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
6759 kfree_skb(conn
->rx_skb
);
6760 conn
->rx_skb
= NULL
;
6762 l2cap_conn_unreliable(conn
, ECOMM
);
6765 /* Start fragment always begin with Basic L2CAP header */
6766 if (skb
->len
< L2CAP_HDR_SIZE
) {
6767 BT_ERR("Frame is too short (len %d)", skb
->len
);
6768 l2cap_conn_unreliable(conn
, ECOMM
);
6772 hdr
= (struct l2cap_hdr
*) skb
->data
;
6773 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
6775 if (len
== skb
->len
) {
6776 /* Complete frame received */
6777 l2cap_recv_frame(conn
, skb
);
6781 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
6783 if (skb
->len
> len
) {
6784 BT_ERR("Frame is too long (len %d, expected len %d)",
6786 l2cap_conn_unreliable(conn
, ECOMM
);
6790 /* Allocate skb for the complete frame (with header) */
6791 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
6795 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
6797 conn
->rx_len
= len
- skb
->len
;
6801 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
6803 if (!conn
->rx_len
) {
6804 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
6805 l2cap_conn_unreliable(conn
, ECOMM
);
6809 if (skb
->len
> conn
->rx_len
) {
6810 BT_ERR("Fragment is too long (len %d, expected %d)",
6811 skb
->len
, conn
->rx_len
);
6812 kfree_skb(conn
->rx_skb
);
6813 conn
->rx_skb
= NULL
;
6815 l2cap_conn_unreliable(conn
, ECOMM
);
6819 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
6821 conn
->rx_len
-= skb
->len
;
6823 if (!conn
->rx_len
) {
6824 /* Complete frame received. l2cap_recv_frame
6825 * takes ownership of the skb so set the global
6826 * rx_skb pointer to NULL first.
6828 struct sk_buff
*rx_skb
= conn
->rx_skb
;
6829 conn
->rx_skb
= NULL
;
6830 l2cap_recv_frame(conn
, rx_skb
);
6840 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
6842 struct l2cap_chan
*c
;
6844 read_lock(&chan_list_lock
);
6846 list_for_each_entry(c
, &chan_list
, global_l
) {
6847 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6849 c
->state
, __le16_to_cpu(c
->psm
),
6850 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
6851 c
->sec_level
, c
->mode
);
6854 read_unlock(&chan_list_lock
);
6859 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
6861 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
6864 static const struct file_operations l2cap_debugfs_fops
= {
6865 .open
= l2cap_debugfs_open
,
6867 .llseek
= seq_lseek
,
6868 .release
= single_release
,
6871 static struct dentry
*l2cap_debugfs
;
6873 int __init
l2cap_init(void)
6877 err
= l2cap_init_sockets();
6882 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
6883 NULL
, &l2cap_debugfs_fops
);
6885 BT_ERR("Failed to create L2CAP debug file");
6891 void l2cap_exit(void)
6893 debugfs_remove(l2cap_debugfs
);
6894 l2cap_cleanup_sockets();
6897 module_param(disable_ertm
, bool, 0644);
6898 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");