   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
82 list_for_each_entry(c
, &conn
->chan_l
, list
) {
89 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
93 list_for_each_entry(c
, &conn
->chan_l
, list
) {
100 /* Find channel with given SCID.
101 * Returns locked socket */
102 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
104 struct l2cap_chan
*c
;
106 mutex_lock(&conn
->chan_lock
);
107 c
= __l2cap_get_chan_by_scid(conn
, cid
);
108 mutex_unlock(&conn
->chan_lock
);
113 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
115 struct l2cap_chan
*c
;
117 list_for_each_entry(c
, &conn
->chan_l
, list
) {
118 if (c
->ident
== ident
)
124 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
126 struct l2cap_chan
*c
;
128 list_for_each_entry(c
, &chan_list
, global_l
) {
129 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
135 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
139 write_lock(&chan_list_lock
);
141 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
154 for (p
= 0x1001; p
< 0x1100; p
+= 2)
155 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
156 chan
->psm
= cpu_to_le16(p
);
157 chan
->sport
= cpu_to_le16(p
);
164 write_unlock(&chan_list_lock
);
168 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
170 write_lock(&chan_list_lock
);
174 write_unlock(&chan_list_lock
);
179 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
181 u16 cid
= L2CAP_CID_DYN_START
;
183 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
184 if (!__l2cap_get_chan_by_scid(conn
, cid
))
191 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
193 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
194 state_to_string(state
));
197 chan
->ops
->state_change(chan
->data
, state
);
200 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
202 struct sock
*sk
= chan
->sk
;
205 __l2cap_state_change(chan
, state
);
209 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
211 struct sock
*sk
= chan
->sk
;
216 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
218 struct sock
*sk
= chan
->sk
;
221 __l2cap_chan_set_err(chan
, err
);
225 /* ---- L2CAP sequence number lists ---- */
227 /* For ERTM, ordered lists of sequence numbers must be tracked for
228 * SREJ requests that are received and for frames that are to be
229 * retransmitted. These seq_list functions implement a singly-linked
230 * list in an array, where membership in the list can also be checked
231 * in constant time. Items can also be added to the tail of the list
232 * and removed from the head in constant time, without further memory
236 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
238 size_t alloc_size
, i
;
240 /* Allocated size is a power of 2 to map sequence numbers
241 * (which may be up to 14 bits) in to a smaller array that is
242 * sized for the negotiated ERTM transmit windows.
244 alloc_size
= roundup_pow_of_two(size
);
246 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
250 seq_list
->mask
= alloc_size
- 1;
251 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
252 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
253 for (i
= 0; i
< alloc_size
; i
++)
254 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
259 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
261 kfree(seq_list
->list
);
264 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
267 /* Constant-time check for list membership */
268 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
271 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
273 u16 mask
= seq_list
->mask
;
275 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
276 /* In case someone tries to pop the head of an empty list */
277 return L2CAP_SEQ_LIST_CLEAR
;
278 } else if (seq_list
->head
== seq
) {
279 /* Head can be removed in constant time */
280 seq_list
->head
= seq_list
->list
[seq
& mask
];
281 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
283 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
284 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
285 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
288 /* Walk the list to find the sequence number */
289 u16 prev
= seq_list
->head
;
290 while (seq_list
->list
[prev
& mask
] != seq
) {
291 prev
= seq_list
->list
[prev
& mask
];
292 if (prev
== L2CAP_SEQ_LIST_TAIL
)
293 return L2CAP_SEQ_LIST_CLEAR
;
296 /* Unlink the number from the list and clear it */
297 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
298 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
299 if (seq_list
->tail
== seq
)
300 seq_list
->tail
= prev
;
305 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
307 /* Remove the head in constant time */
308 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
311 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
313 if (seq_list
->head
!= L2CAP_SEQ_LIST_CLEAR
) {
315 for (i
= 0; i
<= seq_list
->mask
; i
++)
316 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
318 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
319 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
323 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
325 u16 mask
= seq_list
->mask
;
327 /* All appends happen in constant time */
329 if (seq_list
->list
[seq
& mask
] == L2CAP_SEQ_LIST_CLEAR
) {
330 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
331 seq_list
->head
= seq
;
333 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
335 seq_list
->tail
= seq
;
336 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state and drop the timer's reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
370 struct l2cap_chan
*l2cap_chan_create(void)
372 struct l2cap_chan
*chan
;
374 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
378 mutex_init(&chan
->lock
);
380 write_lock(&chan_list_lock
);
381 list_add(&chan
->global_l
, &chan_list
);
382 write_unlock(&chan_list_lock
);
384 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
386 chan
->state
= BT_OPEN
;
388 atomic_set(&chan
->refcnt
, 1);
390 BT_DBG("chan %p", chan
);
395 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
397 write_lock(&chan_list_lock
);
398 list_del(&chan
->global_l
);
399 write_unlock(&chan_list_lock
);
401 l2cap_chan_put(chan
);
404 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
406 chan
->fcs
= L2CAP_FCS_CRC16
;
407 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
408 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
409 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
410 chan
->sec_level
= BT_SECURITY_LOW
;
412 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
415 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
417 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
418 __le16_to_cpu(chan
->psm
), chan
->dcid
);
420 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
424 switch (chan
->chan_type
) {
425 case L2CAP_CHAN_CONN_ORIENTED
:
426 if (conn
->hcon
->type
== LE_LINK
) {
428 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
429 chan
->scid
= L2CAP_CID_LE_DATA
;
430 chan
->dcid
= L2CAP_CID_LE_DATA
;
432 /* Alloc CID for connection-oriented socket */
433 chan
->scid
= l2cap_alloc_cid(conn
);
434 chan
->omtu
= L2CAP_DEFAULT_MTU
;
438 case L2CAP_CHAN_CONN_LESS
:
439 /* Connectionless socket */
440 chan
->scid
= L2CAP_CID_CONN_LESS
;
441 chan
->dcid
= L2CAP_CID_CONN_LESS
;
442 chan
->omtu
= L2CAP_DEFAULT_MTU
;
446 /* Raw socket can send/recv signalling messages only */
447 chan
->scid
= L2CAP_CID_SIGNALING
;
448 chan
->dcid
= L2CAP_CID_SIGNALING
;
449 chan
->omtu
= L2CAP_DEFAULT_MTU
;
452 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
453 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
454 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
455 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
456 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
457 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
459 l2cap_chan_hold(chan
);
461 list_add(&chan
->list
, &conn
->chan_l
);
464 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
466 mutex_lock(&conn
->chan_lock
);
467 __l2cap_chan_add(conn
, chan
);
468 mutex_unlock(&conn
->chan_lock
);
471 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
473 struct sock
*sk
= chan
->sk
;
474 struct l2cap_conn
*conn
= chan
->conn
;
475 struct sock
*parent
= bt_sk(sk
)->parent
;
477 __clear_chan_timer(chan
);
479 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
482 /* Delete from channel list */
483 list_del(&chan
->list
);
485 l2cap_chan_put(chan
);
488 hci_conn_put(conn
->hcon
);
493 __l2cap_state_change(chan
, BT_CLOSED
);
494 sock_set_flag(sk
, SOCK_ZAPPED
);
497 __l2cap_chan_set_err(chan
, err
);
500 bt_accept_unlink(sk
);
501 parent
->sk_data_ready(parent
, 0);
503 sk
->sk_state_change(sk
);
507 if (!(test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
) &&
508 test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)))
511 skb_queue_purge(&chan
->tx_q
);
513 if (chan
->mode
== L2CAP_MODE_ERTM
) {
514 struct srej_list
*l
, *tmp
;
516 __clear_retrans_timer(chan
);
517 __clear_monitor_timer(chan
);
518 __clear_ack_timer(chan
);
520 skb_queue_purge(&chan
->srej_q
);
522 l2cap_seq_list_free(&chan
->srej_list
);
523 l2cap_seq_list_free(&chan
->retrans_list
);
524 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
531 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
535 BT_DBG("parent %p", parent
);
537 /* Close not yet accepted channels */
538 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
539 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
541 l2cap_chan_lock(chan
);
542 __clear_chan_timer(chan
);
543 l2cap_chan_close(chan
, ECONNRESET
);
544 l2cap_chan_unlock(chan
);
546 chan
->ops
->close(chan
->data
);
550 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
552 struct l2cap_conn
*conn
= chan
->conn
;
553 struct sock
*sk
= chan
->sk
;
555 BT_DBG("chan %p state %s sk %p", chan
,
556 state_to_string(chan
->state
), sk
);
558 switch (chan
->state
) {
561 l2cap_chan_cleanup_listen(sk
);
563 __l2cap_state_change(chan
, BT_CLOSED
);
564 sock_set_flag(sk
, SOCK_ZAPPED
);
570 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
571 conn
->hcon
->type
== ACL_LINK
) {
572 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
573 l2cap_send_disconn_req(conn
, chan
, reason
);
575 l2cap_chan_del(chan
, reason
);
579 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
580 conn
->hcon
->type
== ACL_LINK
) {
581 struct l2cap_conn_rsp rsp
;
584 if (bt_sk(sk
)->defer_setup
)
585 result
= L2CAP_CR_SEC_BLOCK
;
587 result
= L2CAP_CR_BAD_PSM
;
588 l2cap_state_change(chan
, BT_DISCONN
);
590 rsp
.scid
= cpu_to_le16(chan
->dcid
);
591 rsp
.dcid
= cpu_to_le16(chan
->scid
);
592 rsp
.result
= cpu_to_le16(result
);
593 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
594 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
598 l2cap_chan_del(chan
, reason
);
603 l2cap_chan_del(chan
, reason
);
608 sock_set_flag(sk
, SOCK_ZAPPED
);
614 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
616 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
617 switch (chan
->sec_level
) {
618 case BT_SECURITY_HIGH
:
619 return HCI_AT_DEDICATED_BONDING_MITM
;
620 case BT_SECURITY_MEDIUM
:
621 return HCI_AT_DEDICATED_BONDING
;
623 return HCI_AT_NO_BONDING
;
625 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
626 if (chan
->sec_level
== BT_SECURITY_LOW
)
627 chan
->sec_level
= BT_SECURITY_SDP
;
629 if (chan
->sec_level
== BT_SECURITY_HIGH
)
630 return HCI_AT_NO_BONDING_MITM
;
632 return HCI_AT_NO_BONDING
;
634 switch (chan
->sec_level
) {
635 case BT_SECURITY_HIGH
:
636 return HCI_AT_GENERAL_BONDING_MITM
;
637 case BT_SECURITY_MEDIUM
:
638 return HCI_AT_GENERAL_BONDING
;
640 return HCI_AT_NO_BONDING
;
645 /* Service level security */
646 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
648 struct l2cap_conn
*conn
= chan
->conn
;
651 auth_type
= l2cap_get_auth_type(chan
);
653 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
656 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
660 /* Get next available identificator.
661 * 1 - 128 are used by kernel.
662 * 129 - 199 are reserved.
663 * 200 - 254 are used by utilities like l2ping, etc.
666 spin_lock(&conn
->lock
);
668 if (++conn
->tx_ident
> 128)
673 spin_unlock(&conn
->lock
);
678 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
680 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
683 BT_DBG("code 0x%2.2x", code
);
688 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
689 flags
= ACL_START_NO_FLUSH
;
693 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
694 skb
->priority
= HCI_PRIO_MAX
;
696 hci_send_acl(conn
->hchan
, skb
, flags
);
699 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
701 struct hci_conn
*hcon
= chan
->conn
->hcon
;
704 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
707 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
708 lmp_no_flush_capable(hcon
->hdev
))
709 flags
= ACL_START_NO_FLUSH
;
713 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
714 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
717 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
719 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
720 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
722 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
725 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
726 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
733 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
734 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
741 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
743 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
744 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
746 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
749 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
750 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
757 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
758 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
765 static inline void __unpack_control(struct l2cap_chan
*chan
,
768 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
769 __unpack_extended_control(get_unaligned_le32(skb
->data
),
770 &bt_cb(skb
)->control
);
772 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
773 &bt_cb(skb
)->control
);
777 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
781 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
782 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
784 if (control
->sframe
) {
785 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
786 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
787 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
789 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
790 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
796 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
800 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
801 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
803 if (control
->sframe
) {
804 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
805 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
806 packed
|= L2CAP_CTRL_FRAME_TYPE
;
808 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
809 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
815 static inline void __pack_control(struct l2cap_chan
*chan
,
816 struct l2cap_ctrl
*control
,
819 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
820 put_unaligned_le32(__pack_extended_control(control
),
821 skb
->data
+ L2CAP_HDR_SIZE
);
823 put_unaligned_le16(__pack_enhanced_control(control
),
824 skb
->data
+ L2CAP_HDR_SIZE
);
828 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u32 control
)
831 struct l2cap_hdr
*lh
;
832 struct l2cap_conn
*conn
= chan
->conn
;
835 if (chan
->state
!= BT_CONNECTED
)
838 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
839 hlen
= L2CAP_EXT_HDR_SIZE
;
841 hlen
= L2CAP_ENH_HDR_SIZE
;
843 if (chan
->fcs
== L2CAP_FCS_CRC16
)
844 hlen
+= L2CAP_FCS_SIZE
;
846 BT_DBG("chan %p, control 0x%8.8x", chan
, control
);
848 count
= min_t(unsigned int, conn
->mtu
, hlen
);
850 control
|= __set_sframe(chan
);
852 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
853 control
|= __set_ctrl_final(chan
);
855 if (test_and_clear_bit(CONN_SEND_PBIT
, &chan
->conn_state
))
856 control
|= __set_ctrl_poll(chan
);
858 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
862 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
863 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
864 lh
->cid
= cpu_to_le16(chan
->dcid
);
866 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
868 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
869 u16 fcs
= crc16(0, (u8
*)lh
, count
- L2CAP_FCS_SIZE
);
870 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
873 skb
->priority
= HCI_PRIO_MAX
;
874 l2cap_do_send(chan
, skb
);
877 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u32 control
)
879 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
880 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
881 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
883 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
885 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
887 l2cap_send_sframe(chan
, control
);
890 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
892 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
895 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
897 struct l2cap_conn
*conn
= chan
->conn
;
898 struct l2cap_conn_req req
;
900 req
.scid
= cpu_to_le16(chan
->scid
);
903 chan
->ident
= l2cap_get_ident(conn
);
905 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
907 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
910 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
912 struct sock
*sk
= chan
->sk
;
917 parent
= bt_sk(sk
)->parent
;
919 BT_DBG("sk %p, parent %p", sk
, parent
);
921 chan
->conf_state
= 0;
922 __clear_chan_timer(chan
);
924 __l2cap_state_change(chan
, BT_CONNECTED
);
925 sk
->sk_state_change(sk
);
928 parent
->sk_data_ready(parent
, 0);
933 static void l2cap_do_start(struct l2cap_chan
*chan
)
935 struct l2cap_conn
*conn
= chan
->conn
;
937 if (conn
->hcon
->type
== LE_LINK
) {
938 l2cap_chan_ready(chan
);
942 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
943 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
946 if (l2cap_chan_check_security(chan
) &&
947 __l2cap_no_conn_pending(chan
))
948 l2cap_send_conn_req(chan
);
950 struct l2cap_info_req req
;
951 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
953 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
954 conn
->info_ident
= l2cap_get_ident(conn
);
956 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
958 l2cap_send_cmd(conn
, conn
->info_ident
,
959 L2CAP_INFO_REQ
, sizeof(req
), &req
);
963 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
965 u32 local_feat_mask
= l2cap_feat_mask
;
967 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
970 case L2CAP_MODE_ERTM
:
971 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
972 case L2CAP_MODE_STREAMING
:
973 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
979 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
981 struct sock
*sk
= chan
->sk
;
982 struct l2cap_disconn_req req
;
987 if (chan
->mode
== L2CAP_MODE_ERTM
) {
988 __clear_retrans_timer(chan
);
989 __clear_monitor_timer(chan
);
990 __clear_ack_timer(chan
);
993 req
.dcid
= cpu_to_le16(chan
->dcid
);
994 req
.scid
= cpu_to_le16(chan
->scid
);
995 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
996 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
999 __l2cap_state_change(chan
, BT_DISCONN
);
1000 __l2cap_chan_set_err(chan
, err
);
1004 /* ---- L2CAP connections ---- */
1005 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1007 struct l2cap_chan
*chan
, *tmp
;
1009 BT_DBG("conn %p", conn
);
1011 mutex_lock(&conn
->chan_lock
);
1013 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1014 struct sock
*sk
= chan
->sk
;
1016 l2cap_chan_lock(chan
);
1018 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1019 l2cap_chan_unlock(chan
);
1023 if (chan
->state
== BT_CONNECT
) {
1024 if (!l2cap_chan_check_security(chan
) ||
1025 !__l2cap_no_conn_pending(chan
)) {
1026 l2cap_chan_unlock(chan
);
1030 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1031 && test_bit(CONF_STATE2_DEVICE
,
1032 &chan
->conf_state
)) {
1033 l2cap_chan_close(chan
, ECONNRESET
);
1034 l2cap_chan_unlock(chan
);
1038 l2cap_send_conn_req(chan
);
1040 } else if (chan
->state
== BT_CONNECT2
) {
1041 struct l2cap_conn_rsp rsp
;
1043 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1044 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1046 if (l2cap_chan_check_security(chan
)) {
1048 if (bt_sk(sk
)->defer_setup
) {
1049 struct sock
*parent
= bt_sk(sk
)->parent
;
1050 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1051 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1053 parent
->sk_data_ready(parent
, 0);
1056 __l2cap_state_change(chan
, BT_CONFIG
);
1057 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1058 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1062 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1063 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1066 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1069 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1070 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1071 l2cap_chan_unlock(chan
);
1075 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1076 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1077 l2cap_build_conf_req(chan
, buf
), buf
);
1078 chan
->num_conf_req
++;
1081 l2cap_chan_unlock(chan
);
1084 mutex_unlock(&conn
->chan_lock
);
1087 /* Find socket with cid and source/destination bdaddr.
1088 * Returns closest match, locked.
1090 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1094 struct l2cap_chan
*c
, *c1
= NULL
;
1096 read_lock(&chan_list_lock
);
1098 list_for_each_entry(c
, &chan_list
, global_l
) {
1099 struct sock
*sk
= c
->sk
;
1101 if (state
&& c
->state
!= state
)
1104 if (c
->scid
== cid
) {
1105 int src_match
, dst_match
;
1106 int src_any
, dst_any
;
1109 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1110 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1111 if (src_match
&& dst_match
) {
1112 read_unlock(&chan_list_lock
);
1117 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1118 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1119 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1120 (src_any
&& dst_any
))
1125 read_unlock(&chan_list_lock
);
1130 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1132 struct sock
*parent
, *sk
;
1133 struct l2cap_chan
*chan
, *pchan
;
1137 /* Check if we have socket listening on cid */
1138 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1139 conn
->src
, conn
->dst
);
1147 /* Check for backlog size */
1148 if (sk_acceptq_is_full(parent
)) {
1149 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
1153 chan
= pchan
->ops
->new_connection(pchan
->data
);
1159 hci_conn_hold(conn
->hcon
);
1161 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1162 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1164 bt_accept_enqueue(parent
, sk
);
1166 l2cap_chan_add(conn
, chan
);
1168 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1170 __l2cap_state_change(chan
, BT_CONNECTED
);
1171 parent
->sk_data_ready(parent
, 0);
1174 release_sock(parent
);
1177 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1179 struct l2cap_chan
*chan
;
1181 BT_DBG("conn %p", conn
);
1183 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1184 l2cap_le_conn_ready(conn
);
1186 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1187 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1189 mutex_lock(&conn
->chan_lock
);
1191 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1193 l2cap_chan_lock(chan
);
1195 if (conn
->hcon
->type
== LE_LINK
) {
1196 if (smp_conn_security(conn
, chan
->sec_level
))
1197 l2cap_chan_ready(chan
);
1199 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1200 struct sock
*sk
= chan
->sk
;
1201 __clear_chan_timer(chan
);
1203 __l2cap_state_change(chan
, BT_CONNECTED
);
1204 sk
->sk_state_change(sk
);
1207 } else if (chan
->state
== BT_CONNECT
)
1208 l2cap_do_start(chan
);
1210 l2cap_chan_unlock(chan
);
1213 mutex_unlock(&conn
->chan_lock
);
1216 /* Notify sockets that we cannot guaranty reliability anymore */
1217 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1219 struct l2cap_chan
*chan
;
1221 BT_DBG("conn %p", conn
);
1223 mutex_lock(&conn
->chan_lock
);
1225 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1226 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1227 __l2cap_chan_set_err(chan
, err
);
1230 mutex_unlock(&conn
->chan_lock
);
1233 static void l2cap_info_timeout(struct work_struct
*work
)
1235 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1238 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1239 conn
->info_ident
= 0;
1241 l2cap_conn_start(conn
);
1244 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1246 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1247 struct l2cap_chan
*chan
, *l
;
1252 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1254 kfree_skb(conn
->rx_skb
);
1256 mutex_lock(&conn
->chan_lock
);
1259 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1260 l2cap_chan_hold(chan
);
1261 l2cap_chan_lock(chan
);
1263 l2cap_chan_del(chan
, err
);
1265 l2cap_chan_unlock(chan
);
1267 chan
->ops
->close(chan
->data
);
1268 l2cap_chan_put(chan
);
1271 mutex_unlock(&conn
->chan_lock
);
1273 hci_chan_del(conn
->hchan
);
1275 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1276 cancel_delayed_work_sync(&conn
->info_timer
);
1278 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1279 cancel_delayed_work_sync(&conn
->security_timer
);
1280 smp_chan_destroy(conn
);
1283 hcon
->l2cap_data
= NULL
;
1287 static void security_timeout(struct work_struct
*work
)
1289 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1290 security_timer
.work
);
1292 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1295 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1297 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1298 struct hci_chan
*hchan
;
1303 hchan
= hci_chan_create(hcon
);
1307 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1309 hci_chan_del(hchan
);
1313 hcon
->l2cap_data
= conn
;
1315 conn
->hchan
= hchan
;
1317 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1319 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1320 conn
->mtu
= hcon
->hdev
->le_mtu
;
1322 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1324 conn
->src
= &hcon
->hdev
->bdaddr
;
1325 conn
->dst
= &hcon
->dst
;
1327 conn
->feat_mask
= 0;
1329 spin_lock_init(&conn
->lock
);
1330 mutex_init(&conn
->chan_lock
);
1332 INIT_LIST_HEAD(&conn
->chan_l
);
1334 if (hcon
->type
== LE_LINK
)
1335 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1337 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1339 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1344 /* ---- Socket interface ---- */
1346 /* Find socket with psm and source / destination bdaddr.
1347 * Returns closest match.
1349 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1353 struct l2cap_chan
*c
, *c1
= NULL
;
1355 read_lock(&chan_list_lock
);
1357 list_for_each_entry(c
, &chan_list
, global_l
) {
1358 struct sock
*sk
= c
->sk
;
1360 if (state
&& c
->state
!= state
)
1363 if (c
->psm
== psm
) {
1364 int src_match
, dst_match
;
1365 int src_any
, dst_any
;
1368 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1369 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1370 if (src_match
&& dst_match
) {
1371 read_unlock(&chan_list_lock
);
1376 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1377 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1378 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1379 (src_any
&& dst_any
))
1384 read_unlock(&chan_list_lock
);
1389 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1390 bdaddr_t
*dst
, u8 dst_type
)
1392 struct sock
*sk
= chan
->sk
;
1393 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1394 struct l2cap_conn
*conn
;
1395 struct hci_conn
*hcon
;
1396 struct hci_dev
*hdev
;
1400 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src
), batostr(dst
),
1401 dst_type
, __le16_to_cpu(chan
->psm
));
1403 hdev
= hci_get_route(dst
, src
);
1405 return -EHOSTUNREACH
;
1409 l2cap_chan_lock(chan
);
1411 /* PSM must be odd and lsb of upper byte must be 0 */
1412 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1413 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1418 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1423 switch (chan
->mode
) {
1424 case L2CAP_MODE_BASIC
:
1426 case L2CAP_MODE_ERTM
:
1427 case L2CAP_MODE_STREAMING
:
1438 switch (sk
->sk_state
) {
1442 /* Already connecting */
1448 /* Already connected */
1464 /* Set destination address and psm */
1465 bacpy(&bt_sk(sk
)->dst
, dst
);
1472 auth_type
= l2cap_get_auth_type(chan
);
1474 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1475 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1476 chan
->sec_level
, auth_type
);
1478 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1479 chan
->sec_level
, auth_type
);
1482 err
= PTR_ERR(hcon
);
1486 conn
= l2cap_conn_add(hcon
, 0);
1493 if (hcon
->type
== LE_LINK
) {
1496 if (!list_empty(&conn
->chan_l
)) {
1505 /* Update source addr of the socket */
1506 bacpy(src
, conn
->src
);
1508 l2cap_chan_unlock(chan
);
1509 l2cap_chan_add(conn
, chan
);
1510 l2cap_chan_lock(chan
);
1512 l2cap_state_change(chan
, BT_CONNECT
);
1513 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1515 if (hcon
->state
== BT_CONNECTED
) {
1516 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1517 __clear_chan_timer(chan
);
1518 if (l2cap_chan_check_security(chan
))
1519 l2cap_state_change(chan
, BT_CONNECTED
);
1521 l2cap_do_start(chan
);
1527 l2cap_chan_unlock(chan
);
1528 hci_dev_unlock(hdev
);
1533 int __l2cap_wait_ack(struct sock
*sk
)
1535 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1536 DECLARE_WAITQUEUE(wait
, current
);
1540 add_wait_queue(sk_sleep(sk
), &wait
);
1541 set_current_state(TASK_INTERRUPTIBLE
);
1542 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1546 if (signal_pending(current
)) {
1547 err
= sock_intr_errno(timeo
);
1552 timeo
= schedule_timeout(timeo
);
1554 set_current_state(TASK_INTERRUPTIBLE
);
1556 err
= sock_error(sk
);
1560 set_current_state(TASK_RUNNING
);
1561 remove_wait_queue(sk_sleep(sk
), &wait
);
1565 static void l2cap_monitor_timeout(struct work_struct
*work
)
1567 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1568 monitor_timer
.work
);
1570 BT_DBG("chan %p", chan
);
1572 l2cap_chan_lock(chan
);
1574 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1575 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1576 l2cap_chan_unlock(chan
);
1577 l2cap_chan_put(chan
);
1581 chan
->retry_count
++;
1582 __set_monitor_timer(chan
);
1584 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1585 l2cap_chan_unlock(chan
);
1586 l2cap_chan_put(chan
);
1589 static void l2cap_retrans_timeout(struct work_struct
*work
)
1591 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1592 retrans_timer
.work
);
1594 BT_DBG("chan %p", chan
);
1596 l2cap_chan_lock(chan
);
1598 chan
->retry_count
= 1;
1599 __set_monitor_timer(chan
);
1601 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
1603 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1605 l2cap_chan_unlock(chan
);
1606 l2cap_chan_put(chan
);
1609 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1611 struct sk_buff
*skb
;
1613 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1614 chan
->unacked_frames
) {
1615 if (bt_cb(skb
)->control
.txseq
== chan
->expected_ack_seq
)
1618 skb
= skb_dequeue(&chan
->tx_q
);
1621 chan
->unacked_frames
--;
1624 if (!chan
->unacked_frames
)
1625 __clear_retrans_timer(chan
);
1628 static void l2cap_streaming_send(struct l2cap_chan
*chan
)
1630 struct sk_buff
*skb
;
1634 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1635 control
= __get_control(chan
, skb
->data
+ L2CAP_HDR_SIZE
);
1636 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1637 control
|= __set_ctrl_sar(chan
, bt_cb(skb
)->control
.sar
);
1638 __put_control(chan
, control
, skb
->data
+ L2CAP_HDR_SIZE
);
1640 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1641 fcs
= crc16(0, (u8
*)skb
->data
,
1642 skb
->len
- L2CAP_FCS_SIZE
);
1643 put_unaligned_le16(fcs
,
1644 skb
->data
+ skb
->len
- L2CAP_FCS_SIZE
);
1647 l2cap_do_send(chan
, skb
);
1649 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1653 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u16 tx_seq
)
1655 struct sk_buff
*skb
, *tx_skb
;
1659 skb
= skb_peek(&chan
->tx_q
);
1663 while (bt_cb(skb
)->control
.txseq
!= tx_seq
) {
1664 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1667 skb
= skb_queue_next(&chan
->tx_q
, skb
);
1670 if (bt_cb(skb
)->control
.retries
== chan
->remote_max_tx
&&
1671 chan
->remote_max_tx
) {
1672 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1676 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1677 bt_cb(skb
)->control
.retries
++;
1679 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1680 control
&= __get_sar_mask(chan
);
1682 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1683 control
|= __set_ctrl_final(chan
);
1685 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1686 control
|= __set_txseq(chan
, tx_seq
);
1688 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1690 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1691 fcs
= crc16(0, (u8
*)tx_skb
->data
,
1692 tx_skb
->len
- L2CAP_FCS_SIZE
);
1693 put_unaligned_le16(fcs
,
1694 tx_skb
->data
+ tx_skb
->len
- L2CAP_FCS_SIZE
);
1697 l2cap_do_send(chan
, tx_skb
);
1700 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1702 struct sk_buff
*skb
, *tx_skb
;
1707 if (chan
->state
!= BT_CONNECTED
)
1710 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1713 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1715 if (bt_cb(skb
)->control
.retries
== chan
->remote_max_tx
&&
1716 chan
->remote_max_tx
) {
1717 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1721 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1723 bt_cb(skb
)->control
.retries
++;
1725 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1726 control
&= __get_sar_mask(chan
);
1728 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1729 control
|= __set_ctrl_final(chan
);
1731 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1732 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1733 control
|= __set_ctrl_sar(chan
, bt_cb(skb
)->control
.sar
);
1735 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1737 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1738 fcs
= crc16(0, (u8
*)skb
->data
,
1739 tx_skb
->len
- L2CAP_FCS_SIZE
);
1740 put_unaligned_le16(fcs
, skb
->data
+
1741 tx_skb
->len
- L2CAP_FCS_SIZE
);
1744 l2cap_do_send(chan
, tx_skb
);
1746 __set_retrans_timer(chan
);
1748 bt_cb(skb
)->control
.txseq
= chan
->next_tx_seq
;
1750 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1752 if (bt_cb(skb
)->control
.retries
== 1) {
1753 chan
->unacked_frames
++;
1756 __clear_ack_timer(chan
);
1759 chan
->frames_sent
++;
1761 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1762 chan
->tx_send_head
= NULL
;
1764 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1770 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1774 if (!skb_queue_empty(&chan
->tx_q
))
1775 chan
->tx_send_head
= chan
->tx_q
.next
;
1777 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1778 ret
= l2cap_ertm_send(chan
);
1782 static void __l2cap_send_ack(struct l2cap_chan
*chan
)
1786 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1788 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
1789 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
1790 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1791 l2cap_send_sframe(chan
, control
);
1795 if (l2cap_ertm_send(chan
) > 0)
1798 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
1799 l2cap_send_sframe(chan
, control
);
1802 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1804 __clear_ack_timer(chan
);
1805 __l2cap_send_ack(chan
);
1808 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1810 struct srej_list
*tail
;
1813 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
1814 control
|= __set_ctrl_final(chan
);
1816 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1817 control
|= __set_reqseq(chan
, tail
->tx_seq
);
1819 l2cap_send_sframe(chan
, control
);
1822 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1823 struct msghdr
*msg
, int len
,
1824 int count
, struct sk_buff
*skb
)
1826 struct l2cap_conn
*conn
= chan
->conn
;
1827 struct sk_buff
**frag
;
1830 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1836 /* Continuation fragments (no L2CAP header) */
1837 frag
= &skb_shinfo(skb
)->frag_list
;
1839 struct sk_buff
*tmp
;
1841 count
= min_t(unsigned int, conn
->mtu
, len
);
1843 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1844 msg
->msg_flags
& MSG_DONTWAIT
);
1846 return PTR_ERR(tmp
);
1850 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1853 (*frag
)->priority
= skb
->priority
;
1858 skb
->len
+= (*frag
)->len
;
1859 skb
->data_len
+= (*frag
)->len
;
1861 frag
= &(*frag
)->next
;
1867 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1868 struct msghdr
*msg
, size_t len
,
1871 struct l2cap_conn
*conn
= chan
->conn
;
1872 struct sk_buff
*skb
;
1873 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1874 struct l2cap_hdr
*lh
;
1876 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
1878 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1880 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1881 msg
->msg_flags
& MSG_DONTWAIT
);
1885 skb
->priority
= priority
;
1887 /* Create L2CAP header */
1888 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1889 lh
->cid
= cpu_to_le16(chan
->dcid
);
1890 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
1891 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
1893 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1894 if (unlikely(err
< 0)) {
1896 return ERR_PTR(err
);
1901 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
1902 struct msghdr
*msg
, size_t len
,
1905 struct l2cap_conn
*conn
= chan
->conn
;
1906 struct sk_buff
*skb
;
1908 struct l2cap_hdr
*lh
;
1910 BT_DBG("chan %p len %d", chan
, (int)len
);
1912 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
1914 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
1915 msg
->msg_flags
& MSG_DONTWAIT
);
1919 skb
->priority
= priority
;
1921 /* Create L2CAP header */
1922 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1923 lh
->cid
= cpu_to_le16(chan
->dcid
);
1924 lh
->len
= cpu_to_le16(len
);
1926 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1927 if (unlikely(err
< 0)) {
1929 return ERR_PTR(err
);
1934 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
1935 struct msghdr
*msg
, size_t len
,
1938 struct l2cap_conn
*conn
= chan
->conn
;
1939 struct sk_buff
*skb
;
1940 int err
, count
, hlen
;
1941 struct l2cap_hdr
*lh
;
1943 BT_DBG("chan %p len %d", chan
, (int)len
);
1946 return ERR_PTR(-ENOTCONN
);
1948 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1949 hlen
= L2CAP_EXT_HDR_SIZE
;
1951 hlen
= L2CAP_ENH_HDR_SIZE
;
1954 hlen
+= L2CAP_SDULEN_SIZE
;
1956 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1957 hlen
+= L2CAP_FCS_SIZE
;
1959 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1961 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1962 msg
->msg_flags
& MSG_DONTWAIT
);
1966 /* Create L2CAP header */
1967 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1968 lh
->cid
= cpu_to_le16(chan
->dcid
);
1969 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1971 __put_control(chan
, 0, skb_put(skb
, __ctrl_size(chan
)));
1974 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
1976 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1977 if (unlikely(err
< 0)) {
1979 return ERR_PTR(err
);
1982 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1983 put_unaligned_le16(0, skb_put(skb
, L2CAP_FCS_SIZE
));
1985 bt_cb(skb
)->control
.retries
= 0;
1989 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
1990 struct sk_buff_head
*seg_queue
,
1991 struct msghdr
*msg
, size_t len
)
1993 struct sk_buff
*skb
;
1999 BT_DBG("chan %p, msg %p, len %d", chan
, msg
, (int)len
);
2001 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2002 * so fragmented skbs are not used. The HCI layer's handling
2003 * of fragmented skbs is not compatible with ERTM's queueing.
2006 /* PDU size is derived from the HCI MTU */
2007 pdu_len
= chan
->conn
->mtu
;
2009 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2011 /* Adjust for largest possible L2CAP overhead. */
2012 pdu_len
-= L2CAP_EXT_HDR_SIZE
+ L2CAP_FCS_SIZE
;
2014 /* Remote device may have requested smaller PDUs */
2015 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2017 if (len
<= pdu_len
) {
2018 sar
= L2CAP_SAR_UNSEGMENTED
;
2022 sar
= L2CAP_SAR_START
;
2024 pdu_len
-= L2CAP_SDULEN_SIZE
;
2028 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2031 __skb_queue_purge(seg_queue
);
2032 return PTR_ERR(skb
);
2035 bt_cb(skb
)->control
.sar
= sar
;
2036 __skb_queue_tail(seg_queue
, skb
);
2041 pdu_len
+= L2CAP_SDULEN_SIZE
;
2044 if (len
<= pdu_len
) {
2045 sar
= L2CAP_SAR_END
;
2048 sar
= L2CAP_SAR_CONTINUE
;
2055 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2058 struct sk_buff
*skb
;
2060 struct sk_buff_head seg_queue
;
2062 /* Connectionless channel */
2063 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2064 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2066 return PTR_ERR(skb
);
2068 l2cap_do_send(chan
, skb
);
2072 switch (chan
->mode
) {
2073 case L2CAP_MODE_BASIC
:
2074 /* Check outgoing MTU */
2075 if (len
> chan
->omtu
)
2078 /* Create a basic PDU */
2079 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2081 return PTR_ERR(skb
);
2083 l2cap_do_send(chan
, skb
);
2087 case L2CAP_MODE_ERTM
:
2088 case L2CAP_MODE_STREAMING
:
2089 /* Check outgoing MTU */
2090 if (len
> chan
->omtu
) {
2095 __skb_queue_head_init(&seg_queue
);
2097 /* Do segmentation before calling in to the state machine,
2098 * since it's possible to block while waiting for memory
2101 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2103 /* The channel could have been closed while segmenting,
2104 * check that it is still connected.
2106 if (chan
->state
!= BT_CONNECTED
) {
2107 __skb_queue_purge(&seg_queue
);
2114 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->tx_send_head
== NULL
)
2115 chan
->tx_send_head
= seg_queue
.next
;
2116 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2118 if (chan
->mode
== L2CAP_MODE_ERTM
)
2119 err
= l2cap_ertm_send(chan
);
2121 l2cap_streaming_send(chan
);
2126 /* If the skbs were not queued for sending, they'll still be in
2127 * seg_queue and need to be purged.
2129 __skb_queue_purge(&seg_queue
);
2133 BT_DBG("bad state %1.1x", chan
->mode
);
2140 /* Copy frame to all raw sockets on that connection */
2141 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2143 struct sk_buff
*nskb
;
2144 struct l2cap_chan
*chan
;
2146 BT_DBG("conn %p", conn
);
2148 mutex_lock(&conn
->chan_lock
);
2150 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2151 struct sock
*sk
= chan
->sk
;
2152 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2155 /* Don't send frame to the socket it came from */
2158 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2162 if (chan
->ops
->recv(chan
->data
, nskb
))
2166 mutex_unlock(&conn
->chan_lock
);
2169 /* ---- L2CAP signalling commands ---- */
2170 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2171 u8 code
, u8 ident
, u16 dlen
, void *data
)
2173 struct sk_buff
*skb
, **frag
;
2174 struct l2cap_cmd_hdr
*cmd
;
2175 struct l2cap_hdr
*lh
;
2178 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2179 conn
, code
, ident
, dlen
);
2181 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2182 count
= min_t(unsigned int, conn
->mtu
, len
);
2184 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2188 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2189 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2191 if (conn
->hcon
->type
== LE_LINK
)
2192 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2194 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2196 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2199 cmd
->len
= cpu_to_le16(dlen
);
2202 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2203 memcpy(skb_put(skb
, count
), data
, count
);
2209 /* Continuation fragments (no L2CAP header) */
2210 frag
= &skb_shinfo(skb
)->frag_list
;
2212 count
= min_t(unsigned int, conn
->mtu
, len
);
2214 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2218 memcpy(skb_put(*frag
, count
), data
, count
);
2223 frag
= &(*frag
)->next
;
2233 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2235 struct l2cap_conf_opt
*opt
= *ptr
;
2238 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2246 *val
= *((u8
*) opt
->val
);
2250 *val
= get_unaligned_le16(opt
->val
);
2254 *val
= get_unaligned_le32(opt
->val
);
2258 *val
= (unsigned long) opt
->val
;
2262 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2266 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2268 struct l2cap_conf_opt
*opt
= *ptr
;
2270 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2277 *((u8
*) opt
->val
) = val
;
2281 put_unaligned_le16(val
, opt
->val
);
2285 put_unaligned_le32(val
, opt
->val
);
2289 memcpy(opt
->val
, (void *) val
, len
);
2293 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2296 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2298 struct l2cap_conf_efs efs
;
2300 switch (chan
->mode
) {
2301 case L2CAP_MODE_ERTM
:
2302 efs
.id
= chan
->local_id
;
2303 efs
.stype
= chan
->local_stype
;
2304 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2305 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2306 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2307 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
2310 case L2CAP_MODE_STREAMING
:
2312 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2313 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2314 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2323 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2324 (unsigned long) &efs
);
2327 static void l2cap_ack_timeout(struct work_struct
*work
)
2329 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2332 BT_DBG("chan %p", chan
);
2334 l2cap_chan_lock(chan
);
2336 __l2cap_send_ack(chan
);
2338 l2cap_chan_unlock(chan
);
2340 l2cap_chan_put(chan
);
2343 static inline int l2cap_ertm_init(struct l2cap_chan
*chan
)
2347 chan
->next_tx_seq
= 0;
2348 chan
->expected_tx_seq
= 0;
2349 chan
->expected_ack_seq
= 0;
2350 chan
->unacked_frames
= 0;
2351 chan
->buffer_seq
= 0;
2352 chan
->num_acked
= 0;
2353 chan
->frames_sent
= 0;
2354 chan
->last_acked_seq
= 0;
2356 chan
->sdu_last_frag
= NULL
;
2359 skb_queue_head_init(&chan
->tx_q
);
2361 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2364 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2365 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2367 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2368 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2369 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2371 skb_queue_head_init(&chan
->srej_q
);
2373 INIT_LIST_HEAD(&chan
->srej_l
);
2374 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2378 return l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2381 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2384 case L2CAP_MODE_STREAMING
:
2385 case L2CAP_MODE_ERTM
:
2386 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2390 return L2CAP_MODE_BASIC
;
2394 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2396 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2399 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2401 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2404 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2406 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2407 __l2cap_ews_supported(chan
)) {
2408 /* use extended control field */
2409 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2410 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2412 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2413 L2CAP_DEFAULT_TX_WINDOW
);
2414 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2418 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2420 struct l2cap_conf_req
*req
= data
;
2421 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2422 void *ptr
= req
->data
;
2425 BT_DBG("chan %p", chan
);
2427 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2430 switch (chan
->mode
) {
2431 case L2CAP_MODE_STREAMING
:
2432 case L2CAP_MODE_ERTM
:
2433 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2436 if (__l2cap_efs_supported(chan
))
2437 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2441 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2446 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2447 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2449 switch (chan
->mode
) {
2450 case L2CAP_MODE_BASIC
:
2451 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2452 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2455 rfc
.mode
= L2CAP_MODE_BASIC
;
2457 rfc
.max_transmit
= 0;
2458 rfc
.retrans_timeout
= 0;
2459 rfc
.monitor_timeout
= 0;
2460 rfc
.max_pdu_size
= 0;
2462 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2463 (unsigned long) &rfc
);
2466 case L2CAP_MODE_ERTM
:
2467 rfc
.mode
= L2CAP_MODE_ERTM
;
2468 rfc
.max_transmit
= chan
->max_tx
;
2469 rfc
.retrans_timeout
= 0;
2470 rfc
.monitor_timeout
= 0;
2472 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2473 L2CAP_EXT_HDR_SIZE
-
2476 rfc
.max_pdu_size
= cpu_to_le16(size
);
2478 l2cap_txwin_setup(chan
);
2480 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2481 L2CAP_DEFAULT_TX_WINDOW
);
2483 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2484 (unsigned long) &rfc
);
2486 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2487 l2cap_add_opt_efs(&ptr
, chan
);
2489 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2492 if (chan
->fcs
== L2CAP_FCS_NONE
||
2493 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2494 chan
->fcs
= L2CAP_FCS_NONE
;
2495 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2498 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2499 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2503 case L2CAP_MODE_STREAMING
:
2504 rfc
.mode
= L2CAP_MODE_STREAMING
;
2506 rfc
.max_transmit
= 0;
2507 rfc
.retrans_timeout
= 0;
2508 rfc
.monitor_timeout
= 0;
2510 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2511 L2CAP_EXT_HDR_SIZE
-
2514 rfc
.max_pdu_size
= cpu_to_le16(size
);
2516 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2517 (unsigned long) &rfc
);
2519 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2520 l2cap_add_opt_efs(&ptr
, chan
);
2522 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2525 if (chan
->fcs
== L2CAP_FCS_NONE
||
2526 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2527 chan
->fcs
= L2CAP_FCS_NONE
;
2528 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2533 req
->dcid
= cpu_to_le16(chan
->dcid
);
2534 req
->flags
= cpu_to_le16(0);
2539 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2541 struct l2cap_conf_rsp
*rsp
= data
;
2542 void *ptr
= rsp
->data
;
2543 void *req
= chan
->conf_req
;
2544 int len
= chan
->conf_len
;
2545 int type
, hint
, olen
;
2547 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2548 struct l2cap_conf_efs efs
;
2550 u16 mtu
= L2CAP_DEFAULT_MTU
;
2551 u16 result
= L2CAP_CONF_SUCCESS
;
2554 BT_DBG("chan %p", chan
);
2556 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2557 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2559 hint
= type
& L2CAP_CONF_HINT
;
2560 type
&= L2CAP_CONF_MASK
;
2563 case L2CAP_CONF_MTU
:
2567 case L2CAP_CONF_FLUSH_TO
:
2568 chan
->flush_to
= val
;
2571 case L2CAP_CONF_QOS
:
2574 case L2CAP_CONF_RFC
:
2575 if (olen
== sizeof(rfc
))
2576 memcpy(&rfc
, (void *) val
, olen
);
2579 case L2CAP_CONF_FCS
:
2580 if (val
== L2CAP_FCS_NONE
)
2581 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
2584 case L2CAP_CONF_EFS
:
2586 if (olen
== sizeof(efs
))
2587 memcpy(&efs
, (void *) val
, olen
);
2590 case L2CAP_CONF_EWS
:
2592 return -ECONNREFUSED
;
2594 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2595 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
2596 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2597 chan
->remote_tx_win
= val
;
2604 result
= L2CAP_CONF_UNKNOWN
;
2605 *((u8
*) ptr
++) = type
;
2610 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2613 switch (chan
->mode
) {
2614 case L2CAP_MODE_STREAMING
:
2615 case L2CAP_MODE_ERTM
:
2616 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
2617 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2618 chan
->conn
->feat_mask
);
2623 if (__l2cap_efs_supported(chan
))
2624 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2626 return -ECONNREFUSED
;
2629 if (chan
->mode
!= rfc
.mode
)
2630 return -ECONNREFUSED
;
2636 if (chan
->mode
!= rfc
.mode
) {
2637 result
= L2CAP_CONF_UNACCEPT
;
2638 rfc
.mode
= chan
->mode
;
2640 if (chan
->num_conf_rsp
== 1)
2641 return -ECONNREFUSED
;
2643 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2644 sizeof(rfc
), (unsigned long) &rfc
);
2647 if (result
== L2CAP_CONF_SUCCESS
) {
2648 /* Configure output options and let the other side know
2649 * which ones we don't like. */
2651 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2652 result
= L2CAP_CONF_UNACCEPT
;
2655 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
2657 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
2660 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2661 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2662 efs
.stype
!= chan
->local_stype
) {
2664 result
= L2CAP_CONF_UNACCEPT
;
2666 if (chan
->num_conf_req
>= 1)
2667 return -ECONNREFUSED
;
2669 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2671 (unsigned long) &efs
);
2673 /* Send PENDING Conf Rsp */
2674 result
= L2CAP_CONF_PENDING
;
2675 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2680 case L2CAP_MODE_BASIC
:
2681 chan
->fcs
= L2CAP_FCS_NONE
;
2682 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2685 case L2CAP_MODE_ERTM
:
2686 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
2687 chan
->remote_tx_win
= rfc
.txwin_size
;
2689 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2691 chan
->remote_max_tx
= rfc
.max_transmit
;
2693 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2695 L2CAP_EXT_HDR_SIZE
-
2698 rfc
.max_pdu_size
= cpu_to_le16(size
);
2699 chan
->remote_mps
= size
;
2701 rfc
.retrans_timeout
=
2702 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
2703 rfc
.monitor_timeout
=
2704 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
2706 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2708 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2709 sizeof(rfc
), (unsigned long) &rfc
);
2711 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2712 chan
->remote_id
= efs
.id
;
2713 chan
->remote_stype
= efs
.stype
;
2714 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
2715 chan
->remote_flush_to
=
2716 le32_to_cpu(efs
.flush_to
);
2717 chan
->remote_acc_lat
=
2718 le32_to_cpu(efs
.acc_lat
);
2719 chan
->remote_sdu_itime
=
2720 le32_to_cpu(efs
.sdu_itime
);
2721 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2722 sizeof(efs
), (unsigned long) &efs
);
2726 case L2CAP_MODE_STREAMING
:
2727 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2729 L2CAP_EXT_HDR_SIZE
-
2732 rfc
.max_pdu_size
= cpu_to_le16(size
);
2733 chan
->remote_mps
= size
;
2735 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2737 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2738 sizeof(rfc
), (unsigned long) &rfc
);
2743 result
= L2CAP_CONF_UNACCEPT
;
2745 memset(&rfc
, 0, sizeof(rfc
));
2746 rfc
.mode
= chan
->mode
;
2749 if (result
== L2CAP_CONF_SUCCESS
)
2750 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2752 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2753 rsp
->result
= cpu_to_le16(result
);
2754 rsp
->flags
= cpu_to_le16(0x0000);
2759 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
2761 struct l2cap_conf_req
*req
= data
;
2762 void *ptr
= req
->data
;
2765 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2766 struct l2cap_conf_efs efs
;
2768 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
2770 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2771 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2774 case L2CAP_CONF_MTU
:
2775 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2776 *result
= L2CAP_CONF_UNACCEPT
;
2777 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2780 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2783 case L2CAP_CONF_FLUSH_TO
:
2784 chan
->flush_to
= val
;
2785 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2789 case L2CAP_CONF_RFC
:
2790 if (olen
== sizeof(rfc
))
2791 memcpy(&rfc
, (void *)val
, olen
);
2793 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
2794 rfc
.mode
!= chan
->mode
)
2795 return -ECONNREFUSED
;
2799 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2800 sizeof(rfc
), (unsigned long) &rfc
);
2803 case L2CAP_CONF_EWS
:
2804 chan
->tx_win
= min_t(u16
, val
,
2805 L2CAP_DEFAULT_EXT_WINDOW
);
2806 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2810 case L2CAP_CONF_EFS
:
2811 if (olen
== sizeof(efs
))
2812 memcpy(&efs
, (void *)val
, olen
);
2814 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2815 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2816 efs
.stype
!= chan
->local_stype
)
2817 return -ECONNREFUSED
;
2819 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2820 sizeof(efs
), (unsigned long) &efs
);
2825 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
2826 return -ECONNREFUSED
;
2828 chan
->mode
= rfc
.mode
;
2830 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
2832 case L2CAP_MODE_ERTM
:
2833 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2834 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2835 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2837 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2838 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
2839 chan
->local_sdu_itime
=
2840 le32_to_cpu(efs
.sdu_itime
);
2841 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
2842 chan
->local_flush_to
=
2843 le32_to_cpu(efs
.flush_to
);
2847 case L2CAP_MODE_STREAMING
:
2848 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2852 req
->dcid
= cpu_to_le16(chan
->dcid
);
2853 req
->flags
= cpu_to_le16(0x0000);
2858 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
2860 struct l2cap_conf_rsp
*rsp
= data
;
2861 void *ptr
= rsp
->data
;
2863 BT_DBG("chan %p", chan
);
2865 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2866 rsp
->result
= cpu_to_le16(result
);
2867 rsp
->flags
= cpu_to_le16(flags
);
/* Send the deferred Connect Response (success / no further info) for a
 * channel whose acceptance was postponed, then kick off configuration by
 * sending the first Configure Request unless one was already sent
 * (CONF_REQ_SENT test-and-set).
 * NOTE(review): garbled extraction — `buf` is used without a visible
 * declaration here, presumably declared on a dropped line.  Comments only;
 * code bytes untouched. */
2872 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
2874 struct l2cap_conn_rsp rsp
;
2875 struct l2cap_conn
*conn
= chan
->conn
;
/* scid/dcid are swapped relative to our view: the peer's source CID is
 * our dcid and vice versa. */
2878 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2879 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2880 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
2881 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
2882 l2cap_send_cmd(conn
, chan
->ident
,
2883 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
/* Only send a Configure Request once per channel. */
2885 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2888 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2889 l2cap_build_conf_req(chan
, buf
), buf
);
2890 chan
->num_conf_req
++;
/* Scan a Configure Response option list for the RFC option and cache the
 * negotiated ERTM/streaming parameters (retransmit/monitor timeouts, MPS)
 * on the channel.  If the peer misbehaved and sent no RFC option, sane
 * defaults derived from the channel's own mode/MTU are used instead.
 * NOTE(review): garbled extraction — the `switch` headers, `break`s,
 * `goto done` statements and the declarations of type/olen/val appear on
 * dropped lines.  Comments only; code bytes untouched. */
2893 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
2897 struct l2cap_conf_rfc rfc
;
2899 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
/* Only ERTM and streaming mode carry RFC timeouts worth caching. */
2901 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
2904 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2905 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2908 case L2CAP_CONF_RFC
:
/* Only accept an RFC option of exactly the expected size. */
2909 if (olen
== sizeof(rfc
))
2910 memcpy(&rfc
, (void *)val
, olen
);
2915 /* Use sane default values in case a misbehaving remote device
2916 * did not send an RFC option.
2917 */
2918 rfc
.mode
= chan
->mode
;
2919 rfc
.retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
2920 rfc
.monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
2921 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
2923 BT_ERR("Expected RFC option was not found, using defaults");
2927 case L2CAP_MODE_ERTM
:
2928 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2929 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2930 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2932 case L2CAP_MODE_STREAMING
:
2933 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
/* Handle an incoming Command Reject.  If the rejected command was our
 * pending Information Request (matching ident), mark the feature-mask
 * exchange as done anyway and proceed with starting channels, so a peer
 * that does not understand Information Requests cannot stall us.
 * NOTE(review): garbled extraction — the early-return body and final
 * return appear on dropped lines.  Comments only; code bytes untouched. */
2937 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2939 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
/* Only "command not understood" rejections are acted upon here. */
2941 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
2944 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2945 cmd
->ident
== conn
->info_ident
) {
2946 cancel_delayed_work(&conn
->info_timer
);
2948 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2949 conn
->info_ident
= 0;
2951 l2cap_conn_start(conn
);
/* Handle an incoming Connection Request: look up a listening channel for
 * the requested PSM, security-check the link (SDP exempt), verify backlog
 * and CID uniqueness, create and enqueue the new child channel, then send
 * a Connection Response whose result/status depend on security and
 * defer-setup state.  May additionally trigger the feature-mask
 * Information Request exchange and the first Configure Request.
 * NOTE(review): garbled extraction — error labels (response/sendresp),
 * several goto statements, lock calls and the declaration of `buf` sit on
 * dropped lines; control flow below is therefore only partially visible.
 * Comments only; code bytes untouched. */
2957 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2959 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2960 struct l2cap_conn_rsp rsp
;
2961 struct l2cap_chan
*chan
= NULL
, *pchan
;
2962 struct sock
*parent
, *sk
= NULL
;
2963 int result
, status
= L2CAP_CS_NO_INFO
;
2965 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2966 __le16 psm
= req
->psm
;
2968 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
2970 /* Check if we have socket listening on psm */
2971 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
2973 result
= L2CAP_CR_BAD_PSM
;
2979 mutex_lock(&conn
->chan_lock
);
2982 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP and is exempt from the link-mode security check. */
2983 if (psm
!= cpu_to_le16(0x0001) &&
2984 !hci_conn_check_link_mode(conn
->hcon
)) {
2985 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
2986 result
= L2CAP_CR_SEC_BLOCK
;
2990 result
= L2CAP_CR_NO_MEM
;
2992 /* Check for backlog size */
2993 if (sk_acceptq_is_full(parent
)) {
2994 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2998 chan
= pchan
->ops
->new_connection(pchan
->data
);
3004 /* Check if we already have channel with that dcid */
3005 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
3006 sock_set_flag(sk
, SOCK_ZAPPED
);
3007 chan
->ops
->close(chan
->data
);
3011 hci_conn_hold(conn
->hcon
);
3013 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3014 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3018 bt_accept_enqueue(parent
, sk
);
3020 __l2cap_chan_add(conn
, chan
);
3024 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3026 chan
->ident
= cmd
->ident
;
/* Decide the response: success, or pending on authorization /
 * authentication / missing feature-mask info. */
3028 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3029 if (l2cap_chan_check_security(chan
)) {
3030 if (bt_sk(sk
)->defer_setup
) {
3031 __l2cap_state_change(chan
, BT_CONNECT2
);
3032 result
= L2CAP_CR_PEND
;
3033 status
= L2CAP_CS_AUTHOR_PEND
;
3034 parent
->sk_data_ready(parent
, 0);
3036 __l2cap_state_change(chan
, BT_CONFIG
);
3037 result
= L2CAP_CR_SUCCESS
;
3038 status
= L2CAP_CS_NO_INFO
;
3041 __l2cap_state_change(chan
, BT_CONNECT2
);
3042 result
= L2CAP_CR_PEND
;
3043 status
= L2CAP_CS_AUTHEN_PEND
;
3046 __l2cap_state_change(chan
, BT_CONNECT2
);
3047 result
= L2CAP_CR_PEND
;
3048 status
= L2CAP_CS_NO_INFO
;
3052 release_sock(parent
);
3053 mutex_unlock(&conn
->chan_lock
);
3056 rsp
.scid
= cpu_to_le16(scid
);
3057 rsp
.dcid
= cpu_to_le16(dcid
);
3058 rsp
.result
= cpu_to_le16(result
);
3059 rsp
.status
= cpu_to_le16(status
);
3060 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
/* If the response is pending for lack of feature info, start the
 * Information Request exchange now. */
3062 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3063 struct l2cap_info_req info
;
3064 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3066 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3067 conn
->info_ident
= l2cap_get_ident(conn
);
3069 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3071 l2cap_send_cmd(conn
, conn
->info_ident
,
3072 L2CAP_INFO_REQ
, sizeof(info
), &info
);
/* On success, immediately begin configuration (once per channel). */
3075 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3076 result
== L2CAP_CR_SUCCESS
) {
3078 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3079 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3080 l2cap_build_conf_req(chan
, buf
), buf
);
3081 chan
->num_conf_req
++;
/* Handle an incoming Connection Response.  Locate the local channel by
 * scid (or by the command ident when scid lookup fails), then on SUCCESS
 * move it to BT_CONFIG and send the first Configure Request; on PEND mark
 * the connect as pending; otherwise tear the channel down with
 * ECONNREFUSED.
 * NOTE(review): garbled extraction — switch header, dcid assignment,
 * default branch, `req` buffer declaration and unlock/return tails sit on
 * dropped lines.  Comments only; code bytes untouched. */
3087 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3089 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3090 u16 scid
, dcid
, result
, status
;
3091 struct l2cap_chan
*chan
;
3095 scid
= __le16_to_cpu(rsp
->scid
);
3096 dcid
= __le16_to_cpu(rsp
->dcid
);
3097 result
= __le16_to_cpu(rsp
->result
);
3098 status
= __le16_to_cpu(rsp
->status
);
3100 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3101 dcid
, scid
, result
, status
);
3103 mutex_lock(&conn
->chan_lock
);
3106 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
/* Fallback: match the response to the request by command ident. */
3112 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3121 l2cap_chan_lock(chan
);
3124 case L2CAP_CR_SUCCESS
:
3125 l2cap_state_change(chan
, BT_CONFIG
);
3128 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3130 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3133 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3134 l2cap_build_conf_req(chan
, req
), req
);
3135 chan
->num_conf_req
++;
3139 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
/* Any other result: refuse and delete the channel. */
3143 l2cap_chan_del(chan
, ECONNREFUSED
);
3147 l2cap_chan_unlock(chan
);
3150 mutex_unlock(&conn
->chan_lock
);
/* Choose the channel's FCS setting once configuration completes: FCS is
 * only meaningful in ERTM/streaming mode, and CRC16 is enabled unless the
 * peer explicitly requested "no FCS" (CONF_NO_FCS_RECV).
 * NOTE(review): garbled extraction — part of the explanatory comment and
 * the braces were dropped.  Comments only; code bytes untouched. */
3155 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3157 /* FCS is enabled only in ERTM or streaming mode, if one or both
3158 * sides request it.
3159 */
3160 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3161 chan
->fcs
= L2CAP_FCS_NONE
;
3162 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3163 chan
->fcs
= L2CAP_FCS_CRC16
;
/* Handle an incoming Configure Request.  Validates the channel state,
 * accumulates (possibly fragmented, flags bit 0 = continuation) config
 * options into chan->conf_req, and when the request is complete parses it,
 * replies with a Configure Response, and — once both directions are
 * configured — finishes channel setup (FCS choice, ERTM init, ready).
 * Also sends our own Configure Request if not yet sent, and handles the
 * both-sides-PENDING rendezvous.
 * NOTE(review): garbled extraction — `rsp`/`buf` buffers, len checks,
 * goto labels and several braces sit on dropped lines; only the visible
 * statements are annotated.  Comments only; code bytes untouched. */
3166 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3168 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3171 struct l2cap_chan
*chan
;
3174 dcid
= __le16_to_cpu(req
->dcid
);
3175 flags
= __le16_to_cpu(req
->flags
);
3177 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3179 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3183 l2cap_chan_lock(chan
);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with "invalid CID". */
3185 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3186 struct l2cap_cmd_rej_cid rej
;
3188 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3189 rej
.scid
= cpu_to_le16(chan
->scid
);
3190 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3192 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3197 /* Reject if config buffer is too small. */
3198 len
= cmd_len
- sizeof(*req
);
3199 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3200 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3201 l2cap_build_conf_rsp(chan
, rsp
,
3202 L2CAP_CONF_REJECT
, flags
), rsp
);
/* Accumulate this fragment of the option list. */
3207 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3208 chan
->conf_len
+= len
;
3210 if (flags
& 0x0001) {
3211 /* Incomplete config. Send empty response. */
3212 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3213 l2cap_build_conf_rsp(chan
, rsp
,
3214 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3218 /* Complete config. */
3219 len
= l2cap_parse_conf_req(chan
, rsp
);
/* Parse failure tears down the connection. */
3221 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3225 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3226 chan
->num_conf_rsp
++;
3228 /* Reset config buffer. */
3231 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
/* Both directions configured: finalize the channel. */
3234 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3235 set_default_fcs(chan
);
3237 l2cap_state_change(chan
, BT_CONNECTED
);
3239 if (chan
->mode
== L2CAP_MODE_ERTM
||
3240 chan
->mode
== L2CAP_MODE_STREAMING
)
3241 err
= l2cap_ertm_init(chan
);
3244 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3246 l2cap_chan_ready(chan
);
3251 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3253 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3254 l2cap_build_conf_req(chan
, buf
), buf
);
3255 chan
->num_conf_req
++;
3258 /* Got Conf Rsp PENDING from remote side and assume we sent
3259 Conf Rsp PENDING in the code above */
3260 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3261 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3263 /* check compatibility */
3265 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3266 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3268 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3269 l2cap_build_conf_rsp(chan
, rsp
,
3270 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
3274 l2cap_chan_unlock(chan
);
/* Handle an incoming Configure Response.  On SUCCESS, cache the peer's RFC
 * parameters; on PENDING, note the remote-pending state and — if we are
 * also locally pending — complete our side; on UNACCEPT, re-parse the
 * peer's counter-proposal (bounded by L2CAP_CONF_MAX_CONF_RSP retries) and
 * send a new Configure Request; any other result disconnects.  Once the
 * input direction is done and output already was, finalize the channel.
 * NOTE(review): garbled extraction — switch header, `req`/`buf` buffers,
 * goto labels, default branch and continuation-flag handling sit on
 * dropped lines.  Comments only; code bytes untouched. */
3278 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3280 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3281 u16 scid
, flags
, result
;
3282 struct l2cap_chan
*chan
;
/* Remaining option-list length after the fixed response header. */
3283 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3286 scid
= __le16_to_cpu(rsp
->scid
);
3287 flags
= __le16_to_cpu(rsp
->flags
);
3288 result
= __le16_to_cpu(rsp
->result
);
3290 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3293 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3297 l2cap_chan_lock(chan
);
3300 case L2CAP_CONF_SUCCESS
:
3301 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3302 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3305 case L2CAP_CONF_PENDING
:
3306 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3308 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3311 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3314 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3318 /* check compatibility */
3320 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3321 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3323 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3324 l2cap_build_conf_rsp(chan
, buf
,
3325 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3329 case L2CAP_CONF_UNACCEPT
:
/* Retry negotiation only a bounded number of times. */
3330 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3333 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3334 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3338 /* throw out any old stored conf requests */
3339 result
= L2CAP_CONF_SUCCESS
;
3340 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3343 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3347 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3348 L2CAP_CONF_REQ
, len
, req
);
3349 chan
->num_conf_req
++;
3350 if (result
!= L2CAP_CONF_SUCCESS
)
/* Fallthrough/default: give up on this channel. */
3356 l2cap_chan_set_err(chan
, ECONNRESET
);
3358 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3359 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3366 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3368 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3369 set_default_fcs(chan
);
3371 l2cap_state_change(chan
, BT_CONNECTED
);
3372 if (chan
->mode
== L2CAP_MODE_ERTM
||
3373 chan
->mode
== L2CAP_MODE_STREAMING
)
3374 err
= l2cap_ertm_init(chan
);
3377 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3379 l2cap_chan_ready(chan
);
3383 l2cap_chan_unlock(chan
);
/* Handle an incoming Disconnection Request: find the channel by our CID
 * (the request's dcid), echo a Disconnection Response with the CIDs
 * swapped back, shut the socket down, and delete the channel with
 * ECONNRESET.  The extra hold/put pair keeps the channel alive across the
 * unlock while ops->close runs.
 * NOTE(review): garbled extraction — scid/dcid declarations, the
 * not-found early return and the `sk` lookup/lock sit on dropped lines.
 * Comments only; code bytes untouched. */
3387 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3389 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3390 struct l2cap_disconn_rsp rsp
;
3392 struct l2cap_chan
*chan
;
3395 scid
= __le16_to_cpu(req
->scid
);
3396 dcid
= __le16_to_cpu(req
->dcid
);
3398 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3400 mutex_lock(&conn
->chan_lock
);
/* The peer's dcid is our scid — look the channel up by it. */
3402 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3404 mutex_unlock(&conn
->chan_lock
);
3408 l2cap_chan_lock(chan
);
3412 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3413 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3414 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3417 sk
->sk_shutdown
= SHUTDOWN_MASK
;
/* Hold while we drop the chan lock and call into the socket layer. */
3420 l2cap_chan_hold(chan
);
3421 l2cap_chan_del(chan
, ECONNRESET
);
3423 l2cap_chan_unlock(chan
);
3425 chan
->ops
->close(chan
->data
);
3426 l2cap_chan_put(chan
);
3428 mutex_unlock(&conn
->chan_lock
);
/* Handle an incoming Disconnection Response: find the channel by scid and
 * delete it with no error (the disconnect was ours).  Same hold/put
 * pattern as the request handler to survive ops->close after unlock.
 * NOTE(review): garbled extraction — scid/dcid declarations and the
 * not-found early return sit on dropped lines.  Comments only; code bytes
 * untouched. */
3433 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3435 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3437 struct l2cap_chan
*chan
;
3439 scid
= __le16_to_cpu(rsp
->scid
);
3440 dcid
= __le16_to_cpu(rsp
->dcid
);
3442 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3444 mutex_lock(&conn
->chan_lock
);
3446 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3448 mutex_unlock(&conn
->chan_lock
);
3452 l2cap_chan_lock(chan
);
3454 l2cap_chan_hold(chan
);
/* errno 0: clean, locally-initiated teardown completing. */
3455 l2cap_chan_del(chan
, 0);
3457 l2cap_chan_unlock(chan
);
3459 chan
->ops
->close(chan
->data
);
3460 l2cap_chan_put(chan
);
3462 mutex_unlock(&conn
->chan_lock
);
/* Handle an incoming Information Request.  Answers FEAT_MASK with the
 * feature bits (plus ERTM/streaming/FCS and, presumably behind a config
 * check on a dropped line, extended flow/window), FIXED_CHAN with the
 * fixed-channel bitmap (A2MP bit toggled by a condition not visible here),
 * and anything else with NOTSUPP.
 * NOTE(review): garbled extraction — `buf` declarations, conditional
 * compilation guards and closing braces sit on dropped lines.  Comments
 * only; code bytes untouched. */
3467 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3469 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3472 type
= __le16_to_cpu(req
->type
);
3474 BT_DBG("type 0x%4.4x", type
);
3476 if (type
== L2CAP_IT_FEAT_MASK
) {
3478 u32 feat_mask
= l2cap_feat_mask
;
3479 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3480 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3481 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3483 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3486 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3487 | L2CAP_FEAT_EXT_WINDOW
;
3489 put_unaligned_le32(feat_mask
, rsp
->data
);
3490 l2cap_send_cmd(conn
, cmd
->ident
,
3491 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3492 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3494 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
/* A2MP fixed-channel bit is set/cleared based on a condition on a
 * dropped line (presumably HS/AMP support — TODO confirm). */
3497 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3499 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3501 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3502 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3503 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3504 l2cap_send_cmd(conn
, cmd
->ident
,
3505 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3507 struct l2cap_info_rsp rsp
;
3508 rsp
.type
= cpu_to_le16(type
);
3509 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3510 l2cap_send_cmd(conn
, cmd
->ident
,
3511 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
/* Handle an incoming Information Response.  Validated against our pending
 * ident (info req/rsp are not channel-bound).  On a FEAT_MASK answer,
 * store the peer's feature bits and, if it advertises fixed channels,
 * chain a FIXED_CHAN request; otherwise mark the exchange done and start
 * pending channels.  Failures are treated as "done" so setup proceeds.
 * NOTE(review): garbled extraction — switch header, early return and
 * break/brace lines were dropped.  Comments only; code bytes untouched. */
3517 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3519 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3522 type
= __le16_to_cpu(rsp
->type
);
3523 result
= __le16_to_cpu(rsp
->result
);
3525 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3527 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3528 if (cmd
->ident
!= conn
->info_ident
||
3529 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3532 cancel_delayed_work(&conn
->info_timer
);
3534 if (result
!= L2CAP_IR_SUCCESS
) {
3535 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3536 conn
->info_ident
= 0;
3538 l2cap_conn_start(conn
);
3544 case L2CAP_IT_FEAT_MASK
:
3545 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
/* Peer supports fixed channels: ask which ones before starting. */
3547 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3548 struct l2cap_info_req req
;
3549 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3551 conn
->info_ident
= l2cap_get_ident(conn
);
3553 l2cap_send_cmd(conn
, conn
->info_ident
,
3554 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3556 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3557 conn
->info_ident
= 0;
3559 l2cap_conn_start(conn
);
3563 case L2CAP_IT_FIXED_CHAN
:
3564 conn
->fixed_chan_mask
= rsp
->data
[0];
3565 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3566 conn
->info_ident
= 0;
3568 l2cap_conn_start(conn
);
/* Handle an AMP Create Channel Request.  Currently a placeholder: after
 * validating the command length it always responds with "no resources"
 * (L2CAP_CR_NO_MEM), i.e. channel creation over an AMP is refused.
 * NOTE(review): garbled extraction — the trailing `void *data` parameter,
 * psm/scid declarations, rsp.dcid assignment and the final send arguments
 * sit on dropped lines.  Comments only; code bytes untouched. */
3575 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3576 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3579 struct l2cap_create_chan_req
*req
= data
;
3580 struct l2cap_create_chan_rsp rsp
;
3583 if (cmd_len
!= sizeof(*req
))
3589 psm
= le16_to_cpu(req
->psm
);
3590 scid
= le16_to_cpu(req
->scid
);
3592 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3594 /* Placeholder: Always reject */
3596 rsp
.scid
= cpu_to_le16(scid
);
3597 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
3598 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3600 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* Handle an AMP Create Channel Response — wire-compatible with a normal
 * Connection Response, so simply delegate to l2cap_connect_rsp(). */
3606 static inline int l2cap_create_channel_rsp(struct l2cap_conn
*conn
,
3607 struct l2cap_cmd_hdr
*cmd
, void *data
)
3609 BT_DBG("conn %p", conn
);
3611 return l2cap_connect_rsp(conn
, cmd
, data
);
/* Send a Move Channel Response for the given initiator CID with the given
 * result, reusing the requester's command ident. */
3614 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3615 u16 icid
, u16 result
)
3617 struct l2cap_move_chan_rsp rsp
;
3619 BT_DBG("icid %d, result %d", icid
, result
);
3621 rsp
.icid
= cpu_to_le16(icid
);
3622 rsp
.result
= cpu_to_le16(result
);
3624 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
/* Send a Move Channel Confirm with a freshly allocated ident, which is
 * recorded on the channel so the Confirm Response can be matched later.
 * NOTE(review): garbled extraction — the `u8 ident` declaration line was
 * dropped.  Comments only; code bytes untouched. */
3627 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3628 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3630 struct l2cap_move_chan_cfm cfm
;
3633 BT_DBG("icid %d, result %d", icid
, result
);
3635 ident
= l2cap_get_ident(conn
);
/* Remember the ident to pair with the incoming confirm-response. */
3637 chan
->ident
= ident
;
3639 cfm
.icid
= cpu_to_le16(icid
);
3640 cfm
.result
= cpu_to_le16(result
);
3642 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
/* Send a Move Channel Confirm Response for the given initiator CID,
 * acknowledging a received Move Channel Confirm.
 * NOTE(review): the `u16 icid` parameter line was dropped by the
 * extraction.  Comments only; code bytes untouched. */
3645 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
3648 struct l2cap_move_chan_cfm_rsp rsp
;
3650 BT_DBG("icid %d", icid
);
3652 rsp
.icid
= cpu_to_le16(icid
);
3653 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
/* Handle a Move Channel Request.  Placeholder implementation: validates
 * the length and always answers "not allowed" — channel moves to an AMP
 * are refused.
 * NOTE(review): garbled extraction — the `u16 icid` declaration, the
 * length-error return and the final return sit on dropped lines. */
3656 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
3657 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3659 struct l2cap_move_chan_req
*req
= data
;
3661 u16 result
= L2CAP_MR_NOT_ALLOWED
;
3663 if (cmd_len
!= sizeof(*req
))
3666 icid
= le16_to_cpu(req
->icid
);
3668 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
3673 /* Placeholder: Always refuse */
3674 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
/* Handle a Move Channel Response.  Placeholder implementation: always
 * replies with an UNCONFIRMED Move Channel Confirm (no channel attached,
 * hence the NULL chan).
 * NOTE(review): garbled extraction — icid/result declarations and error
 * returns sit on dropped lines. */
3679 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
3680 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3682 struct l2cap_move_chan_rsp
*rsp
= data
;
3685 if (cmd_len
!= sizeof(*rsp
))
3688 icid
= le16_to_cpu(rsp
->icid
);
3689 result
= le16_to_cpu(rsp
->result
);
3691 BT_DBG("icid %d, result %d", icid
, result
);
3693 /* Placeholder: Always unconfirmed */
3694 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
/* Handle a Move Channel Confirm: validate the length and acknowledge it
 * with a Confirm Response.  The result value is decoded and logged but
 * (in this placeholder stage) not otherwise acted on.
 * NOTE(review): garbled extraction — icid/result declarations and returns
 * sit on dropped lines. */
3699 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
3700 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3702 struct l2cap_move_chan_cfm
*cfm
= data
;
3705 if (cmd_len
!= sizeof(*cfm
))
3708 icid
= le16_to_cpu(cfm
->icid
);
3709 result
= le16_to_cpu(cfm
->result
);
3711 BT_DBG("icid %d, result %d", icid
, result
);
3713 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
/* Handle a Move Channel Confirm Response: length-check and log only —
 * the move state machine is not implemented at this stage.
 * NOTE(review): garbled extraction — the icid declaration and returns sit
 * on dropped lines. */
3718 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
3719 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3721 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
3724 if (cmd_len
!= sizeof(*rsp
))
3727 icid
= le16_to_cpu(rsp
->icid
);
3729 BT_DBG("icid %d", icid
);
/* Validate LE connection parameters: interval bounds (min 6..max 3200,
 * min <= max), supervision timeout multiplier 10..3200, timeout strictly
 * greater than 8*max interval, and slave latency within both the absolute
 * cap (499) and the value derivable from the timeout.  Units are the
 * spec's 1.25 ms / 10 ms ticks — TODO confirm against caller.
 * NOTE(review): garbled extraction — the `to_multiplier` parameter, the
 * max_latency declaration and the return statements sit on dropped
 * lines. */
3734 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
3739 if (min
> max
|| min
< 6 || max
> 3200)
3742 if (to_multiplier
< 10 || to_multiplier
> 3200)
/* The supervision timeout must exceed the longest interval. */
3745 if (max
>= to_multiplier
* 8)
3748 max_latency
= (to_multiplier
* 8 / max
) - 1;
3749 if (latency
> 499 || latency
> max_latency
)
/* Handle an LE Connection Parameter Update Request (slave -> master).
 * Only honored when we are master; the parameters are range-checked via
 * l2cap_check_conn_param(), an accept/reject response is sent, and on
 * accept the controller is asked to update the link.
 * NOTE(review): garbled extraction — `err` declaration, early-return
 * bodies and the send's size/payload arguments sit on dropped lines. */
3755 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
3756 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3758 struct hci_conn
*hcon
= conn
->hcon
;
3759 struct l2cap_conn_param_update_req
*req
;
3760 struct l2cap_conn_param_update_rsp rsp
;
3761 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
/* Only the master may apply a parameter update. */
3764 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
3767 cmd_len
= __le16_to_cpu(cmd
->len
);
3768 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
3771 req
= (struct l2cap_conn_param_update_req
*) data
;
3772 min
= __le16_to_cpu(req
->min
);
3773 max
= __le16_to_cpu(req
->max
);
3774 latency
= __le16_to_cpu(req
->latency
);
3775 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
3777 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3778 min
, max
, latency
, to_multiplier
);
3780 memset(&rsp
, 0, sizeof(rsp
));
3782 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
3784 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
3786 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
3788 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
/* Parameters were acceptable: push them down to the controller. */
3792 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
/* Dispatch one BR/EDR signaling command to its handler by opcode.
 * Echo Requests are answered inline; Echo Responses are ignored; unknown
 * opcodes are logged (and presumably rejected via a dropped error path).
 * NOTE(review): garbled extraction — the `int err` declaration, `break`
 * statements and the final return sit on dropped lines. */
3797 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
3798 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3802 switch (cmd
->code
) {
3803 case L2CAP_COMMAND_REJ
:
3804 l2cap_command_rej(conn
, cmd
, data
);
3807 case L2CAP_CONN_REQ
:
3808 err
= l2cap_connect_req(conn
, cmd
, data
);
3811 case L2CAP_CONN_RSP
:
3812 err
= l2cap_connect_rsp(conn
, cmd
, data
);
3815 case L2CAP_CONF_REQ
:
3816 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
3819 case L2CAP_CONF_RSP
:
3820 err
= l2cap_config_rsp(conn
, cmd
, data
);
3823 case L2CAP_DISCONN_REQ
:
3824 err
= l2cap_disconnect_req(conn
, cmd
, data
);
3827 case L2CAP_DISCONN_RSP
:
3828 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
3831 case L2CAP_ECHO_REQ
:
/* Echo: reflect the payload straight back. */
3832 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3835 case L2CAP_ECHO_RSP
:
3838 case L2CAP_INFO_REQ
:
3839 err
= l2cap_information_req(conn
, cmd
, data
);
3842 case L2CAP_INFO_RSP
:
3843 err
= l2cap_information_rsp(conn
, cmd
, data
);
3846 case L2CAP_CREATE_CHAN_REQ
:
3847 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
3850 case L2CAP_CREATE_CHAN_RSP
:
3851 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
3854 case L2CAP_MOVE_CHAN_REQ
:
3855 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
3858 case L2CAP_MOVE_CHAN_RSP
:
3859 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
3862 case L2CAP_MOVE_CHAN_CFM
:
3863 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
3866 case L2CAP_MOVE_CHAN_CFM_RSP
:
3867 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
3871 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
/* Dispatch one LE signaling command.  Only the Connection Parameter
 * Update Request is handled; rejects and update responses are ignored,
 * and anything else is logged as unknown (error return on a dropped
 * line). */
3879 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
3880 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3882 switch (cmd
->code
) {
3883 case L2CAP_COMMAND_REJ
:
3886 case L2CAP_CONN_PARAM_UPDATE_REQ
:
3887 return l2cap_conn_param_update_req(conn
, cmd
, data
);
3889 case L2CAP_CONN_PARAM_UPDATE_RSP
:
3893 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
/* Process the signaling channel of an skb: iterate over the packed
 * command headers, sanity-check each (declared length must fit the
 * remaining data, ident must be non-zero), route to the LE or BR/EDR
 * dispatcher by link type, and send a Command Reject when a handler
 * fails.  The raw skb is also passed to raw sockets first.
 * NOTE(review): garbled extraction — `len`/`cmd_len`/`err` declarations,
 * the per-iteration data/len advance and the trailing kfree_skb sit on
 * dropped lines. */
3898 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
3899 struct sk_buff
*skb
)
3901 u8
*data
= skb
->data
;
3903 struct l2cap_cmd_hdr cmd
;
3906 l2cap_raw_recv(conn
, skb
);
3908 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3910 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3911 data
+= L2CAP_CMD_HDR_SIZE
;
3912 len
-= L2CAP_CMD_HDR_SIZE
;
3914 cmd_len
= le16_to_cpu(cmd
.len
);
3916 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
/* Guard against truncated or malformed commands from the peer. */
3918 if (cmd_len
> len
|| !cmd
.ident
) {
3919 BT_DBG("corrupted command");
3923 if (conn
->hcon
->type
== LE_LINK
)
3924 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3926 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3929 struct l2cap_cmd_rej_unk rej
;
3931 BT_ERR("Wrong link type (%d)", err
);
3933 /* FIXME: Map err to a valid reason */
3934 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
3935 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
/* Verify the CRC16 frame check sequence of a received frame when the
 * channel uses FCS.  The trailing 2 FCS bytes are trimmed off, then the
 * received FCS is compared against a CRC computed over the L2CAP header
 * (enhanced or extended size depending on FLAG_EXT_CTRL) plus payload.
 * NOTE(review): garbled extraction — `hdr_size` declaration and the
 * return statements sit on dropped lines. */
3945 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3947 u16 our_fcs
, rcv_fcs
;
3950 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3951 hdr_size
= L2CAP_EXT_HDR_SIZE
;
3953 hdr_size
= L2CAP_ENH_HDR_SIZE
;
3955 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
/* Trim the 2-byte FCS, then read it from just past the new end. */
3956 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
3957 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
/* The header sits before skb->data here, so include it in the CRC. */
3958 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3960 if (our_fcs
!= rcv_fcs
)
/* After a poll, acknowledge the peer with whatever fits the state: an RNR
 * if we are locally busy, otherwise retransmit/send pending I-frames, and
 * fall back to an RR if nothing was sent at all (the F-bit carrier set on
 * a dropped line, presumably — TODO confirm).
 * NOTE(review): garbled extraction — the `control` declaration/init and
 * the final-bit setup sit on dropped lines. */
3966 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3970 chan
->frames_sent
= 0;
3972 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
3974 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3975 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3976 l2cap_send_sframe(chan
, control
);
3977 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3980 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
3981 l2cap_retransmit_frames(chan
);
3983 l2cap_ertm_send(chan
);
/* Nothing went out and we are not busy: answer with a plain RR. */
3985 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
3986 chan
->frames_sent
== 0) {
3987 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3988 l2cap_send_sframe(chan
, control
);
/* Insert an out-of-order I-frame into the SREJ reassembly queue, keeping
 * the queue ordered by tx_seq offset relative to buffer_seq.  Duplicate
 * sequence numbers are detected (dup-return on a dropped line); if no
 * later frame is found the skb is appended at the tail.
 * NOTE(review): garbled extraction — the empty-queue check, loop
 * construct, goto/return statements and braces sit on dropped lines. */
3992 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 tx_seq
, u8 sar
)
3994 struct sk_buff
*next_skb
;
3995 int tx_seq_offset
, next_tx_seq_offset
;
/* Stamp the control metadata before queueing. */
3997 bt_cb(skb
)->control
.txseq
= tx_seq
;
3998 bt_cb(skb
)->control
.sar
= sar
;
4000 next_skb
= skb_peek(&chan
->srej_q
);
4002 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
4005 if (bt_cb(next_skb
)->control
.txseq
== tx_seq
)
4008 next_tx_seq_offset
= __seq_offset(chan
,
4009 bt_cb(next_skb
)->control
.txseq
, chan
->buffer_seq
);
/* Found the first frame that sequences after us: insert before it. */
4011 if (next_tx_seq_offset
> tx_seq_offset
) {
4012 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
4016 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
4019 next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
);
4022 __skb_queue_tail(&chan
->srej_q
, skb
);
/* Append new_frag to skb's frag_list, tracking the current tail through
 * *last_frag so appends stay O(1), and keep skb's aggregate accounting
 * (len / data_len / truesize) in sync. */
4027 static void append_skb_frag(struct sk_buff
*skb
,
4028 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4030 /* skb->len reflects data in skb as well as all fragments
4031 * skb->data_len reflects only data in fragments
4032 */
/* First fragment: start the frag_list. */
4033 if (!skb_has_frag_list(skb
))
4034 skb_shinfo(skb
)->frag_list
= new_frag
;
4036 new_frag
->next
= NULL
;
/* Link after the previous tail, then advance the tail pointer. */
4038 (*last_frag
)->next
= new_frag
;
4039 *last_frag
= new_frag
;
4041 skb
->len
+= new_frag
->len
;
4042 skb
->data_len
+= new_frag
->len
;
4043 skb
->truesize
+= new_frag
->truesize
;
/* Reassemble a (possibly segmented) SDU from an I-frame based on its SAR
 * bits.  UNSEGMENTED frames go straight to ops->recv; START records the
 * announced SDU length (bounded by imtu) and stashes the first fragment;
 * CONTINUE/END append fragments via append_skb_frag, with END validating
 * the total length and delivering the completed SDU.  The error tail
 * frees a partial SDU and resets the reassembly state.
 * NOTE(review): garbled extraction — state checks (chan->sdu presence),
 * length guards, `chan->sdu` assignments, error gotos and returns sit on
 * dropped lines, so the visible flow is partial. */
4046 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u32 control
)
4050 switch (__get_ctrl_sar(chan
, control
)) {
4051 case L2CAP_SAR_UNSEGMENTED
:
4055 err
= chan
->ops
->recv(chan
->data
, skb
);
4058 case L2CAP_SAR_START
:
/* First two payload bytes of a START frame carry the SDU length. */
4062 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4063 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4065 if (chan
->sdu_len
> chan
->imtu
) {
4070 if (skb
->len
>= chan
->sdu_len
)
4074 chan
->sdu_last_frag
= skb
;
4080 case L2CAP_SAR_CONTINUE
:
4084 append_skb_frag(chan
->sdu
, skb
,
4085 &chan
->sdu_last_frag
);
4088 if (chan
->sdu
->len
>= chan
->sdu_len
)
4098 append_skb_frag(chan
->sdu
, skb
,
4099 &chan
->sdu_last_frag
);
/* END frame: total must match the announced SDU length exactly. */
4102 if (chan
->sdu
->len
!= chan
->sdu_len
)
4105 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
4108 /* Reassembly complete */
4110 chan
->sdu_last_frag
= NULL
;
/* Error path: drop any partial SDU and reset reassembly state. */
4118 kfree_skb(chan
->sdu
);
4120 chan
->sdu_last_frag
= NULL
;
/* Enter ERTM local-busy state: flag the condition, clear any pending
 * SREJ bookkeeping, and arm the ack timer so an RNR gets sent. */
4127 static void l2cap_ertm_enter_local_busy(struct l2cap_chan
*chan
)
4129 BT_DBG("chan %p, Enter local busy", chan
);
4131 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
4132 l2cap_seq_list_clear(&chan
->srej_list
);
4134 __set_ack_timer(chan
);
/* Leave ERTM local-busy state.  If an RNR was previously sent, poll the
 * peer with an RR (P-bit set), start the monitor timer and wait for the
 * F-bit; finally clear the busy/RNR flags.
 * NOTE(review): garbled extraction — the `control` declaration and
 * surrounding braces sit on dropped lines. */
4137 static void l2cap_ertm_exit_local_busy(struct l2cap_chan
*chan
)
/* Only need the RR/poll dance if the peer was actually told RNR. */
4141 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
4144 control
= __set_reqseq(chan
, chan
->buffer_seq
);
4145 control
|= __set_ctrl_poll(chan
);
4146 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4147 l2cap_send_sframe(chan
, control
);
4148 chan
->retry_count
= 1;
4150 __clear_retrans_timer(chan
);
4151 __set_monitor_timer(chan
);
4153 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
4156 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
4157 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
4159 BT_DBG("chan %p, Exit local busy", chan
);
/* Public entry point: toggle local-busy handling for a channel.  Only
 * meaningful in ERTM mode; busy != 0 enters local busy, 0 exits it. */
4162 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4164 if (chan
->mode
== L2CAP_MODE_ERTM
) {
4166 l2cap_ertm_enter_local_busy(chan
);
4168 l2cap_ertm_exit_local_busy(chan
);
/* Drain the SREJ queue: deliver consecutively-sequenced buffered frames
 * (starting at tx_seq) to reassembly while not locally busy, advancing
 * buffer_seq_srej and the expected tx_seq as each frame is consumed.  A
 * reassembly failure disconnects the channel.
 * NOTE(review): garbled extraction — `control`/`err` declarations and
 * loop braces sit on dropped lines. */
4172 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u16 tx_seq
)
4174 struct sk_buff
*skb
;
/* Stop at the first gap, or as soon as local busy asserts. */
4177 while ((skb
= skb_peek(&chan
->srej_q
)) &&
4178 !test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4181 if (bt_cb(skb
)->control
.txseq
!= tx_seq
)
4184 skb
= skb_dequeue(&chan
->srej_q
);
4185 control
= __set_ctrl_sar(chan
, bt_cb(skb
)->control
.sar
);
4186 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4189 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4193 chan
->buffer_seq_srej
= __next_seq(chan
, chan
->buffer_seq_srej
);
4194 tx_seq
= __next_seq(chan
, tx_seq
);
/* Walk the pending-SREJ list up to (and including) tx_seq: the matching
 * entry is dropped (frame arrived), while each earlier entry gets its
 * SREJ S-frame retransmitted and is re-queued at the tail.
 * NOTE(review): garbled extraction — the `control` declaration, list_del
 * calls, frees and loop braces sit on dropped lines, so the exact
 * deletion points are not visible here. */
4198 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
4200 struct srej_list
*l
, *tmp
;
4203 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
/* This sequence has now been received — stop resending for it. */
4204 if (l
->tx_seq
== tx_seq
) {
4209 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
4210 control
|= __set_reqseq(chan
, l
->tx_seq
);
4211 l2cap_send_sframe(chan
, control
);
4213 list_add_tail(&l
->list
, &chan
->srej_l
);
/* Send SREJ S-frames for every sequence number between expected_tx_seq
 * and the received tx_seq (exclusive), recording each missing sequence in
 * both the seq list and an allocated srej_list node so it can be matched
 * and resent later.  Finally advances expected_tx_seq past tx_seq.
 * NOTE(review): garbled extraction — the `control` declaration, the
 * allocation-failure return (ENOMEM, presumably) and braces sit on
 * dropped lines.  GFP_ATOMIC is used, so this runs in a context that must
 * not sleep. */
4217 static int l2cap_send_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
4219 struct srej_list
*new;
4222 while (tx_seq
!= chan
->expected_tx_seq
) {
4223 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
4224 control
|= __set_reqseq(chan
, chan
->expected_tx_seq
);
4225 l2cap_seq_list_append(&chan
->srej_list
, chan
->expected_tx_seq
);
4226 l2cap_send_sframe(chan
, control
);
4228 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
4232 new->tx_seq
= chan
->expected_tx_seq
;
4234 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4236 list_add_tail(&new->list
, &chan
->srej_l
);
/* Skip over the frame that actually arrived. */
4239 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4244 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4246 u16 tx_seq
= __get_txseq(chan
, rx_control
);
4247 u16 req_seq
= __get_reqseq(chan
, rx_control
);
4248 u8 sar
= __get_ctrl_sar(chan
, rx_control
);
4249 int tx_seq_offset
, expected_tx_seq_offset
;
4250 int num_to_ack
= (chan
->tx_win
/6) + 1;
4253 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan
, skb
->len
,
4254 tx_seq
, rx_control
);
4256 if (__is_ctrl_final(chan
, rx_control
) &&
4257 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4258 __clear_monitor_timer(chan
);
4259 if (chan
->unacked_frames
> 0)
4260 __set_retrans_timer(chan
);
4261 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4264 chan
->expected_ack_seq
= req_seq
;
4265 l2cap_drop_acked_frames(chan
);
4267 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
4269 /* invalid tx_seq */
4270 if (tx_seq_offset
>= chan
->tx_win
) {
4271 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4275 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4276 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
4277 l2cap_send_ack(chan
);
4281 if (tx_seq
== chan
->expected_tx_seq
)
4284 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4285 struct srej_list
*first
;
4287 first
= list_first_entry(&chan
->srej_l
,
4288 struct srej_list
, list
);
4289 if (tx_seq
== first
->tx_seq
) {
4290 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
4291 l2cap_check_srej_gap(chan
, tx_seq
);
4293 list_del(&first
->list
);
4296 if (list_empty(&chan
->srej_l
)) {
4297 chan
->buffer_seq
= chan
->buffer_seq_srej
;
4298 clear_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
4299 l2cap_send_ack(chan
);
4300 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
4303 struct srej_list
*l
;
4305 /* duplicated tx_seq */
4306 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
4309 list_for_each_entry(l
, &chan
->srej_l
, list
) {
4310 if (l
->tx_seq
== tx_seq
) {
4311 l2cap_resend_srejframe(chan
, tx_seq
);
4316 err
= l2cap_send_srejframe(chan
, tx_seq
);
4318 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4323 expected_tx_seq_offset
= __seq_offset(chan
,
4324 chan
->expected_tx_seq
, chan
->buffer_seq
);
4326 /* duplicated tx_seq */
4327 if (tx_seq_offset
< expected_tx_seq_offset
)
4330 set_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
4332 BT_DBG("chan %p, Enter SREJ", chan
);
4334 INIT_LIST_HEAD(&chan
->srej_l
);
4335 chan
->buffer_seq_srej
= chan
->buffer_seq
;
4337 __skb_queue_head_init(&chan
->srej_q
);
4338 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
4340 /* Set P-bit only if there are some I-frames to ack. */
4341 if (__clear_ack_timer(chan
))
4342 set_bit(CONN_SEND_PBIT
, &chan
->conn_state
);
4344 err
= l2cap_send_srejframe(chan
, tx_seq
);
4346 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4353 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4355 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4356 bt_cb(skb
)->control
.txseq
= tx_seq
;
4357 bt_cb(skb
)->control
.sar
= sar
;
4358 __skb_queue_tail(&chan
->srej_q
, skb
);
4362 err
= l2cap_reassemble_sdu(chan
, skb
, rx_control
);
4363 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4366 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4370 if (__is_ctrl_final(chan
, rx_control
)) {
4371 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4372 l2cap_retransmit_frames(chan
);
4376 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
4377 if (chan
->num_acked
== num_to_ack
- 1)
4378 l2cap_send_ack(chan
);
4380 __set_ack_timer(chan
);
4389 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4391 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
,
4392 __get_reqseq(chan
, rx_control
), rx_control
);
4394 chan
->expected_ack_seq
= __get_reqseq(chan
, rx_control
);
4395 l2cap_drop_acked_frames(chan
);
4397 if (__is_ctrl_poll(chan
, rx_control
)) {
4398 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4399 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4400 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4401 (chan
->unacked_frames
> 0))
4402 __set_retrans_timer(chan
);
4404 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4405 l2cap_send_srejtail(chan
);
4407 l2cap_send_i_or_rr_or_rnr(chan
);
4410 } else if (__is_ctrl_final(chan
, rx_control
)) {
4411 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4413 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4414 l2cap_retransmit_frames(chan
);
4417 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4418 (chan
->unacked_frames
> 0))
4419 __set_retrans_timer(chan
);
4421 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4422 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
))
4423 l2cap_send_ack(chan
);
4425 l2cap_ertm_send(chan
);
4429 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4431 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4433 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4435 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4437 chan
->expected_ack_seq
= tx_seq
;
4438 l2cap_drop_acked_frames(chan
);
4440 if (__is_ctrl_final(chan
, rx_control
)) {
4441 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4442 l2cap_retransmit_frames(chan
);
4444 l2cap_retransmit_frames(chan
);
4446 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
))
4447 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4450 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4452 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4454 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4456 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4458 if (__is_ctrl_poll(chan
, rx_control
)) {
4459 chan
->expected_ack_seq
= tx_seq
;
4460 l2cap_drop_acked_frames(chan
);
4462 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4463 l2cap_retransmit_one_frame(chan
, tx_seq
);
4465 l2cap_ertm_send(chan
);
4467 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4468 chan
->srej_save_reqseq
= tx_seq
;
4469 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4471 } else if (__is_ctrl_final(chan
, rx_control
)) {
4472 if (test_bit(CONN_SREJ_ACT
, &chan
->conn_state
) &&
4473 chan
->srej_save_reqseq
== tx_seq
)
4474 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4476 l2cap_retransmit_one_frame(chan
, tx_seq
);
4478 l2cap_retransmit_one_frame(chan
, tx_seq
);
4479 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4480 chan
->srej_save_reqseq
= tx_seq
;
4481 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4486 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4488 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4490 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4492 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4493 chan
->expected_ack_seq
= tx_seq
;
4494 l2cap_drop_acked_frames(chan
);
4496 if (__is_ctrl_poll(chan
, rx_control
))
4497 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4499 if (!test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4500 __clear_retrans_timer(chan
);
4501 if (__is_ctrl_poll(chan
, rx_control
))
4502 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
4506 if (__is_ctrl_poll(chan
, rx_control
)) {
4507 l2cap_send_srejtail(chan
);
4509 rx_control
= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4510 l2cap_send_sframe(chan
, rx_control
);
4514 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4516 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan
, rx_control
, skb
->len
);
4518 if (__is_ctrl_final(chan
, rx_control
) &&
4519 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4520 __clear_monitor_timer(chan
);
4521 if (chan
->unacked_frames
> 0)
4522 __set_retrans_timer(chan
);
4523 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4526 switch (__get_ctrl_super(chan
, rx_control
)) {
4527 case L2CAP_SUPER_RR
:
4528 l2cap_data_channel_rrframe(chan
, rx_control
);
4531 case L2CAP_SUPER_REJ
:
4532 l2cap_data_channel_rejframe(chan
, rx_control
);
4535 case L2CAP_SUPER_SREJ
:
4536 l2cap_data_channel_srejframe(chan
, rx_control
);
4539 case L2CAP_SUPER_RNR
:
4540 l2cap_data_channel_rnrframe(chan
, rx_control
);
4548 static int l2cap_ertm_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4552 int len
, next_tx_seq_offset
, req_seq_offset
;
4554 __unpack_control(chan
, skb
);
4556 control
= __get_control(chan
, skb
->data
);
4557 skb_pull(skb
, __ctrl_size(chan
));
4561 * We can just drop the corrupted I-frame here.
4562 * Receiver will miss it and start proper recovery
4563 * procedures and ask retransmission.
4565 if (l2cap_check_fcs(chan
, skb
))
4568 if (__is_sar_start(chan
, control
) && !__is_sframe(chan
, control
))
4569 len
-= L2CAP_SDULEN_SIZE
;
4571 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4572 len
-= L2CAP_FCS_SIZE
;
4574 if (len
> chan
->mps
) {
4575 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4579 req_seq
= __get_reqseq(chan
, control
);
4581 req_seq_offset
= __seq_offset(chan
, req_seq
, chan
->expected_ack_seq
);
4583 next_tx_seq_offset
= __seq_offset(chan
, chan
->next_tx_seq
,
4584 chan
->expected_ack_seq
);
4586 /* check for invalid req-seq */
4587 if (req_seq_offset
> next_tx_seq_offset
) {
4588 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4592 if (!__is_sframe(chan
, control
)) {
4594 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4598 l2cap_data_channel_iframe(chan
, control
, skb
);
4602 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4606 l2cap_data_channel_sframe(chan
, control
, skb
);
4616 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4618 struct l2cap_chan
*chan
;
4623 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4625 BT_DBG("unknown cid 0x%4.4x", cid
);
4626 /* Drop packet and return */
4631 l2cap_chan_lock(chan
);
4633 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4635 if (chan
->state
!= BT_CONNECTED
)
4638 switch (chan
->mode
) {
4639 case L2CAP_MODE_BASIC
:
4640 /* If socket recv buffers overflows we drop data here
4641 * which is *bad* because L2CAP has to be reliable.
4642 * But we don't have any other choice. L2CAP doesn't
4643 * provide flow control mechanism. */
4645 if (chan
->imtu
< skb
->len
)
4648 if (!chan
->ops
->recv(chan
->data
, skb
))
4652 case L2CAP_MODE_ERTM
:
4653 l2cap_ertm_data_rcv(chan
, skb
);
4657 case L2CAP_MODE_STREAMING
:
4658 control
= __get_control(chan
, skb
->data
);
4659 skb_pull(skb
, __ctrl_size(chan
));
4662 if (l2cap_check_fcs(chan
, skb
))
4665 if (__is_sar_start(chan
, control
))
4666 len
-= L2CAP_SDULEN_SIZE
;
4668 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4669 len
-= L2CAP_FCS_SIZE
;
4671 if (len
> chan
->mps
|| len
< 0 || __is_sframe(chan
, control
))
4674 tx_seq
= __get_txseq(chan
, control
);
4676 if (chan
->expected_tx_seq
!= tx_seq
) {
4677 /* Frame(s) missing - must discard partial SDU */
4678 kfree_skb(chan
->sdu
);
4680 chan
->sdu_last_frag
= NULL
;
4683 /* TODO: Notify userland of missing data */
4686 chan
->expected_tx_seq
= __next_seq(chan
, tx_seq
);
4688 if (l2cap_reassemble_sdu(chan
, skb
, control
) == -EMSGSIZE
)
4689 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4694 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
4702 l2cap_chan_unlock(chan
);
4707 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4709 struct l2cap_chan
*chan
;
4711 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
4715 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4717 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4720 if (chan
->imtu
< skb
->len
)
4723 if (!chan
->ops
->recv(chan
->data
, skb
))
4732 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
4733 struct sk_buff
*skb
)
4735 struct l2cap_chan
*chan
;
4737 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
4741 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4743 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4746 if (chan
->imtu
< skb
->len
)
4749 if (!chan
->ops
->recv(chan
->data
, skb
))
4758 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4760 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4764 skb_pull(skb
, L2CAP_HDR_SIZE
);
4765 cid
= __le16_to_cpu(lh
->cid
);
4766 len
= __le16_to_cpu(lh
->len
);
4768 if (len
!= skb
->len
) {
4773 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4776 case L2CAP_CID_LE_SIGNALING
:
4777 case L2CAP_CID_SIGNALING
:
4778 l2cap_sig_channel(conn
, skb
);
4781 case L2CAP_CID_CONN_LESS
:
4782 psm
= get_unaligned((__le16
*) skb
->data
);
4784 l2cap_conless_channel(conn
, psm
, skb
);
4787 case L2CAP_CID_LE_DATA
:
4788 l2cap_att_channel(conn
, cid
, skb
);
4792 if (smp_sig_channel(conn
, skb
))
4793 l2cap_conn_del(conn
->hcon
, EACCES
);
4797 l2cap_data_channel(conn
, cid
, skb
);
4802 /* ---- L2CAP interface with lower layer (HCI) ---- */
4804 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
4806 int exact
= 0, lm1
= 0, lm2
= 0;
4807 struct l2cap_chan
*c
;
4809 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4811 /* Find listening sockets and check their link_mode */
4812 read_lock(&chan_list_lock
);
4813 list_for_each_entry(c
, &chan_list
, global_l
) {
4814 struct sock
*sk
= c
->sk
;
4816 if (c
->state
!= BT_LISTEN
)
4819 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4820 lm1
|= HCI_LM_ACCEPT
;
4821 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4822 lm1
|= HCI_LM_MASTER
;
4824 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4825 lm2
|= HCI_LM_ACCEPT
;
4826 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4827 lm2
|= HCI_LM_MASTER
;
4830 read_unlock(&chan_list_lock
);
4832 return exact
? lm1
: lm2
;
4835 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4837 struct l2cap_conn
*conn
;
4839 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4842 conn
= l2cap_conn_add(hcon
, status
);
4844 l2cap_conn_ready(conn
);
4846 l2cap_conn_del(hcon
, bt_to_errno(status
));
4851 int l2cap_disconn_ind(struct hci_conn
*hcon
)
4853 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4855 BT_DBG("hcon %p", hcon
);
4858 return HCI_ERROR_REMOTE_USER_TERM
;
4859 return conn
->disc_reason
;
4862 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4864 BT_DBG("hcon %p reason %d", hcon
, reason
);
4866 l2cap_conn_del(hcon
, bt_to_errno(reason
));
4870 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
4872 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4875 if (encrypt
== 0x00) {
4876 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
4877 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
4878 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
4879 l2cap_chan_close(chan
, ECONNREFUSED
);
4881 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4882 __clear_chan_timer(chan
);
4886 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4888 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4889 struct l2cap_chan
*chan
;
4894 BT_DBG("conn %p", conn
);
4896 if (hcon
->type
== LE_LINK
) {
4897 if (!status
&& encrypt
)
4898 smp_distribute_keys(conn
, 0);
4899 cancel_delayed_work(&conn
->security_timer
);
4902 mutex_lock(&conn
->chan_lock
);
4904 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
4905 l2cap_chan_lock(chan
);
4907 BT_DBG("chan->scid %d", chan
->scid
);
4909 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
4910 if (!status
&& encrypt
) {
4911 chan
->sec_level
= hcon
->sec_level
;
4912 l2cap_chan_ready(chan
);
4915 l2cap_chan_unlock(chan
);
4919 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
4920 l2cap_chan_unlock(chan
);
4924 if (!status
&& (chan
->state
== BT_CONNECTED
||
4925 chan
->state
== BT_CONFIG
)) {
4926 struct sock
*sk
= chan
->sk
;
4928 bt_sk(sk
)->suspended
= false;
4929 sk
->sk_state_change(sk
);
4931 l2cap_check_encryption(chan
, encrypt
);
4932 l2cap_chan_unlock(chan
);
4936 if (chan
->state
== BT_CONNECT
) {
4938 l2cap_send_conn_req(chan
);
4940 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
4942 } else if (chan
->state
== BT_CONNECT2
) {
4943 struct sock
*sk
= chan
->sk
;
4944 struct l2cap_conn_rsp rsp
;
4950 if (bt_sk(sk
)->defer_setup
) {
4951 struct sock
*parent
= bt_sk(sk
)->parent
;
4952 res
= L2CAP_CR_PEND
;
4953 stat
= L2CAP_CS_AUTHOR_PEND
;
4955 parent
->sk_data_ready(parent
, 0);
4957 __l2cap_state_change(chan
, BT_CONFIG
);
4958 res
= L2CAP_CR_SUCCESS
;
4959 stat
= L2CAP_CS_NO_INFO
;
4962 __l2cap_state_change(chan
, BT_DISCONN
);
4963 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
4964 res
= L2CAP_CR_SEC_BLOCK
;
4965 stat
= L2CAP_CS_NO_INFO
;
4970 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4971 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4972 rsp
.result
= cpu_to_le16(res
);
4973 rsp
.status
= cpu_to_le16(stat
);
4974 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
4978 l2cap_chan_unlock(chan
);
4981 mutex_unlock(&conn
->chan_lock
);
4986 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4988 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4991 conn
= l2cap_conn_add(hcon
, 0);
4996 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4998 if (!(flags
& ACL_CONT
)) {
4999 struct l2cap_hdr
*hdr
;
5000 struct l2cap_chan
*chan
;
5005 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5006 kfree_skb(conn
->rx_skb
);
5007 conn
->rx_skb
= NULL
;
5009 l2cap_conn_unreliable(conn
, ECOMM
);
5012 /* Start fragment always begin with Basic L2CAP header */
5013 if (skb
->len
< L2CAP_HDR_SIZE
) {
5014 BT_ERR("Frame is too short (len %d)", skb
->len
);
5015 l2cap_conn_unreliable(conn
, ECOMM
);
5019 hdr
= (struct l2cap_hdr
*) skb
->data
;
5020 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5021 cid
= __le16_to_cpu(hdr
->cid
);
5023 if (len
== skb
->len
) {
5024 /* Complete frame received */
5025 l2cap_recv_frame(conn
, skb
);
5029 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5031 if (skb
->len
> len
) {
5032 BT_ERR("Frame is too long (len %d, expected len %d)",
5034 l2cap_conn_unreliable(conn
, ECOMM
);
5038 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5040 if (chan
&& chan
->sk
) {
5041 struct sock
*sk
= chan
->sk
;
5044 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
5045 BT_ERR("Frame exceeding recv MTU (len %d, "
5049 l2cap_conn_unreliable(conn
, ECOMM
);
5055 /* Allocate skb for the complete frame (with header) */
5056 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5060 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5062 conn
->rx_len
= len
- skb
->len
;
5064 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5066 if (!conn
->rx_len
) {
5067 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5068 l2cap_conn_unreliable(conn
, ECOMM
);
5072 if (skb
->len
> conn
->rx_len
) {
5073 BT_ERR("Fragment is too long (len %d, expected %d)",
5074 skb
->len
, conn
->rx_len
);
5075 kfree_skb(conn
->rx_skb
);
5076 conn
->rx_skb
= NULL
;
5078 l2cap_conn_unreliable(conn
, ECOMM
);
5082 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5084 conn
->rx_len
-= skb
->len
;
5086 if (!conn
->rx_len
) {
5087 /* Complete frame received */
5088 l2cap_recv_frame(conn
, conn
->rx_skb
);
5089 conn
->rx_skb
= NULL
;
5098 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5100 struct l2cap_chan
*c
;
5102 read_lock(&chan_list_lock
);
5104 list_for_each_entry(c
, &chan_list
, global_l
) {
5105 struct sock
*sk
= c
->sk
;
5107 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5108 batostr(&bt_sk(sk
)->src
),
5109 batostr(&bt_sk(sk
)->dst
),
5110 c
->state
, __le16_to_cpu(c
->psm
),
5111 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5112 c
->sec_level
, c
->mode
);
5115 read_unlock(&chan_list_lock
);
5120 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5122 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5125 static const struct file_operations l2cap_debugfs_fops
= {
5126 .open
= l2cap_debugfs_open
,
5128 .llseek
= seq_lseek
,
5129 .release
= single_release
,
/* Dentry of the "l2cap" debugfs file; removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
5134 int __init
l2cap_init(void)
5138 err
= l2cap_init_sockets();
5143 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5144 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5146 BT_ERR("Failed to create L2CAP debug file");
5152 void l2cap_exit(void)
5154 debugfs_remove(l2cap_debugfs
);
5155 l2cap_cleanup_sockets();
5158 module_param(disable_ertm
, bool, 0644);
5159 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");