2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm
= 0;
60 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
61 static u8 l2cap_fixed_chan
[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops
;
65 static struct workqueue_struct
*_busy_wq
;
67 static struct bt_sock_list l2cap_sk_list
= {
68 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
71 static void l2cap_busy_work(struct work_struct
*work
);
73 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
74 static void l2cap_sock_close(struct sock
*sk
);
75 static void l2cap_sock_kill(struct sock
*sk
);
77 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
78 u8 code
, u8 ident
, u16 dlen
, void *data
);
/* ---- L2CAP timers ---- */
/* Socket timer expiry: tear the channel down with an error code that
 * reflects how far the connection had progressed.  Runs in timer
 * (softirq) context, hence the bh_lock_sock() pairing. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* NOTE(review): reconstructed tail — kill the zapped orphan and drop
	 * the timer's socket reference; confirm against upstream. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
106 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
108 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
109 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
112 static void l2cap_sock_clear_timer(struct sock
*sk
)
114 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
115 sk_stop_timer(sk
, &sk
->sk_timer
);
118 /* ---- L2CAP channels ---- */
119 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
122 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
123 if (l2cap_pi(s
)->dcid
== cid
)
129 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
132 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
133 if (l2cap_pi(s
)->scid
== cid
)
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
145 s
= __l2cap_get_chan_by_scid(l
, cid
);
148 read_unlock(&l
->lock
);
152 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
155 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
156 if (l2cap_pi(s
)->ident
== ident
)
162 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
166 s
= __l2cap_get_chan_by_ident(l
, ident
);
169 read_unlock(&l
->lock
);
173 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
175 u16 cid
= L2CAP_CID_DYN_START
;
177 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
178 if (!__l2cap_get_chan_by_scid(l
, cid
))
185 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
190 l2cap_pi(l
->head
)->prev_c
= sk
;
192 l2cap_pi(sk
)->next_c
= l
->head
;
193 l2cap_pi(sk
)->prev_c
= NULL
;
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
199 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
201 write_lock_bh(&l
->lock
);
206 l2cap_pi(next
)->prev_c
= prev
;
208 l2cap_pi(prev
)->next_c
= next
;
209 write_unlock_bh(&l
->lock
);
214 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
216 struct l2cap_chan_list
*l
= &conn
->chan_list
;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
219 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
221 conn
->disc_reason
= 0x13;
223 l2cap_pi(sk
)->conn
= conn
;
225 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
228 } else if (sk
->sk_type
== SOCK_DGRAM
) {
229 /* Connectionless socket */
230 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
231 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
232 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
236 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
237 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
240 __l2cap_chan_link(l
, sk
);
243 bt_accept_enqueue(parent
, sk
);
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock
*sk
, int err
)
250 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
251 struct sock
*parent
= bt_sk(sk
)->parent
;
253 l2cap_sock_clear_timer(sk
);
255 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn
->chan_list
, sk
);
260 l2cap_pi(sk
)->conn
= NULL
;
261 hci_conn_put(conn
->hcon
);
264 sk
->sk_state
= BT_CLOSED
;
265 sock_set_flag(sk
, SOCK_ZAPPED
);
271 bt_accept_unlink(sk
);
272 parent
->sk_data_ready(parent
, 0);
274 sk
->sk_state_change(sk
);
276 skb_queue_purge(TX_QUEUE(sk
));
278 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
279 struct srej_list
*l
, *tmp
;
281 del_timer(&l2cap_pi(sk
)->retrans_timer
);
282 del_timer(&l2cap_pi(sk
)->monitor_timer
);
283 del_timer(&l2cap_pi(sk
)->ack_timer
);
285 skb_queue_purge(SREJ_QUEUE(sk
));
286 skb_queue_purge(BUSY_QUEUE(sk
));
288 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
295 /* Service level security */
296 static inline int l2cap_check_security(struct sock
*sk
)
298 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
301 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
303 auth_type
= HCI_AT_NO_BONDING_MITM
;
305 auth_type
= HCI_AT_NO_BONDING
;
307 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
308 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
310 switch (l2cap_pi(sk
)->sec_level
) {
311 case BT_SECURITY_HIGH
:
312 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
314 case BT_SECURITY_MEDIUM
:
315 auth_type
= HCI_AT_GENERAL_BONDING
;
318 auth_type
= HCI_AT_NO_BONDING
;
323 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
327 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 129 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn
->lock
);
339 if (++conn
->tx_ident
> 128)
344 spin_unlock_bh(&conn
->lock
);
349 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
351 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
353 BT_DBG("code 0x%2.2x", code
);
358 hci_send_acl(conn
->hcon
, skb
, 0);
361 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
364 struct l2cap_hdr
*lh
;
365 struct l2cap_conn
*conn
= pi
->conn
;
366 struct sock
*sk
= (struct sock
*)pi
;
367 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
369 if (sk
->sk_state
!= BT_CONNECTED
)
372 if (pi
->fcs
== L2CAP_FCS_CRC16
)
375 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
377 count
= min_t(unsigned int, conn
->mtu
, hlen
);
378 control
|= L2CAP_CTRL_FRAME_TYPE
;
380 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
381 control
|= L2CAP_CTRL_FINAL
;
382 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
385 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
386 control
|= L2CAP_CTRL_POLL
;
387 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
390 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
394 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
395 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
396 lh
->cid
= cpu_to_le16(pi
->dcid
);
397 put_unaligned_le16(control
, skb_put(skb
, 2));
399 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
400 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
401 put_unaligned_le16(fcs
, skb_put(skb
, 2));
404 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
409 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
410 control
|= L2CAP_SUPER_RCV_NOT_READY
;
411 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
413 control
|= L2CAP_SUPER_RCV_READY
;
415 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
417 l2cap_send_sframe(pi
, control
);
420 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
422 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
425 static void l2cap_do_start(struct sock
*sk
)
427 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
429 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
430 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
433 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
434 struct l2cap_conn_req req
;
435 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
436 req
.psm
= l2cap_pi(sk
)->psm
;
438 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
439 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
441 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
442 L2CAP_CONN_REQ
, sizeof(req
), &req
);
445 struct l2cap_info_req req
;
446 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
448 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
449 conn
->info_ident
= l2cap_get_ident(conn
);
451 mod_timer(&conn
->info_timer
, jiffies
+
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
454 l2cap_send_cmd(conn
, conn
->info_ident
,
455 L2CAP_INFO_REQ
, sizeof(req
), &req
);
459 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
, int err
)
461 struct l2cap_disconn_req req
;
466 skb_queue_purge(TX_QUEUE(sk
));
468 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
469 del_timer(&l2cap_pi(sk
)->retrans_timer
);
470 del_timer(&l2cap_pi(sk
)->monitor_timer
);
471 del_timer(&l2cap_pi(sk
)->ack_timer
);
474 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
475 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
476 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
477 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
479 sk
->sk_state
= BT_DISCONN
;
483 /* ---- L2CAP connections ---- */
484 static void l2cap_conn_start(struct l2cap_conn
*conn
)
486 struct l2cap_chan_list
*l
= &conn
->chan_list
;
489 BT_DBG("conn %p", conn
);
493 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
496 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
497 sk
->sk_type
!= SOCK_STREAM
) {
502 if (sk
->sk_state
== BT_CONNECT
) {
503 if (l2cap_check_security(sk
) &&
504 __l2cap_no_conn_pending(sk
)) {
505 struct l2cap_conn_req req
;
506 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
507 req
.psm
= l2cap_pi(sk
)->psm
;
509 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
510 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
512 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
513 L2CAP_CONN_REQ
, sizeof(req
), &req
);
515 } else if (sk
->sk_state
== BT_CONNECT2
) {
516 struct l2cap_conn_rsp rsp
;
517 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
518 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
520 if (l2cap_check_security(sk
)) {
521 if (bt_sk(sk
)->defer_setup
) {
522 struct sock
*parent
= bt_sk(sk
)->parent
;
523 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
524 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
525 parent
->sk_data_ready(parent
, 0);
528 sk
->sk_state
= BT_CONFIG
;
529 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
530 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
533 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
534 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
537 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
538 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
544 read_unlock(&l
->lock
);
547 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
549 struct l2cap_chan_list
*l
= &conn
->chan_list
;
552 BT_DBG("conn %p", conn
);
556 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
559 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
560 sk
->sk_type
!= SOCK_STREAM
) {
561 l2cap_sock_clear_timer(sk
);
562 sk
->sk_state
= BT_CONNECTED
;
563 sk
->sk_state_change(sk
);
564 } else if (sk
->sk_state
== BT_CONNECT
)
570 read_unlock(&l
->lock
);
573 /* Notify sockets that we cannot guaranty reliability anymore */
574 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
576 struct l2cap_chan_list
*l
= &conn
->chan_list
;
579 BT_DBG("conn %p", conn
);
583 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
584 if (l2cap_pi(sk
)->force_reliable
)
588 read_unlock(&l
->lock
);
591 static void l2cap_info_timeout(unsigned long arg
)
593 struct l2cap_conn
*conn
= (void *) arg
;
595 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
596 conn
->info_ident
= 0;
598 l2cap_conn_start(conn
);
601 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
603 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
608 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
612 hcon
->l2cap_data
= conn
;
615 BT_DBG("hcon %p conn %p", hcon
, conn
);
617 conn
->mtu
= hcon
->hdev
->acl_mtu
;
618 conn
->src
= &hcon
->hdev
->bdaddr
;
619 conn
->dst
= &hcon
->dst
;
623 spin_lock_init(&conn
->lock
);
624 rwlock_init(&conn
->chan_list
.lock
);
626 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
627 (unsigned long) conn
);
629 conn
->disc_reason
= 0x13;
634 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
636 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
642 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
644 kfree_skb(conn
->rx_skb
);
647 while ((sk
= conn
->chan_list
.head
)) {
649 l2cap_chan_del(sk
, err
);
654 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
655 del_timer_sync(&conn
->info_timer
);
657 hcon
->l2cap_data
= NULL
;
661 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
663 struct l2cap_chan_list
*l
= &conn
->chan_list
;
664 write_lock_bh(&l
->lock
);
665 __l2cap_chan_add(conn
, sk
, parent
);
666 write_unlock_bh(&l
->lock
);
669 /* ---- Socket interface ---- */
670 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
673 struct hlist_node
*node
;
674 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
675 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
682 /* Find socket with psm and source bdaddr.
683 * Returns closest match.
685 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
687 struct sock
*sk
= NULL
, *sk1
= NULL
;
688 struct hlist_node
*node
;
690 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
691 if (state
&& sk
->sk_state
!= state
)
694 if (l2cap_pi(sk
)->psm
== psm
) {
696 if (!bacmp(&bt_sk(sk
)->src
, src
))
700 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
704 return node
? sk
: sk1
;
707 /* Find socket with given address (psm, src).
708 * Returns locked socket */
709 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
712 read_lock(&l2cap_sk_list
.lock
);
713 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
716 read_unlock(&l2cap_sk_list
.lock
);
720 static void l2cap_sock_destruct(struct sock
*sk
)
724 skb_queue_purge(&sk
->sk_receive_queue
);
725 skb_queue_purge(&sk
->sk_write_queue
);
728 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
732 BT_DBG("parent %p", parent
);
734 /* Close not yet accepted channels */
735 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
736 l2cap_sock_close(sk
);
738 parent
->sk_state
= BT_CLOSED
;
739 sock_set_flag(parent
, SOCK_ZAPPED
);
742 /* Kill socket (only if zapped and orphan)
743 * Must be called on unlocked socket.
745 static void l2cap_sock_kill(struct sock
*sk
)
747 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
750 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
752 /* Kill poor orphan */
753 bt_sock_unlink(&l2cap_sk_list
, sk
);
754 sock_set_flag(sk
, SOCK_DEAD
);
758 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
760 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
762 switch (sk
->sk_state
) {
764 l2cap_sock_cleanup_listen(sk
);
769 if (sk
->sk_type
== SOCK_SEQPACKET
||
770 sk
->sk_type
== SOCK_STREAM
) {
771 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
773 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
774 l2cap_send_disconn_req(conn
, sk
, reason
);
776 l2cap_chan_del(sk
, reason
);
780 if (sk
->sk_type
== SOCK_SEQPACKET
||
781 sk
->sk_type
== SOCK_STREAM
) {
782 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
783 struct l2cap_conn_rsp rsp
;
786 if (bt_sk(sk
)->defer_setup
)
787 result
= L2CAP_CR_SEC_BLOCK
;
789 result
= L2CAP_CR_BAD_PSM
;
791 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
792 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
793 rsp
.result
= cpu_to_le16(result
);
794 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
795 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
796 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
798 l2cap_chan_del(sk
, reason
);
803 l2cap_chan_del(sk
, reason
);
807 sock_set_flag(sk
, SOCK_ZAPPED
);
812 /* Must be called on unlocked socket. */
813 static void l2cap_sock_close(struct sock
*sk
)
815 l2cap_sock_clear_timer(sk
);
817 __l2cap_sock_close(sk
, ECONNRESET
);
822 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
824 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
829 sk
->sk_type
= parent
->sk_type
;
830 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
832 pi
->imtu
= l2cap_pi(parent
)->imtu
;
833 pi
->omtu
= l2cap_pi(parent
)->omtu
;
834 pi
->conf_state
= l2cap_pi(parent
)->conf_state
;
835 pi
->mode
= l2cap_pi(parent
)->mode
;
836 pi
->fcs
= l2cap_pi(parent
)->fcs
;
837 pi
->max_tx
= l2cap_pi(parent
)->max_tx
;
838 pi
->tx_win
= l2cap_pi(parent
)->tx_win
;
839 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
840 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
841 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
843 pi
->imtu
= L2CAP_DEFAULT_MTU
;
845 if (enable_ertm
&& sk
->sk_type
== SOCK_STREAM
) {
846 pi
->mode
= L2CAP_MODE_ERTM
;
847 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
849 pi
->mode
= L2CAP_MODE_BASIC
;
851 pi
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
852 pi
->fcs
= L2CAP_FCS_CRC16
;
853 pi
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
854 pi
->sec_level
= BT_SECURITY_LOW
;
856 pi
->force_reliable
= 0;
859 /* Default config options */
861 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
862 skb_queue_head_init(TX_QUEUE(sk
));
863 skb_queue_head_init(SREJ_QUEUE(sk
));
864 skb_queue_head_init(BUSY_QUEUE(sk
));
865 INIT_LIST_HEAD(SREJ_LIST(sk
));
868 static struct proto l2cap_proto
= {
870 .owner
= THIS_MODULE
,
871 .obj_size
= sizeof(struct l2cap_pinfo
)
874 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
878 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
882 sock_init_data(sock
, sk
);
883 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
885 sk
->sk_destruct
= l2cap_sock_destruct
;
886 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
888 sock_reset_flag(sk
, SOCK_ZAPPED
);
890 sk
->sk_protocol
= proto
;
891 sk
->sk_state
= BT_OPEN
;
893 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
895 bt_sock_link(&l2cap_sk_list
, sk
);
899 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
904 BT_DBG("sock %p", sock
);
906 sock
->state
= SS_UNCONNECTED
;
908 if (sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
&&
909 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
910 return -ESOCKTNOSUPPORT
;
912 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
915 sock
->ops
= &l2cap_sock_ops
;
917 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
921 l2cap_sock_init(sk
, NULL
);
925 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
927 struct sock
*sk
= sock
->sk
;
928 struct sockaddr_l2 la
;
933 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
936 memset(&la
, 0, sizeof(la
));
937 len
= min_t(unsigned int, sizeof(la
), alen
);
938 memcpy(&la
, addr
, len
);
945 if (sk
->sk_state
!= BT_OPEN
) {
950 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
951 !capable(CAP_NET_BIND_SERVICE
)) {
956 write_lock_bh(&l2cap_sk_list
.lock
);
958 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
961 /* Save source address */
962 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
963 l2cap_pi(sk
)->psm
= la
.l2_psm
;
964 l2cap_pi(sk
)->sport
= la
.l2_psm
;
965 sk
->sk_state
= BT_BOUND
;
967 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
968 __le16_to_cpu(la
.l2_psm
) == 0x0003)
969 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
972 write_unlock_bh(&l2cap_sk_list
.lock
);
979 static int l2cap_do_connect(struct sock
*sk
)
981 bdaddr_t
*src
= &bt_sk(sk
)->src
;
982 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
983 struct l2cap_conn
*conn
;
984 struct hci_conn
*hcon
;
985 struct hci_dev
*hdev
;
989 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
992 hdev
= hci_get_route(dst
, src
);
994 return -EHOSTUNREACH
;
996 hci_dev_lock_bh(hdev
);
1000 if (sk
->sk_type
== SOCK_RAW
) {
1001 switch (l2cap_pi(sk
)->sec_level
) {
1002 case BT_SECURITY_HIGH
:
1003 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
1005 case BT_SECURITY_MEDIUM
:
1006 auth_type
= HCI_AT_DEDICATED_BONDING
;
1009 auth_type
= HCI_AT_NO_BONDING
;
1012 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
1013 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
1014 auth_type
= HCI_AT_NO_BONDING_MITM
;
1016 auth_type
= HCI_AT_NO_BONDING
;
1018 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
1019 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1021 switch (l2cap_pi(sk
)->sec_level
) {
1022 case BT_SECURITY_HIGH
:
1023 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
1025 case BT_SECURITY_MEDIUM
:
1026 auth_type
= HCI_AT_GENERAL_BONDING
;
1029 auth_type
= HCI_AT_NO_BONDING
;
1034 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1035 l2cap_pi(sk
)->sec_level
, auth_type
);
1039 conn
= l2cap_conn_add(hcon
, 0);
1047 /* Update source addr of the socket */
1048 bacpy(src
, conn
->src
);
1050 l2cap_chan_add(conn
, sk
, NULL
);
1052 sk
->sk_state
= BT_CONNECT
;
1053 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
1055 if (hcon
->state
== BT_CONNECTED
) {
1056 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
1057 sk
->sk_type
!= SOCK_STREAM
) {
1058 l2cap_sock_clear_timer(sk
);
1059 sk
->sk_state
= BT_CONNECTED
;
1065 hci_dev_unlock_bh(hdev
);
1070 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
1072 struct sock
*sk
= sock
->sk
;
1073 struct sockaddr_l2 la
;
1076 BT_DBG("sk %p", sk
);
1078 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
1079 addr
->sa_family
!= AF_BLUETOOTH
)
1082 memset(&la
, 0, sizeof(la
));
1083 len
= min_t(unsigned int, sizeof(la
), alen
);
1084 memcpy(&la
, addr
, len
);
1091 if ((sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
)
1097 switch (l2cap_pi(sk
)->mode
) {
1098 case L2CAP_MODE_BASIC
:
1100 case L2CAP_MODE_ERTM
:
1101 case L2CAP_MODE_STREAMING
:
1110 switch (sk
->sk_state
) {
1114 /* Already connecting */
1118 /* Already connected */
1131 /* Set destination address and psm */
1132 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1133 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1135 err
= l2cap_do_connect(sk
);
1140 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1141 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1147 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1149 struct sock
*sk
= sock
->sk
;
1152 BT_DBG("sk %p backlog %d", sk
, backlog
);
1156 if ((sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
)
1157 || sk
->sk_state
!= BT_BOUND
) {
1162 switch (l2cap_pi(sk
)->mode
) {
1163 case L2CAP_MODE_BASIC
:
1165 case L2CAP_MODE_ERTM
:
1166 case L2CAP_MODE_STREAMING
:
1175 if (!l2cap_pi(sk
)->psm
) {
1176 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1181 write_lock_bh(&l2cap_sk_list
.lock
);
1183 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1184 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1185 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1186 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1191 write_unlock_bh(&l2cap_sk_list
.lock
);
1197 sk
->sk_max_ack_backlog
= backlog
;
1198 sk
->sk_ack_backlog
= 0;
1199 sk
->sk_state
= BT_LISTEN
;
1206 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1208 DECLARE_WAITQUEUE(wait
, current
);
1209 struct sock
*sk
= sock
->sk
, *nsk
;
1213 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1215 if (sk
->sk_state
!= BT_LISTEN
) {
1220 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1222 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1224 /* Wait for an incoming connection. (wake-one). */
1225 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1226 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1227 set_current_state(TASK_INTERRUPTIBLE
);
1234 timeo
= schedule_timeout(timeo
);
1235 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1237 if (sk
->sk_state
!= BT_LISTEN
) {
1242 if (signal_pending(current
)) {
1243 err
= sock_intr_errno(timeo
);
1247 set_current_state(TASK_RUNNING
);
1248 remove_wait_queue(sk_sleep(sk
), &wait
);
1253 newsock
->state
= SS_CONNECTED
;
1255 BT_DBG("new socket %p", nsk
);
1262 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1264 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1265 struct sock
*sk
= sock
->sk
;
1267 BT_DBG("sock %p, sk %p", sock
, sk
);
1269 addr
->sa_family
= AF_BLUETOOTH
;
1270 *len
= sizeof(struct sockaddr_l2
);
1273 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1274 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1275 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1277 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1278 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1279 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1285 static int __l2cap_wait_ack(struct sock
*sk
)
1287 DECLARE_WAITQUEUE(wait
, current
);
1291 add_wait_queue(sk_sleep(sk
), &wait
);
1292 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
1293 set_current_state(TASK_INTERRUPTIBLE
);
1298 if (signal_pending(current
)) {
1299 err
= sock_intr_errno(timeo
);
1304 timeo
= schedule_timeout(timeo
);
1307 err
= sock_error(sk
);
1311 set_current_state(TASK_RUNNING
);
1312 remove_wait_queue(sk_sleep(sk
), &wait
);
1316 static void l2cap_monitor_timeout(unsigned long arg
)
1318 struct sock
*sk
= (void *) arg
;
1320 BT_DBG("sk %p", sk
);
1323 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1324 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
, ECONNABORTED
);
1329 l2cap_pi(sk
)->retry_count
++;
1330 __mod_monitor_timer();
1332 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1336 static void l2cap_retrans_timeout(unsigned long arg
)
1338 struct sock
*sk
= (void *) arg
;
1340 BT_DBG("sk %p", sk
);
1343 l2cap_pi(sk
)->retry_count
= 1;
1344 __mod_monitor_timer();
1346 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1348 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1352 static void l2cap_drop_acked_frames(struct sock
*sk
)
1354 struct sk_buff
*skb
;
1356 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
1357 l2cap_pi(sk
)->unacked_frames
) {
1358 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1361 skb
= skb_dequeue(TX_QUEUE(sk
));
1364 l2cap_pi(sk
)->unacked_frames
--;
1367 if (!l2cap_pi(sk
)->unacked_frames
)
1368 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1371 static inline void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1373 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1375 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1377 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1380 static int l2cap_streaming_send(struct sock
*sk
)
1382 struct sk_buff
*skb
, *tx_skb
;
1383 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1386 while ((skb
= sk
->sk_send_head
)) {
1387 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1389 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1390 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1391 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1393 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1394 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1395 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1398 l2cap_do_send(sk
, tx_skb
);
1400 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1402 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1403 sk
->sk_send_head
= NULL
;
1405 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1407 skb
= skb_dequeue(TX_QUEUE(sk
));
1413 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1415 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1416 struct sk_buff
*skb
, *tx_skb
;
1419 skb
= skb_peek(TX_QUEUE(sk
));
1424 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1427 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1430 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1432 if (pi
->remote_max_tx
&&
1433 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1434 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1438 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1439 bt_cb(skb
)->retries
++;
1440 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1442 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1443 control
|= L2CAP_CTRL_FINAL
;
1444 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1447 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1448 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1450 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1452 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1453 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1454 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1457 l2cap_do_send(sk
, tx_skb
);
1460 static int l2cap_ertm_send(struct sock
*sk
)
1462 struct sk_buff
*skb
, *tx_skb
;
1463 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1467 if (sk
->sk_state
!= BT_CONNECTED
)
1470 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1472 if (pi
->remote_max_tx
&&
1473 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1474 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1478 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1480 bt_cb(skb
)->retries
++;
1482 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1483 control
&= L2CAP_CTRL_SAR
;
1485 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1486 control
|= L2CAP_CTRL_FINAL
;
1487 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1489 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1490 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1491 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1494 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1495 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1496 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1499 l2cap_do_send(sk
, tx_skb
);
1501 __mod_retrans_timer();
1503 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1504 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1506 pi
->unacked_frames
++;
1509 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1510 sk
->sk_send_head
= NULL
;
1512 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1520 static int l2cap_retransmit_frames(struct sock
*sk
)
1522 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1525 spin_lock_bh(&pi
->send_lock
);
1527 if (!skb_queue_empty(TX_QUEUE(sk
)))
1528 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1530 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1531 ret
= l2cap_ertm_send(sk
);
1533 spin_unlock_bh(&pi
->send_lock
);
1538 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1540 struct sock
*sk
= (struct sock
*)pi
;
1544 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1546 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1547 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1548 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1549 l2cap_send_sframe(pi
, control
);
1553 spin_lock_bh(&pi
->send_lock
);
1554 nframes
= l2cap_ertm_send(sk
);
1555 spin_unlock_bh(&pi
->send_lock
);
1560 control
|= L2CAP_SUPER_RCV_READY
;
1561 l2cap_send_sframe(pi
, control
);
1564 static void l2cap_send_srejtail(struct sock
*sk
)
1566 struct srej_list
*tail
;
1569 control
= L2CAP_SUPER_SELECT_REJECT
;
1570 control
|= L2CAP_CTRL_FINAL
;
1572 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1573 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1575 l2cap_send_sframe(l2cap_pi(sk
), control
);
1578 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1580 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1581 struct sk_buff
**frag
;
1584 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1590 /* Continuation fragments (no L2CAP header) */
1591 frag
= &skb_shinfo(skb
)->frag_list
;
1593 count
= min_t(unsigned int, conn
->mtu
, len
);
1595 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1598 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1604 frag
= &(*frag
)->next
;
1610 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1612 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1613 struct sk_buff
*skb
;
1614 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1615 struct l2cap_hdr
*lh
;
1617 BT_DBG("sk %p len %d", sk
, (int)len
);
1619 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1620 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1621 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1623 return ERR_PTR(-ENOMEM
);
1625 /* Create L2CAP header */
1626 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1627 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1628 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1629 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1631 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1632 if (unlikely(err
< 0)) {
1634 return ERR_PTR(err
);
1639 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1641 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1642 struct sk_buff
*skb
;
1643 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1644 struct l2cap_hdr
*lh
;
1646 BT_DBG("sk %p len %d", sk
, (int)len
);
1648 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1649 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1650 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1652 return ERR_PTR(-ENOMEM
);
1654 /* Create L2CAP header */
1655 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1656 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1657 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1659 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1660 if (unlikely(err
< 0)) {
1662 return ERR_PTR(err
);
1667 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1669 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1670 struct sk_buff
*skb
;
1671 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1672 struct l2cap_hdr
*lh
;
1674 BT_DBG("sk %p len %d", sk
, (int)len
);
1677 return ERR_PTR(-ENOTCONN
);
1682 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1685 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1686 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1687 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1689 return ERR_PTR(-ENOMEM
);
1691 /* Create L2CAP header */
1692 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1693 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1694 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1695 put_unaligned_le16(control
, skb_put(skb
, 2));
1697 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1699 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1700 if (unlikely(err
< 0)) {
1702 return ERR_PTR(err
);
1705 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1706 put_unaligned_le16(0, skb_put(skb
, 2));
1708 bt_cb(skb
)->retries
= 0;
1712 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1714 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1715 struct sk_buff
*skb
;
1716 struct sk_buff_head sar_queue
;
1720 skb_queue_head_init(&sar_queue
);
1721 control
= L2CAP_SDU_START
;
1722 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1724 return PTR_ERR(skb
);
1726 __skb_queue_tail(&sar_queue
, skb
);
1727 len
-= pi
->remote_mps
;
1728 size
+= pi
->remote_mps
;
1733 if (len
> pi
->remote_mps
) {
1734 control
= L2CAP_SDU_CONTINUE
;
1735 buflen
= pi
->remote_mps
;
1737 control
= L2CAP_SDU_END
;
1741 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1743 skb_queue_purge(&sar_queue
);
1744 return PTR_ERR(skb
);
1747 __skb_queue_tail(&sar_queue
, skb
);
1751 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1752 spin_lock_bh(&pi
->send_lock
);
1753 if (sk
->sk_send_head
== NULL
)
1754 sk
->sk_send_head
= sar_queue
.next
;
1755 spin_unlock_bh(&pi
->send_lock
);
1760 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1762 struct sock
*sk
= sock
->sk
;
1763 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1764 struct sk_buff
*skb
;
1768 BT_DBG("sock %p, sk %p", sock
, sk
);
1770 err
= sock_error(sk
);
1774 if (msg
->msg_flags
& MSG_OOB
)
1779 if (sk
->sk_state
!= BT_CONNECTED
) {
1784 /* Connectionless channel */
1785 if (sk
->sk_type
== SOCK_DGRAM
) {
1786 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1790 l2cap_do_send(sk
, skb
);
1797 case L2CAP_MODE_BASIC
:
1798 /* Check outgoing MTU */
1799 if (len
> pi
->omtu
) {
1804 /* Create a basic PDU */
1805 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1811 l2cap_do_send(sk
, skb
);
1815 case L2CAP_MODE_ERTM
:
1816 case L2CAP_MODE_STREAMING
:
1817 /* Entire SDU fits into one PDU */
1818 if (len
<= pi
->remote_mps
) {
1819 control
= L2CAP_SDU_UNSEGMENTED
;
1820 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1825 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1827 if (pi
->mode
== L2CAP_MODE_ERTM
)
1828 spin_lock_bh(&pi
->send_lock
);
1830 if (sk
->sk_send_head
== NULL
)
1831 sk
->sk_send_head
= skb
;
1833 if (pi
->mode
== L2CAP_MODE_ERTM
)
1834 spin_unlock_bh(&pi
->send_lock
);
1836 /* Segment SDU into multiples PDUs */
1837 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1842 if (pi
->mode
== L2CAP_MODE_STREAMING
) {
1843 err
= l2cap_streaming_send(sk
);
1845 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
&&
1846 pi
->conn_state
&& L2CAP_CONN_WAIT_F
) {
1850 spin_lock_bh(&pi
->send_lock
);
1851 err
= l2cap_ertm_send(sk
);
1852 spin_unlock_bh(&pi
->send_lock
);
1860 BT_DBG("bad state %1.1x", pi
->mode
);
1869 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1871 struct sock
*sk
= sock
->sk
;
1875 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1876 struct l2cap_conn_rsp rsp
;
1878 sk
->sk_state
= BT_CONFIG
;
1880 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1881 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1882 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1883 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1884 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1885 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1893 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1896 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1898 struct sock
*sk
= sock
->sk
;
1899 struct l2cap_options opts
;
1903 BT_DBG("sk %p", sk
);
1909 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1910 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1911 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1912 opts
.mode
= l2cap_pi(sk
)->mode
;
1913 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1914 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1915 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1917 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1918 if (copy_from_user((char *) &opts
, optval
, len
)) {
1923 if (opts
.txwin_size
> L2CAP_DEFAULT_TX_WINDOW
) {
1928 l2cap_pi(sk
)->mode
= opts
.mode
;
1929 switch (l2cap_pi(sk
)->mode
) {
1930 case L2CAP_MODE_BASIC
:
1931 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_STATE2_DEVICE
;
1933 case L2CAP_MODE_ERTM
:
1934 case L2CAP_MODE_STREAMING
:
1943 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1944 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1945 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1946 l2cap_pi(sk
)->max_tx
= opts
.max_tx
;
1947 l2cap_pi(sk
)->tx_win
= (__u8
)opts
.txwin_size
;
1951 if (get_user(opt
, (u32 __user
*) optval
)) {
1956 if (opt
& L2CAP_LM_AUTH
)
1957 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1958 if (opt
& L2CAP_LM_ENCRYPT
)
1959 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1960 if (opt
& L2CAP_LM_SECURE
)
1961 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1963 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1964 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1976 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1978 struct sock
*sk
= sock
->sk
;
1979 struct bt_security sec
;
1983 BT_DBG("sk %p", sk
);
1985 if (level
== SOL_L2CAP
)
1986 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
1988 if (level
!= SOL_BLUETOOTH
)
1989 return -ENOPROTOOPT
;
1995 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
1996 && sk
->sk_type
!= SOCK_RAW
) {
2001 sec
.level
= BT_SECURITY_LOW
;
2003 len
= min_t(unsigned int, sizeof(sec
), optlen
);
2004 if (copy_from_user((char *) &sec
, optval
, len
)) {
2009 if (sec
.level
< BT_SECURITY_LOW
||
2010 sec
.level
> BT_SECURITY_HIGH
) {
2015 l2cap_pi(sk
)->sec_level
= sec
.level
;
2018 case BT_DEFER_SETUP
:
2019 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2024 if (get_user(opt
, (u32 __user
*) optval
)) {
2029 bt_sk(sk
)->defer_setup
= opt
;
2041 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
2043 struct sock
*sk
= sock
->sk
;
2044 struct l2cap_options opts
;
2045 struct l2cap_conninfo cinfo
;
2049 BT_DBG("sk %p", sk
);
2051 if (get_user(len
, optlen
))
2058 opts
.imtu
= l2cap_pi(sk
)->imtu
;
2059 opts
.omtu
= l2cap_pi(sk
)->omtu
;
2060 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
2061 opts
.mode
= l2cap_pi(sk
)->mode
;
2062 opts
.fcs
= l2cap_pi(sk
)->fcs
;
2063 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
2064 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
2066 len
= min_t(unsigned int, len
, sizeof(opts
));
2067 if (copy_to_user(optval
, (char *) &opts
, len
))
2073 switch (l2cap_pi(sk
)->sec_level
) {
2074 case BT_SECURITY_LOW
:
2075 opt
= L2CAP_LM_AUTH
;
2077 case BT_SECURITY_MEDIUM
:
2078 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
2080 case BT_SECURITY_HIGH
:
2081 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
2089 if (l2cap_pi(sk
)->role_switch
)
2090 opt
|= L2CAP_LM_MASTER
;
2092 if (l2cap_pi(sk
)->force_reliable
)
2093 opt
|= L2CAP_LM_RELIABLE
;
2095 if (put_user(opt
, (u32 __user
*) optval
))
2099 case L2CAP_CONNINFO
:
2100 if (sk
->sk_state
!= BT_CONNECTED
&&
2101 !(sk
->sk_state
== BT_CONNECT2
&&
2102 bt_sk(sk
)->defer_setup
)) {
2107 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
2108 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
2110 len
= min_t(unsigned int, len
, sizeof(cinfo
));
2111 if (copy_to_user(optval
, (char *) &cinfo
, len
))
2125 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
2127 struct sock
*sk
= sock
->sk
;
2128 struct bt_security sec
;
2131 BT_DBG("sk %p", sk
);
2133 if (level
== SOL_L2CAP
)
2134 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
2136 if (level
!= SOL_BLUETOOTH
)
2137 return -ENOPROTOOPT
;
2139 if (get_user(len
, optlen
))
2146 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2147 && sk
->sk_type
!= SOCK_RAW
) {
2152 sec
.level
= l2cap_pi(sk
)->sec_level
;
2154 len
= min_t(unsigned int, len
, sizeof(sec
));
2155 if (copy_to_user(optval
, (char *) &sec
, len
))
2160 case BT_DEFER_SETUP
:
2161 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2166 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
2180 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
2182 struct sock
*sk
= sock
->sk
;
2185 BT_DBG("sock %p, sk %p", sock
, sk
);
2191 if (!sk
->sk_shutdown
) {
2192 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2193 err
= __l2cap_wait_ack(sk
);
2195 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2196 l2cap_sock_clear_timer(sk
);
2197 __l2cap_sock_close(sk
, 0);
2199 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
2200 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
2204 if (!err
&& sk
->sk_err
)
2211 static int l2cap_sock_release(struct socket
*sock
)
2213 struct sock
*sk
= sock
->sk
;
2216 BT_DBG("sock %p, sk %p", sock
, sk
);
2221 err
= l2cap_sock_shutdown(sock
, 2);
2224 l2cap_sock_kill(sk
);
2228 static void l2cap_chan_ready(struct sock
*sk
)
2230 struct sock
*parent
= bt_sk(sk
)->parent
;
2232 BT_DBG("sk %p, parent %p", sk
, parent
);
2234 l2cap_pi(sk
)->conf_state
= 0;
2235 l2cap_sock_clear_timer(sk
);
2238 /* Outgoing channel.
2239 * Wake up socket sleeping on connect.
2241 sk
->sk_state
= BT_CONNECTED
;
2242 sk
->sk_state_change(sk
);
2244 /* Incoming channel.
2245 * Wake up socket sleeping on accept.
2247 parent
->sk_data_ready(parent
, 0);
2251 /* Copy frame to all raw sockets on that connection */
2252 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2254 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2255 struct sk_buff
*nskb
;
2258 BT_DBG("conn %p", conn
);
2260 read_lock(&l
->lock
);
2261 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2262 if (sk
->sk_type
!= SOCK_RAW
)
2265 /* Don't send frame to the socket it came from */
2268 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2272 if (sock_queue_rcv_skb(sk
, nskb
))
2275 read_unlock(&l
->lock
);
2278 /* ---- L2CAP signalling commands ---- */
2279 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2280 u8 code
, u8 ident
, u16 dlen
, void *data
)
2282 struct sk_buff
*skb
, **frag
;
2283 struct l2cap_cmd_hdr
*cmd
;
2284 struct l2cap_hdr
*lh
;
2287 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2288 conn
, code
, ident
, dlen
);
2290 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2291 count
= min_t(unsigned int, conn
->mtu
, len
);
2293 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2297 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2298 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2299 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2301 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2304 cmd
->len
= cpu_to_le16(dlen
);
2307 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2308 memcpy(skb_put(skb
, count
), data
, count
);
2314 /* Continuation fragments (no L2CAP header) */
2315 frag
= &skb_shinfo(skb
)->frag_list
;
2317 count
= min_t(unsigned int, conn
->mtu
, len
);
2319 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2323 memcpy(skb_put(*frag
, count
), data
, count
);
2328 frag
= &(*frag
)->next
;
2338 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2340 struct l2cap_conf_opt
*opt
= *ptr
;
2343 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2351 *val
= *((u8
*) opt
->val
);
2355 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2359 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2363 *val
= (unsigned long) opt
->val
;
2367 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2371 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2373 struct l2cap_conf_opt
*opt
= *ptr
;
2375 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2382 *((u8
*) opt
->val
) = val
;
2386 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2390 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2394 memcpy(opt
->val
, (void *) val
, len
);
2398 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
/* Ack timer expiry: send a (possibly piggybacked) acknowledgement. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2410 static inline void l2cap_ertm_init(struct sock
*sk
)
2412 l2cap_pi(sk
)->expected_ack_seq
= 0;
2413 l2cap_pi(sk
)->unacked_frames
= 0;
2414 l2cap_pi(sk
)->buffer_seq
= 0;
2415 l2cap_pi(sk
)->num_acked
= 0;
2416 l2cap_pi(sk
)->frames_sent
= 0;
2418 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2419 l2cap_retrans_timeout
, (unsigned long) sk
);
2420 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2421 l2cap_monitor_timeout
, (unsigned long) sk
);
2422 setup_timer(&l2cap_pi(sk
)->ack_timer
,
2423 l2cap_ack_timeout
, (unsigned long) sk
);
2425 __skb_queue_head_init(SREJ_QUEUE(sk
));
2426 __skb_queue_head_init(BUSY_QUEUE(sk
));
2427 spin_lock_init(&l2cap_pi(sk
)->send_lock
);
2429 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
2432 static int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
2434 u32 local_feat_mask
= l2cap_feat_mask
;
2436 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
2439 case L2CAP_MODE_ERTM
:
2440 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
2441 case L2CAP_MODE_STREAMING
:
2442 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
2448 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2451 case L2CAP_MODE_STREAMING
:
2452 case L2CAP_MODE_ERTM
:
2453 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2457 return L2CAP_MODE_BASIC
;
2461 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2463 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2464 struct l2cap_conf_req
*req
= data
;
2465 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
2466 void *ptr
= req
->data
;
2468 BT_DBG("sk %p", sk
);
2470 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2474 case L2CAP_MODE_STREAMING
:
2475 case L2CAP_MODE_ERTM
:
2476 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
2477 pi
->mode
= l2cap_select_mode(rfc
.mode
,
2478 pi
->conn
->feat_mask
);
2482 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2483 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
2486 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2492 case L2CAP_MODE_BASIC
:
2493 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2494 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2496 rfc
.mode
= L2CAP_MODE_BASIC
;
2498 rfc
.max_transmit
= 0;
2499 rfc
.retrans_timeout
= 0;
2500 rfc
.monitor_timeout
= 0;
2501 rfc
.max_pdu_size
= 0;
2505 case L2CAP_MODE_ERTM
:
2506 rfc
.mode
= L2CAP_MODE_ERTM
;
2507 rfc
.txwin_size
= pi
->tx_win
;
2508 rfc
.max_transmit
= pi
->max_tx
;
2509 rfc
.retrans_timeout
= 0;
2510 rfc
.monitor_timeout
= 0;
2511 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2512 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2513 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2515 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2518 if (pi
->fcs
== L2CAP_FCS_NONE
||
2519 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2520 pi
->fcs
= L2CAP_FCS_NONE
;
2521 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2525 case L2CAP_MODE_STREAMING
:
2526 rfc
.mode
= L2CAP_MODE_STREAMING
;
2528 rfc
.max_transmit
= 0;
2529 rfc
.retrans_timeout
= 0;
2530 rfc
.monitor_timeout
= 0;
2531 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2532 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2533 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2535 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2538 if (pi
->fcs
== L2CAP_FCS_NONE
||
2539 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2540 pi
->fcs
= L2CAP_FCS_NONE
;
2541 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2546 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2547 (unsigned long) &rfc
);
2549 /* FIXME: Need actual value of the flush timeout */
2550 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2551 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2553 req
->dcid
= cpu_to_le16(pi
->dcid
);
2554 req
->flags
= cpu_to_le16(0);
2559 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2561 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2562 struct l2cap_conf_rsp
*rsp
= data
;
2563 void *ptr
= rsp
->data
;
2564 void *req
= pi
->conf_req
;
2565 int len
= pi
->conf_len
;
2566 int type
, hint
, olen
;
2568 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2569 u16 mtu
= L2CAP_DEFAULT_MTU
;
2570 u16 result
= L2CAP_CONF_SUCCESS
;
2572 BT_DBG("sk %p", sk
);
2574 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2575 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2577 hint
= type
& L2CAP_CONF_HINT
;
2578 type
&= L2CAP_CONF_MASK
;
2581 case L2CAP_CONF_MTU
:
2585 case L2CAP_CONF_FLUSH_TO
:
2589 case L2CAP_CONF_QOS
:
2592 case L2CAP_CONF_RFC
:
2593 if (olen
== sizeof(rfc
))
2594 memcpy(&rfc
, (void *) val
, olen
);
2597 case L2CAP_CONF_FCS
:
2598 if (val
== L2CAP_FCS_NONE
)
2599 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2607 result
= L2CAP_CONF_UNKNOWN
;
2608 *((u8
*) ptr
++) = type
;
2613 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2617 case L2CAP_MODE_STREAMING
:
2618 case L2CAP_MODE_ERTM
:
2619 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
2620 pi
->mode
= l2cap_select_mode(rfc
.mode
,
2621 pi
->conn
->feat_mask
);
2625 if (pi
->mode
!= rfc
.mode
)
2626 return -ECONNREFUSED
;
2632 if (pi
->mode
!= rfc
.mode
) {
2633 result
= L2CAP_CONF_UNACCEPT
;
2634 rfc
.mode
= pi
->mode
;
2636 if (pi
->num_conf_rsp
== 1)
2637 return -ECONNREFUSED
;
2639 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2640 sizeof(rfc
), (unsigned long) &rfc
);
2644 if (result
== L2CAP_CONF_SUCCESS
) {
2645 /* Configure output options and let the other side know
2646 * which ones we don't like. */
2648 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2649 result
= L2CAP_CONF_UNACCEPT
;
2652 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2654 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2657 case L2CAP_MODE_BASIC
:
2658 pi
->fcs
= L2CAP_FCS_NONE
;
2659 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2662 case L2CAP_MODE_ERTM
:
2663 pi
->remote_tx_win
= rfc
.txwin_size
;
2664 pi
->remote_max_tx
= rfc
.max_transmit
;
2665 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2666 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2668 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2670 rfc
.retrans_timeout
=
2671 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2672 rfc
.monitor_timeout
=
2673 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2675 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2677 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2678 sizeof(rfc
), (unsigned long) &rfc
);
2682 case L2CAP_MODE_STREAMING
:
2683 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2684 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2686 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2688 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2690 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2691 sizeof(rfc
), (unsigned long) &rfc
);
2696 result
= L2CAP_CONF_UNACCEPT
;
2698 memset(&rfc
, 0, sizeof(rfc
));
2699 rfc
.mode
= pi
->mode
;
2702 if (result
== L2CAP_CONF_SUCCESS
)
2703 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2705 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2706 rsp
->result
= cpu_to_le16(result
);
2707 rsp
->flags
= cpu_to_le16(0x0000);
2712 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2714 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2715 struct l2cap_conf_req
*req
= data
;
2716 void *ptr
= req
->data
;
2719 struct l2cap_conf_rfc rfc
;
2721 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2723 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2724 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2727 case L2CAP_CONF_MTU
:
2728 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2729 *result
= L2CAP_CONF_UNACCEPT
;
2730 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2733 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2736 case L2CAP_CONF_FLUSH_TO
:
2738 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2742 case L2CAP_CONF_RFC
:
2743 if (olen
== sizeof(rfc
))
2744 memcpy(&rfc
, (void *)val
, olen
);
2746 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2747 rfc
.mode
!= pi
->mode
)
2748 return -ECONNREFUSED
;
2750 pi
->mode
= rfc
.mode
;
2753 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2754 sizeof(rfc
), (unsigned long) &rfc
);
2759 if (*result
== L2CAP_CONF_SUCCESS
) {
2761 case L2CAP_MODE_ERTM
:
2762 pi
->remote_tx_win
= rfc
.txwin_size
;
2763 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2764 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2765 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2767 case L2CAP_MODE_STREAMING
:
2768 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2772 req
->dcid
= cpu_to_le16(pi
->dcid
);
2773 req
->flags
= cpu_to_le16(0x0000);
2778 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2780 struct l2cap_conf_rsp
*rsp
= data
;
2781 void *ptr
= rsp
->data
;
2783 BT_DBG("sk %p", sk
);
2785 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2786 rsp
->result
= cpu_to_le16(result
);
2787 rsp
->flags
= cpu_to_le16(flags
);
2792 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2794 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2797 struct l2cap_conf_rfc rfc
;
2799 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2801 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2804 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2805 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2808 case L2CAP_CONF_RFC
:
2809 if (olen
== sizeof(rfc
))
2810 memcpy(&rfc
, (void *)val
, olen
);
2817 case L2CAP_MODE_ERTM
:
2818 pi
->remote_tx_win
= rfc
.txwin_size
;
2819 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2820 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2821 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2823 case L2CAP_MODE_STREAMING
:
2824 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2828 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2830 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2832 if (rej
->reason
!= 0x0000)
2835 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2836 cmd
->ident
== conn
->info_ident
) {
2837 del_timer(&conn
->info_timer
);
2839 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2840 conn
->info_ident
= 0;
2842 l2cap_conn_start(conn
);
2848 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2850 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2851 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2852 struct l2cap_conn_rsp rsp
;
2853 struct sock
*sk
, *parent
;
2854 int result
, status
= L2CAP_CS_NO_INFO
;
2856 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2857 __le16 psm
= req
->psm
;
2859 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2861 /* Check if we have socket listening on psm */
2862 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2864 result
= L2CAP_CR_BAD_PSM
;
2868 /* Check if the ACL is secure enough (if not SDP) */
2869 if (psm
!= cpu_to_le16(0x0001) &&
2870 !hci_conn_check_link_mode(conn
->hcon
)) {
2871 conn
->disc_reason
= 0x05;
2872 result
= L2CAP_CR_SEC_BLOCK
;
2876 result
= L2CAP_CR_NO_MEM
;
2878 /* Check for backlog size */
2879 if (sk_acceptq_is_full(parent
)) {
2880 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2884 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2888 write_lock_bh(&list
->lock
);
2890 /* Check if we already have channel with that dcid */
2891 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2892 write_unlock_bh(&list
->lock
);
2893 sock_set_flag(sk
, SOCK_ZAPPED
);
2894 l2cap_sock_kill(sk
);
2898 hci_conn_hold(conn
->hcon
);
2900 l2cap_sock_init(sk
, parent
);
2901 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2902 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2903 l2cap_pi(sk
)->psm
= psm
;
2904 l2cap_pi(sk
)->dcid
= scid
;
2906 __l2cap_chan_add(conn
, sk
, parent
);
2907 dcid
= l2cap_pi(sk
)->scid
;
2909 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2911 l2cap_pi(sk
)->ident
= cmd
->ident
;
2913 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2914 if (l2cap_check_security(sk
)) {
2915 if (bt_sk(sk
)->defer_setup
) {
2916 sk
->sk_state
= BT_CONNECT2
;
2917 result
= L2CAP_CR_PEND
;
2918 status
= L2CAP_CS_AUTHOR_PEND
;
2919 parent
->sk_data_ready(parent
, 0);
2921 sk
->sk_state
= BT_CONFIG
;
2922 result
= L2CAP_CR_SUCCESS
;
2923 status
= L2CAP_CS_NO_INFO
;
2926 sk
->sk_state
= BT_CONNECT2
;
2927 result
= L2CAP_CR_PEND
;
2928 status
= L2CAP_CS_AUTHEN_PEND
;
2931 sk
->sk_state
= BT_CONNECT2
;
2932 result
= L2CAP_CR_PEND
;
2933 status
= L2CAP_CS_NO_INFO
;
2936 write_unlock_bh(&list
->lock
);
2939 bh_unlock_sock(parent
);
2942 rsp
.scid
= cpu_to_le16(scid
);
2943 rsp
.dcid
= cpu_to_le16(dcid
);
2944 rsp
.result
= cpu_to_le16(result
);
2945 rsp
.status
= cpu_to_le16(status
);
2946 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2948 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2949 struct l2cap_info_req info
;
2950 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2952 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2953 conn
->info_ident
= l2cap_get_ident(conn
);
2955 mod_timer(&conn
->info_timer
, jiffies
+
2956 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2958 l2cap_send_cmd(conn
, conn
->info_ident
,
2959 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2965 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2967 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2968 u16 scid
, dcid
, result
, status
;
2972 scid
= __le16_to_cpu(rsp
->scid
);
2973 dcid
= __le16_to_cpu(rsp
->dcid
);
2974 result
= __le16_to_cpu(rsp
->result
);
2975 status
= __le16_to_cpu(rsp
->status
);
2977 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2980 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2984 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2990 case L2CAP_CR_SUCCESS
:
2991 sk
->sk_state
= BT_CONFIG
;
2992 l2cap_pi(sk
)->ident
= 0;
2993 l2cap_pi(sk
)->dcid
= dcid
;
2994 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2995 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2997 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2998 l2cap_build_conf_req(sk
, req
), req
);
2999 l2cap_pi(sk
)->num_conf_req
++;
3003 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3007 l2cap_chan_del(sk
, ECONNREFUSED
);
3015 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3017 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3023 dcid
= __le16_to_cpu(req
->dcid
);
3024 flags
= __le16_to_cpu(req
->flags
);
3026 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3028 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3032 if (sk
->sk_state
== BT_DISCONN
)
3035 /* Reject if config buffer is too small. */
3036 len
= cmd_len
- sizeof(*req
);
3037 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
3038 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3039 l2cap_build_conf_rsp(sk
, rsp
,
3040 L2CAP_CONF_REJECT
, flags
), rsp
);
3045 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
3046 l2cap_pi(sk
)->conf_len
+= len
;
3048 if (flags
& 0x0001) {
3049 /* Incomplete config. Send empty response. */
3050 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3051 l2cap_build_conf_rsp(sk
, rsp
,
3052 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3056 /* Complete config. */
3057 len
= l2cap_parse_conf_req(sk
, rsp
);
3059 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3063 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3064 l2cap_pi(sk
)->num_conf_rsp
++;
3066 /* Reset config buffer. */
3067 l2cap_pi(sk
)->conf_len
= 0;
3069 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
3072 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
3073 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3074 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3075 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3077 sk
->sk_state
= BT_CONNECTED
;
3079 l2cap_pi(sk
)->next_tx_seq
= 0;
3080 l2cap_pi(sk
)->expected_tx_seq
= 0;
3081 __skb_queue_head_init(TX_QUEUE(sk
));
3082 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3083 l2cap_ertm_init(sk
);
3085 l2cap_chan_ready(sk
);
3089 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
3091 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3092 l2cap_build_conf_req(sk
, buf
), buf
);
3093 l2cap_pi(sk
)->num_conf_req
++;
3101 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3103 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3104 u16 scid
, flags
, result
;
3106 int len
= cmd
->len
- sizeof(*rsp
);
3108 scid
= __le16_to_cpu(rsp
->scid
);
3109 flags
= __le16_to_cpu(rsp
->flags
);
3110 result
= __le16_to_cpu(rsp
->result
);
3112 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3113 scid
, flags
, result
);
3115 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3120 case L2CAP_CONF_SUCCESS
:
3121 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
3124 case L2CAP_CONF_UNACCEPT
:
3125 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3128 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3129 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3133 /* throw out any old stored conf requests */
3134 result
= L2CAP_CONF_SUCCESS
;
3135 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
3138 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3142 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3143 L2CAP_CONF_REQ
, len
, req
);
3144 l2cap_pi(sk
)->num_conf_req
++;
3145 if (result
!= L2CAP_CONF_SUCCESS
)
3151 sk
->sk_err
= ECONNRESET
;
3152 l2cap_sock_set_timer(sk
, HZ
* 5);
3153 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3160 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
3162 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
3163 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3164 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3165 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3167 sk
->sk_state
= BT_CONNECTED
;
3168 l2cap_pi(sk
)->next_tx_seq
= 0;
3169 l2cap_pi(sk
)->expected_tx_seq
= 0;
3170 __skb_queue_head_init(TX_QUEUE(sk
));
3171 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3172 l2cap_ertm_init(sk
);
3174 l2cap_chan_ready(sk
);
3182 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3184 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3185 struct l2cap_disconn_rsp rsp
;
3189 scid
= __le16_to_cpu(req
->scid
);
3190 dcid
= __le16_to_cpu(req
->dcid
);
3192 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3194 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3198 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3199 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3200 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3202 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3204 l2cap_chan_del(sk
, ECONNRESET
);
3207 l2cap_sock_kill(sk
);
3211 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3213 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3217 scid
= __le16_to_cpu(rsp
->scid
);
3218 dcid
= __le16_to_cpu(rsp
->dcid
);
3220 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3222 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3226 l2cap_chan_del(sk
, 0);
3229 l2cap_sock_kill(sk
);
3233 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3235 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3238 type
= __le16_to_cpu(req
->type
);
3240 BT_DBG("type 0x%4.4x", type
);
3242 if (type
== L2CAP_IT_FEAT_MASK
) {
3244 u32 feat_mask
= l2cap_feat_mask
;
3245 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3246 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3247 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3249 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3251 put_unaligned_le32(feat_mask
, rsp
->data
);
3252 l2cap_send_cmd(conn
, cmd
->ident
,
3253 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3254 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3256 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3257 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3258 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3259 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
3260 l2cap_send_cmd(conn
, cmd
->ident
,
3261 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3263 struct l2cap_info_rsp rsp
;
3264 rsp
.type
= cpu_to_le16(type
);
3265 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3266 l2cap_send_cmd(conn
, cmd
->ident
,
3267 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3273 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3275 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3278 type
= __le16_to_cpu(rsp
->type
);
3279 result
= __le16_to_cpu(rsp
->result
);
3281 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3283 del_timer(&conn
->info_timer
);
3285 if (type
== L2CAP_IT_FEAT_MASK
) {
3286 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3288 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3289 struct l2cap_info_req req
;
3290 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3292 conn
->info_ident
= l2cap_get_ident(conn
);
3294 l2cap_send_cmd(conn
, conn
->info_ident
,
3295 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3297 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3298 conn
->info_ident
= 0;
3300 l2cap_conn_start(conn
);
3302 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3303 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3304 conn
->info_ident
= 0;
3306 l2cap_conn_start(conn
);
3312 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3314 u8
*data
= skb
->data
;
3316 struct l2cap_cmd_hdr cmd
;
3319 l2cap_raw_recv(conn
, skb
);
3321 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3323 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3324 data
+= L2CAP_CMD_HDR_SIZE
;
3325 len
-= L2CAP_CMD_HDR_SIZE
;
3327 cmd_len
= le16_to_cpu(cmd
.len
);
3329 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3331 if (cmd_len
> len
|| !cmd
.ident
) {
3332 BT_DBG("corrupted command");
3337 case L2CAP_COMMAND_REJ
:
3338 l2cap_command_rej(conn
, &cmd
, data
);
3341 case L2CAP_CONN_REQ
:
3342 err
= l2cap_connect_req(conn
, &cmd
, data
);
3345 case L2CAP_CONN_RSP
:
3346 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3349 case L2CAP_CONF_REQ
:
3350 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3353 case L2CAP_CONF_RSP
:
3354 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3357 case L2CAP_DISCONN_REQ
:
3358 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3361 case L2CAP_DISCONN_RSP
:
3362 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3365 case L2CAP_ECHO_REQ
:
3366 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3369 case L2CAP_ECHO_RSP
:
3372 case L2CAP_INFO_REQ
:
3373 err
= l2cap_information_req(conn
, &cmd
, data
);
3376 case L2CAP_INFO_RSP
:
3377 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3381 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3387 struct l2cap_cmd_rej rej
;
3388 BT_DBG("error %d", err
);
3390 /* FIXME: Map err to a valid reason */
3391 rej
.reason
= cpu_to_le16(0);
3392 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3402 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3404 u16 our_fcs
, rcv_fcs
;
3405 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3407 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3408 skb_trim(skb
, skb
->len
- 2);
3409 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3410 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3412 if (our_fcs
!= rcv_fcs
)
3418 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
3420 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3423 pi
->frames_sent
= 0;
3425 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3427 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3428 control
|= L2CAP_SUPER_RCV_NOT_READY
;
3429 l2cap_send_sframe(pi
, control
);
3430 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3433 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
3434 l2cap_retransmit_frames(sk
);
3436 spin_lock_bh(&pi
->send_lock
);
3437 l2cap_ertm_send(sk
);
3438 spin_unlock_bh(&pi
->send_lock
);
3440 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3441 pi
->frames_sent
== 0) {
3442 control
|= L2CAP_SUPER_RCV_READY
;
3443 l2cap_send_sframe(pi
, control
);
3447 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3449 struct sk_buff
*next_skb
;
3450 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3451 int tx_seq_offset
, next_tx_seq_offset
;
3453 bt_cb(skb
)->tx_seq
= tx_seq
;
3454 bt_cb(skb
)->sar
= sar
;
3456 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3458 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3462 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3463 if (tx_seq_offset
< 0)
3464 tx_seq_offset
+= 64;
3467 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3470 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3471 pi
->buffer_seq
) % 64;
3472 if (next_tx_seq_offset
< 0)
3473 next_tx_seq_offset
+= 64;
3475 if (next_tx_seq_offset
> tx_seq_offset
) {
3476 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3480 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3483 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3485 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3490 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3492 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3493 struct sk_buff
*_skb
;
3496 switch (control
& L2CAP_CTRL_SAR
) {
3497 case L2CAP_SDU_UNSEGMENTED
:
3498 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3501 err
= sock_queue_rcv_skb(sk
, skb
);
3507 case L2CAP_SDU_START
:
3508 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3511 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3513 if (pi
->sdu_len
> pi
->imtu
)
3516 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3520 /* pull sdu_len bytes only after alloc, because of Local Busy
3521 * condition we have to be sure that this will be executed
3522 * only once, i.e., when alloc does not fail */
3525 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3527 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3528 pi
->partial_sdu_len
= skb
->len
;
3531 case L2CAP_SDU_CONTINUE
:
3532 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3538 pi
->partial_sdu_len
+= skb
->len
;
3539 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3542 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3547 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3553 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3554 pi
->partial_sdu_len
+= skb
->len
;
3556 if (pi
->partial_sdu_len
> pi
->imtu
)
3559 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
3562 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3565 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3567 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3571 err
= sock_queue_rcv_skb(sk
, _skb
);
3574 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3578 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3579 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3593 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3598 static void l2cap_busy_work(struct work_struct
*work
)
3600 DECLARE_WAITQUEUE(wait
, current
);
3601 struct l2cap_pinfo
*pi
=
3602 container_of(work
, struct l2cap_pinfo
, busy_work
);
3603 struct sock
*sk
= (struct sock
*)pi
;
3604 int n_tries
= 0, timeo
= HZ
/5, err
;
3605 struct sk_buff
*skb
;
3610 add_wait_queue(sk_sleep(sk
), &wait
);
3611 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3612 set_current_state(TASK_INTERRUPTIBLE
);
3614 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3616 l2cap_send_disconn_req(pi
->conn
, sk
, EBUSY
);
3623 if (signal_pending(current
)) {
3624 err
= sock_intr_errno(timeo
);
3629 timeo
= schedule_timeout(timeo
);
3632 err
= sock_error(sk
);
3636 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
3637 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3638 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3640 skb_queue_head(BUSY_QUEUE(sk
), skb
);
3644 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3651 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
3654 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3655 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3656 l2cap_send_sframe(pi
, control
);
3657 l2cap_pi(sk
)->retry_count
= 1;
3659 del_timer(&pi
->retrans_timer
);
3660 __mod_monitor_timer();
3662 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
3665 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3666 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3668 BT_DBG("sk %p, Exit local busy", sk
);
3670 set_current_state(TASK_RUNNING
);
3671 remove_wait_queue(sk_sleep(sk
), &wait
);
3676 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3678 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3681 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3682 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3683 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3687 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3689 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3693 /* Busy Condition */
3694 BT_DBG("sk %p, Enter local busy", sk
);
3696 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3697 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3698 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3700 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3701 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3702 l2cap_send_sframe(pi
, sctrl
);
3704 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3706 del_timer(&pi
->ack_timer
);
3708 queue_work(_busy_wq
, &pi
->busy_work
);
3713 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3715 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3716 struct sk_buff
*_skb
;
3720 * TODO: We have to notify the userland if some data is lost with the
3724 switch (control
& L2CAP_CTRL_SAR
) {
3725 case L2CAP_SDU_UNSEGMENTED
:
3726 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3731 err
= sock_queue_rcv_skb(sk
, skb
);
3737 case L2CAP_SDU_START
:
3738 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3743 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3746 if (pi
->sdu_len
> pi
->imtu
) {
3751 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3757 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3759 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3760 pi
->partial_sdu_len
= skb
->len
;
3764 case L2CAP_SDU_CONTINUE
:
3765 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3768 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3770 pi
->partial_sdu_len
+= skb
->len
;
3771 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3779 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3782 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3784 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3785 pi
->partial_sdu_len
+= skb
->len
;
3787 if (pi
->partial_sdu_len
> pi
->imtu
)
3790 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3791 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3792 err
= sock_queue_rcv_skb(sk
, _skb
);
3807 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3809 struct sk_buff
*skb
;
3812 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3813 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3816 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3817 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3818 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3819 l2cap_pi(sk
)->buffer_seq_srej
=
3820 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3821 tx_seq
= (tx_seq
+ 1) % 64;
3825 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3827 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3828 struct srej_list
*l
, *tmp
;
3831 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3832 if (l
->tx_seq
== tx_seq
) {
3837 control
= L2CAP_SUPER_SELECT_REJECT
;
3838 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3839 l2cap_send_sframe(pi
, control
);
3841 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3845 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3847 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3848 struct srej_list
*new;
3851 while (tx_seq
!= pi
->expected_tx_seq
) {
3852 control
= L2CAP_SUPER_SELECT_REJECT
;
3853 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3854 l2cap_send_sframe(pi
, control
);
3856 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3857 new->tx_seq
= pi
->expected_tx_seq
;
3858 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3859 list_add_tail(&new->list
, SREJ_LIST(sk
));
3861 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3864 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3866 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3867 u8 tx_seq
= __get_txseq(rx_control
);
3868 u8 req_seq
= __get_reqseq(rx_control
);
3869 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3870 int tx_seq_offset
, expected_tx_seq_offset
;
3871 int num_to_ack
= (pi
->tx_win
/6) + 1;
3874 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk
, skb
->len
, tx_seq
,
3877 if (L2CAP_CTRL_FINAL
& rx_control
&&
3878 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3879 del_timer(&pi
->monitor_timer
);
3880 if (pi
->unacked_frames
> 0)
3881 __mod_retrans_timer();
3882 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3885 pi
->expected_ack_seq
= req_seq
;
3886 l2cap_drop_acked_frames(sk
);
3888 if (tx_seq
== pi
->expected_tx_seq
)
3891 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3892 if (tx_seq_offset
< 0)
3893 tx_seq_offset
+= 64;
3895 /* invalid tx_seq */
3896 if (tx_seq_offset
>= pi
->tx_win
) {
3897 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3901 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3904 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3905 struct srej_list
*first
;
3907 first
= list_first_entry(SREJ_LIST(sk
),
3908 struct srej_list
, list
);
3909 if (tx_seq
== first
->tx_seq
) {
3910 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3911 l2cap_check_srej_gap(sk
, tx_seq
);
3913 list_del(&first
->list
);
3916 if (list_empty(SREJ_LIST(sk
))) {
3917 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3918 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3920 BT_DBG("sk %p, Exit SREJ_SENT", sk
);
3923 struct srej_list
*l
;
3925 /* duplicated tx_seq */
3926 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
3929 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3930 if (l
->tx_seq
== tx_seq
) {
3931 l2cap_resend_srejframe(sk
, tx_seq
);
3935 l2cap_send_srejframe(sk
, tx_seq
);
3938 expected_tx_seq_offset
=
3939 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
3940 if (expected_tx_seq_offset
< 0)
3941 expected_tx_seq_offset
+= 64;
3943 /* duplicated tx_seq */
3944 if (tx_seq_offset
< expected_tx_seq_offset
)
3947 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3949 BT_DBG("sk %p, Enter SREJ", sk
);
3951 INIT_LIST_HEAD(SREJ_LIST(sk
));
3952 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3954 __skb_queue_head_init(SREJ_QUEUE(sk
));
3955 __skb_queue_head_init(BUSY_QUEUE(sk
));
3956 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3958 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3960 l2cap_send_srejframe(sk
, tx_seq
);
3962 del_timer(&pi
->ack_timer
);
3967 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3969 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3970 bt_cb(skb
)->tx_seq
= tx_seq
;
3971 bt_cb(skb
)->sar
= sar
;
3972 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3976 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
3980 if (rx_control
& L2CAP_CTRL_FINAL
) {
3981 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3982 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3984 l2cap_retransmit_frames(sk
);
3989 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
3990 if (pi
->num_acked
== num_to_ack
- 1)
4000 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
4002 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4004 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, __get_reqseq(rx_control
),
4007 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
4008 l2cap_drop_acked_frames(sk
);
4010 if (rx_control
& L2CAP_CTRL_POLL
) {
4011 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4012 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4013 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4014 (pi
->unacked_frames
> 0))
4015 __mod_retrans_timer();
4017 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4018 l2cap_send_srejtail(sk
);
4020 l2cap_send_i_or_rr_or_rnr(sk
);
4023 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4024 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4026 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4027 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4029 l2cap_retransmit_frames(sk
);
4032 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4033 (pi
->unacked_frames
> 0))
4034 __mod_retrans_timer();
4036 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4037 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4040 spin_lock_bh(&pi
->send_lock
);
4041 l2cap_ertm_send(sk
);
4042 spin_unlock_bh(&pi
->send_lock
);
4047 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
4049 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4050 u8 tx_seq
= __get_reqseq(rx_control
);
4052 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4054 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4056 pi
->expected_ack_seq
= tx_seq
;
4057 l2cap_drop_acked_frames(sk
);
4059 if (rx_control
& L2CAP_CTRL_FINAL
) {
4060 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4061 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4063 l2cap_retransmit_frames(sk
);
4065 l2cap_retransmit_frames(sk
);
4067 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
4068 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
4071 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
4073 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4074 u8 tx_seq
= __get_reqseq(rx_control
);
4076 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4078 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4080 if (rx_control
& L2CAP_CTRL_POLL
) {
4081 pi
->expected_ack_seq
= tx_seq
;
4082 l2cap_drop_acked_frames(sk
);
4084 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4085 l2cap_retransmit_one_frame(sk
, tx_seq
);
4087 spin_lock_bh(&pi
->send_lock
);
4088 l2cap_ertm_send(sk
);
4089 spin_unlock_bh(&pi
->send_lock
);
4091 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4092 pi
->srej_save_reqseq
= tx_seq
;
4093 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4095 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4096 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
4097 pi
->srej_save_reqseq
== tx_seq
)
4098 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
4100 l2cap_retransmit_one_frame(sk
, tx_seq
);
4102 l2cap_retransmit_one_frame(sk
, tx_seq
);
4103 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4104 pi
->srej_save_reqseq
= tx_seq
;
4105 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4110 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
4112 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4113 u8 tx_seq
= __get_reqseq(rx_control
);
4115 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4117 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
4118 pi
->expected_ack_seq
= tx_seq
;
4119 l2cap_drop_acked_frames(sk
);
4121 if (rx_control
& L2CAP_CTRL_POLL
)
4122 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4124 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
4125 del_timer(&pi
->retrans_timer
);
4126 if (rx_control
& L2CAP_CTRL_POLL
)
4127 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
4131 if (rx_control
& L2CAP_CTRL_POLL
)
4132 l2cap_send_srejtail(sk
);
4134 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
4137 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
4139 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
4141 if (L2CAP_CTRL_FINAL
& rx_control
&&
4142 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
4143 del_timer(&l2cap_pi(sk
)->monitor_timer
);
4144 if (l2cap_pi(sk
)->unacked_frames
> 0)
4145 __mod_retrans_timer();
4146 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
4149 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
4150 case L2CAP_SUPER_RCV_READY
:
4151 l2cap_data_channel_rrframe(sk
, rx_control
);
4154 case L2CAP_SUPER_REJECT
:
4155 l2cap_data_channel_rejframe(sk
, rx_control
);
4158 case L2CAP_SUPER_SELECT_REJECT
:
4159 l2cap_data_channel_srejframe(sk
, rx_control
);
4162 case L2CAP_SUPER_RCV_NOT_READY
:
4163 l2cap_data_channel_rnrframe(sk
, rx_control
);
4171 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4174 struct l2cap_pinfo
*pi
;
4177 int len
, next_tx_seq_offset
, req_seq_offset
;
4179 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4181 BT_DBG("unknown cid 0x%4.4x", cid
);
4187 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4189 if (sk
->sk_state
!= BT_CONNECTED
)
4193 case L2CAP_MODE_BASIC
:
4194 /* If socket recv buffers overflows we drop data here
4195 * which is *bad* because L2CAP has to be reliable.
4196 * But we don't have any other choice. L2CAP doesn't
4197 * provide flow control mechanism. */
4199 if (pi
->imtu
< skb
->len
)
4202 if (!sock_queue_rcv_skb(sk
, skb
))
4206 case L2CAP_MODE_ERTM
:
4207 control
= get_unaligned_le16(skb
->data
);
4212 * We can just drop the corrupted I-frame here.
4213 * Receiver will miss it and start proper recovery
4214 * procedures and ask retransmission.
4216 if (l2cap_check_fcs(pi
, skb
))
4219 if (__is_sar_start(control
) && __is_iframe(control
))
4222 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4225 if (len
> pi
->mps
) {
4226 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4230 req_seq
= __get_reqseq(control
);
4231 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
4232 if (req_seq_offset
< 0)
4233 req_seq_offset
+= 64;
4235 next_tx_seq_offset
=
4236 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
4237 if (next_tx_seq_offset
< 0)
4238 next_tx_seq_offset
+= 64;
4240 /* check for invalid req-seq */
4241 if (req_seq_offset
> next_tx_seq_offset
) {
4242 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4246 if (__is_iframe(control
)) {
4248 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4252 l2cap_data_channel_iframe(sk
, control
, skb
);
4255 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4259 l2cap_data_channel_sframe(sk
, control
, skb
);
4264 case L2CAP_MODE_STREAMING
:
4265 control
= get_unaligned_le16(skb
->data
);
4269 if (l2cap_check_fcs(pi
, skb
))
4272 if (__is_sar_start(control
))
4275 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4278 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
4281 tx_seq
= __get_txseq(control
);
4283 if (pi
->expected_tx_seq
== tx_seq
)
4284 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4286 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
4288 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
4293 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
4307 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4311 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
4315 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4317 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
4320 if (l2cap_pi(sk
)->imtu
< skb
->len
)
4323 if (!sock_queue_rcv_skb(sk
, skb
))
4335 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4337 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4341 skb_pull(skb
, L2CAP_HDR_SIZE
);
4342 cid
= __le16_to_cpu(lh
->cid
);
4343 len
= __le16_to_cpu(lh
->len
);
4345 if (len
!= skb
->len
) {
4350 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4353 case L2CAP_CID_SIGNALING
:
4354 l2cap_sig_channel(conn
, skb
);
4357 case L2CAP_CID_CONN_LESS
:
4358 psm
= get_unaligned_le16(skb
->data
);
4360 l2cap_conless_channel(conn
, psm
, skb
);
4364 l2cap_data_channel(conn
, cid
, skb
);
4369 /* ---- L2CAP interface with lower layer (HCI) ---- */
4371 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4373 int exact
= 0, lm1
= 0, lm2
= 0;
4374 register struct sock
*sk
;
4375 struct hlist_node
*node
;
4377 if (type
!= ACL_LINK
)
4380 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4382 /* Find listening sockets and check their link_mode */
4383 read_lock(&l2cap_sk_list
.lock
);
4384 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4385 if (sk
->sk_state
!= BT_LISTEN
)
4388 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4389 lm1
|= HCI_LM_ACCEPT
;
4390 if (l2cap_pi(sk
)->role_switch
)
4391 lm1
|= HCI_LM_MASTER
;
4393 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4394 lm2
|= HCI_LM_ACCEPT
;
4395 if (l2cap_pi(sk
)->role_switch
)
4396 lm2
|= HCI_LM_MASTER
;
4399 read_unlock(&l2cap_sk_list
.lock
);
4401 return exact
? lm1
: lm2
;
4404 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4406 struct l2cap_conn
*conn
;
4408 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4410 if (hcon
->type
!= ACL_LINK
)
4414 conn
= l2cap_conn_add(hcon
, status
);
4416 l2cap_conn_ready(conn
);
4418 l2cap_conn_del(hcon
, bt_err(status
));
4423 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4425 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4427 BT_DBG("hcon %p", hcon
);
4429 if (hcon
->type
!= ACL_LINK
|| !conn
)
4432 return conn
->disc_reason
;
4435 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4437 BT_DBG("hcon %p reason %d", hcon
, reason
);
4439 if (hcon
->type
!= ACL_LINK
)
4442 l2cap_conn_del(hcon
, bt_err(reason
));
4447 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
4449 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
4452 if (encrypt
== 0x00) {
4453 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
4454 l2cap_sock_clear_timer(sk
);
4455 l2cap_sock_set_timer(sk
, HZ
* 5);
4456 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
4457 __l2cap_sock_close(sk
, ECONNREFUSED
);
4459 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
4460 l2cap_sock_clear_timer(sk
);
4464 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4466 struct l2cap_chan_list
*l
;
4467 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4473 l
= &conn
->chan_list
;
4475 BT_DBG("conn %p", conn
);
4477 read_lock(&l
->lock
);
4479 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
4482 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4487 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
4488 sk
->sk_state
== BT_CONFIG
)) {
4489 l2cap_check_encryption(sk
, encrypt
);
4494 if (sk
->sk_state
== BT_CONNECT
) {
4496 struct l2cap_conn_req req
;
4497 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4498 req
.psm
= l2cap_pi(sk
)->psm
;
4500 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
4501 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4503 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4504 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4506 l2cap_sock_clear_timer(sk
);
4507 l2cap_sock_set_timer(sk
, HZ
/ 10);
4509 } else if (sk
->sk_state
== BT_CONNECT2
) {
4510 struct l2cap_conn_rsp rsp
;
4514 sk
->sk_state
= BT_CONFIG
;
4515 result
= L2CAP_CR_SUCCESS
;
4517 sk
->sk_state
= BT_DISCONN
;
4518 l2cap_sock_set_timer(sk
, HZ
/ 10);
4519 result
= L2CAP_CR_SEC_BLOCK
;
4522 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
4523 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4524 rsp
.result
= cpu_to_le16(result
);
4525 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4526 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4527 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
4533 read_unlock(&l
->lock
);
4538 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4540 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4542 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
4545 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4547 if (flags
& ACL_START
) {
4548 struct l2cap_hdr
*hdr
;
4552 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4553 kfree_skb(conn
->rx_skb
);
4554 conn
->rx_skb
= NULL
;
4556 l2cap_conn_unreliable(conn
, ECOMM
);
4560 BT_ERR("Frame is too short (len %d)", skb
->len
);
4561 l2cap_conn_unreliable(conn
, ECOMM
);
4565 hdr
= (struct l2cap_hdr
*) skb
->data
;
4566 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4568 if (len
== skb
->len
) {
4569 /* Complete frame received */
4570 l2cap_recv_frame(conn
, skb
);
4574 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4576 if (skb
->len
> len
) {
4577 BT_ERR("Frame is too long (len %d, expected len %d)",
4579 l2cap_conn_unreliable(conn
, ECOMM
);
4583 /* Allocate skb for the complete frame (with header) */
4584 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4588 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4590 conn
->rx_len
= len
- skb
->len
;
4592 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4594 if (!conn
->rx_len
) {
4595 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4596 l2cap_conn_unreliable(conn
, ECOMM
);
4600 if (skb
->len
> conn
->rx_len
) {
4601 BT_ERR("Fragment is too long (len %d, expected %d)",
4602 skb
->len
, conn
->rx_len
);
4603 kfree_skb(conn
->rx_skb
);
4604 conn
->rx_skb
= NULL
;
4606 l2cap_conn_unreliable(conn
, ECOMM
);
4610 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4612 conn
->rx_len
-= skb
->len
;
4614 if (!conn
->rx_len
) {
4615 /* Complete frame received */
4616 l2cap_recv_frame(conn
, conn
->rx_skb
);
4617 conn
->rx_skb
= NULL
;
4626 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4629 struct hlist_node
*node
;
4631 read_lock_bh(&l2cap_sk_list
.lock
);
4633 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4634 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4636 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4637 batostr(&bt_sk(sk
)->src
),
4638 batostr(&bt_sk(sk
)->dst
),
4639 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4641 pi
->imtu
, pi
->omtu
, pi
->sec_level
);
4644 read_unlock_bh(&l2cap_sk_list
.lock
);
4649 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4651 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4654 static const struct file_operations l2cap_debugfs_fops
= {
4655 .open
= l2cap_debugfs_open
,
4657 .llseek
= seq_lseek
,
4658 .release
= single_release
,
4661 static struct dentry
*l2cap_debugfs
;
4663 static const struct proto_ops l2cap_sock_ops
= {
4664 .family
= PF_BLUETOOTH
,
4665 .owner
= THIS_MODULE
,
4666 .release
= l2cap_sock_release
,
4667 .bind
= l2cap_sock_bind
,
4668 .connect
= l2cap_sock_connect
,
4669 .listen
= l2cap_sock_listen
,
4670 .accept
= l2cap_sock_accept
,
4671 .getname
= l2cap_sock_getname
,
4672 .sendmsg
= l2cap_sock_sendmsg
,
4673 .recvmsg
= l2cap_sock_recvmsg
,
4674 .poll
= bt_sock_poll
,
4675 .ioctl
= bt_sock_ioctl
,
4676 .mmap
= sock_no_mmap
,
4677 .socketpair
= sock_no_socketpair
,
4678 .shutdown
= l2cap_sock_shutdown
,
4679 .setsockopt
= l2cap_sock_setsockopt
,
4680 .getsockopt
= l2cap_sock_getsockopt
4683 static const struct net_proto_family l2cap_sock_family_ops
= {
4684 .family
= PF_BLUETOOTH
,
4685 .owner
= THIS_MODULE
,
4686 .create
= l2cap_sock_create
,
4689 static struct hci_proto l2cap_hci_proto
= {
4691 .id
= HCI_PROTO_L2CAP
,
4692 .connect_ind
= l2cap_connect_ind
,
4693 .connect_cfm
= l2cap_connect_cfm
,
4694 .disconn_ind
= l2cap_disconn_ind
,
4695 .disconn_cfm
= l2cap_disconn_cfm
,
4696 .security_cfm
= l2cap_security_cfm
,
4697 .recv_acldata
= l2cap_recv_acldata
4700 static int __init
l2cap_init(void)
4704 err
= proto_register(&l2cap_proto
, 0);
4708 _busy_wq
= create_singlethread_workqueue("l2cap");
4712 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4714 BT_ERR("L2CAP socket registration failed");
4718 err
= hci_register_proto(&l2cap_hci_proto
);
4720 BT_ERR("L2CAP protocol registration failed");
4721 bt_sock_unregister(BTPROTO_L2CAP
);
4726 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4727 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4729 BT_ERR("Failed to create L2CAP debug file");
4732 BT_INFO("L2CAP ver %s", VERSION
);
4733 BT_INFO("L2CAP socket layer initialized");
4738 proto_unregister(&l2cap_proto
);
4742 static void __exit
l2cap_exit(void)
4744 debugfs_remove(l2cap_debugfs
);
4746 flush_workqueue(_busy_wq
);
4747 destroy_workqueue(_busy_wq
);
4749 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4750 BT_ERR("L2CAP socket unregistration failed");
4752 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4753 BT_ERR("L2CAP protocol unregistration failed");
4755 proto_unregister(&l2cap_proto
);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4766 module_init(l2cap_init
);
4767 module_exit(l2cap_exit
);
4769 module_param(enable_ertm
, bool, 0644);
4770 MODULE_PARM_DESC(enable_ertm
, "Enable enhanced retransmission mode");
4772 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4773 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4774 MODULE_VERSION(VERSION
);
4775 MODULE_LICENSE("GPL");
4776 MODULE_ALIAS("bt-proto-0");