Bluetooth: clean up l2cap_sock_recvmsg()
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
/* Non-zero disables ERTM/streaming mode advertisement (see
 * l2cap_mode_supported()); presumably set as a module parameter —
 * TODO confirm against module init code outside this chunk. */
int disable_ertm;

/* Local feature mask advertised to peers; fixed channels supported */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Workqueue used by l2cap_busy_work() (ERTM local-busy handling) */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets, guarded by its own rwlock */
struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void l2cap_busy_work(struct work_struct *work);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)
85 return c;
86 }
87 return NULL;
88
89 }
90
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
92 {
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)
98 return c;
99 }
100 return NULL;
101 }
102
103 /* Find channel with given SCID.
104 * Returns locked socket */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
106 {
107 struct l2cap_chan *c;
108
109 read_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
111 if (c)
112 bh_lock_sock(c->sk);
113 read_unlock(&conn->chan_lock);
114 return c;
115 }
116
117 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
118 {
119 struct l2cap_chan *c;
120
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)
123 return c;
124 }
125 return NULL;
126 }
127
128 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 {
130 struct l2cap_chan *c;
131
132 read_lock(&conn->chan_lock);
133 c = __l2cap_get_chan_by_ident(conn, ident);
134 if (c)
135 bh_lock_sock(c->sk);
136 read_unlock(&conn->chan_lock);
137 return c;
138 }
139
140 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
141 {
142 u16 cid = L2CAP_CID_DYN_START;
143
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
146 return cid;
147 }
148
149 return 0;
150 }
151
152 static struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
153 {
154 struct l2cap_chan *chan;
155
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
157 if (!chan)
158 return NULL;
159
160 chan->sk = sk;
161
162 return chan;
163 }
164
/* Attach @chan to @conn and initialise its CIDs/MTU according to the
 * socket type.  Caller must hold conn->chan_lock for writing. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* Default disconnect reason (0x13: remote user terminated) */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Hold the socket while it is linked on the connection's list;
	 * dropped via __sock_put() in l2cap_chan_del() */
	sock_hold(sk);

	list_add(&chan->list, &conn->chan_l);
}
203
/* Delete channel.
 * Must be called on the locked socket.  Unlinks @chan from its
 * connection, marks the socket closed (with @err as the socket error
 * when non-zero), tears down all ERTM state and frees @chan. */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* Drop the reference taken by __l2cap_chan_add() */
		__sock_put(sk);

		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending child of a listening socket: detach it and
		 * wake up the listener */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		/* Free any outstanding SREJ bookkeeping entries */
		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}

	kfree(chan);
}
259
260 static inline u8 l2cap_get_auth_type(struct sock *sk)
261 {
262 if (sk->sk_type == SOCK_RAW) {
263 switch (l2cap_pi(sk)->sec_level) {
264 case BT_SECURITY_HIGH:
265 return HCI_AT_DEDICATED_BONDING_MITM;
266 case BT_SECURITY_MEDIUM:
267 return HCI_AT_DEDICATED_BONDING;
268 default:
269 return HCI_AT_NO_BONDING;
270 }
271 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
272 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
273 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
274
275 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
276 return HCI_AT_NO_BONDING_MITM;
277 else
278 return HCI_AT_NO_BONDING;
279 } else {
280 switch (l2cap_pi(sk)->sec_level) {
281 case BT_SECURITY_HIGH:
282 return HCI_AT_GENERAL_BONDING_MITM;
283 case BT_SECURITY_MEDIUM:
284 return HCI_AT_GENERAL_BONDING;
285 default:
286 return HCI_AT_NO_BONDING;
287 }
288 }
289 }
290
291 /* Service level security */
292 static inline int l2cap_check_security(struct sock *sk)
293 {
294 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
295 __u8 auth_type;
296
297 auth_type = l2cap_get_auth_type(sk);
298
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
300 auth_type);
301 }
302
303 u8 l2cap_get_ident(struct l2cap_conn *conn)
304 {
305 u8 id;
306
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
311 */
312
313 spin_lock_bh(&conn->lock);
314
315 if (++conn->tx_ident > 128)
316 conn->tx_ident = 1;
317
318 id = conn->tx_ident;
319
320 spin_unlock_bh(&conn->lock);
321
322 return id;
323 }
324
325 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
326 {
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328 u8 flags;
329
330 BT_DBG("code 0x%2.2x", code);
331
332 if (!skb)
333 return;
334
335 if (lmp_no_flush_capable(conn->hcon->hdev))
336 flags = ACL_START_NO_FLUSH;
337 else
338 flags = ACL_START;
339
340 hci_send_acl(conn->hcon, skb, flags);
341 }
342
/* Build and transmit a single S-frame (supervisory frame) on @pi's
 * channel.  @control carries the supervisory function bits; the
 * frame-type bit plus pending F-/P-bits are merged in here. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	/* Reserve room for the FCS trailer when CRC16 is negotiated */
	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Consume a pending F-bit (answer to the peer's poll) */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Consume a pending P-bit (solicit an F-bit from the peer) */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before it (header + control) */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
394
395 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
396 {
397 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
398 control |= L2CAP_SUPER_RCV_NOT_READY;
399 pi->conn_state |= L2CAP_CONN_RNR_SENT;
400 } else
401 control |= L2CAP_SUPER_RCV_READY;
402
403 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
404
405 l2cap_send_sframe(pi, control);
406 }
407
408 static inline int __l2cap_no_conn_pending(struct sock *sk)
409 {
410 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
411 }
412
/* Kick off channel establishment: send a Connect Request once the
 * remote feature mask is known, otherwise query the mask first with
 * an Information Request. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight; we will be retried
		 * from l2cap_conn_start() when it completes */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			chan->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the peer's INFO response */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
447
448 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
449 {
450 u32 local_feat_mask = l2cap_feat_mask;
451 if (!disable_ertm)
452 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
453
454 switch (mode) {
455 case L2CAP_MODE_ERTM:
456 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
457 case L2CAP_MODE_STREAMING:
458 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
459 default:
460 return 0x00;
461 }
462 }
463
/* Send a Disconnect Request for @sk's channel, flush its transmit
 * state and move the socket to BT_DISCONN with @err as socket error. */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Anything still queued is abandoned with the channel */
	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
487
488 /* ---- L2CAP connections ---- */
489 static void l2cap_conn_start(struct l2cap_conn *conn)
490 {
491 struct l2cap_chan *chan, *tmp;
492
493 BT_DBG("conn %p", conn);
494
495 read_lock(&conn->chan_lock);
496
497 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
498 struct sock *sk = chan->sk;
499
500 bh_lock_sock(sk);
501
502 if (sk->sk_type != SOCK_SEQPACKET &&
503 sk->sk_type != SOCK_STREAM) {
504 bh_unlock_sock(sk);
505 continue;
506 }
507
508 if (sk->sk_state == BT_CONNECT) {
509 struct l2cap_conn_req req;
510
511 if (!l2cap_check_security(sk) ||
512 !__l2cap_no_conn_pending(sk)) {
513 bh_unlock_sock(sk);
514 continue;
515 }
516
517 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
518 conn->feat_mask)
519 && l2cap_pi(sk)->conf_state &
520 L2CAP_CONF_STATE2_DEVICE) {
521 /* __l2cap_sock_close() calls list_del(chan)
522 * so release the lock */
523 read_unlock_bh(&conn->chan_lock);
524 __l2cap_sock_close(sk, ECONNRESET);
525 read_lock_bh(&conn->chan_lock);
526 bh_unlock_sock(sk);
527 continue;
528 }
529
530 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
531 req.psm = l2cap_pi(sk)->psm;
532
533 chan->ident = l2cap_get_ident(conn);
534 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
535
536 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
537 sizeof(req), &req);
538
539 } else if (sk->sk_state == BT_CONNECT2) {
540 struct l2cap_conn_rsp rsp;
541 char buf[128];
542 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
543 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
544
545 if (l2cap_check_security(sk)) {
546 if (bt_sk(sk)->defer_setup) {
547 struct sock *parent = bt_sk(sk)->parent;
548 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
549 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
550 parent->sk_data_ready(parent, 0);
551
552 } else {
553 sk->sk_state = BT_CONFIG;
554 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
555 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
556 }
557 } else {
558 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
559 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
560 }
561
562 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
563 sizeof(rsp), &rsp);
564
565 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
566 rsp.result != L2CAP_CR_SUCCESS) {
567 bh_unlock_sock(sk);
568 continue;
569 }
570
571 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
572 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
573 l2cap_build_conf_req(chan, buf), buf);
574 chan->num_conf_req++;
575 }
576
577 bh_unlock_sock(sk);
578 }
579
580 read_unlock(&conn->chan_lock);
581 }
582
/* Find socket with cid and source bdaddr.
 * Returns closest match, locked.
 */
static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct sock *s, *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		/* state == 0 means "any state" */
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	/* node is non-NULL iff the loop broke out on an exact match;
	 * otherwise fall back to the wildcard (BDADDR_ANY) candidate */
	s = node ? sk : sk1;
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);

	return s;
}
614
/* Handle an incoming LE connection: clone a child socket off the
 * LE-data listener (if any), attach a channel to it and mark it
 * connected immediately (LE data channels need no config). */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *uninitialized_var(sk);
	struct l2cap_chan *chan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
								conn->src);
	if (!parent)
		return;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto clean;
	}

	write_lock_bh(&conn->chan_lock);

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	l2cap_pi(sk)->chan = chan;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

clean:
	/* parent was returned bh-locked by l2cap_get_sock_by_scid();
	 * the success path deliberately falls through to unlock it */
	bh_unlock_sock(parent);
}
669
/* The underlying HCI link came up: mark LE and connectionless/raw
 * sockets connected, and start channel setup for connection-oriented
 * sockets still in BT_CONNECT. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE links carry no connect request; accept them here */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram sockets skip channel configuration */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
705
706 /* Notify sockets that we cannot guaranty reliability anymore */
707 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
708 {
709 struct l2cap_chan *chan;
710
711 BT_DBG("conn %p", conn);
712
713 read_lock(&conn->chan_lock);
714
715 list_for_each_entry(chan, &conn->chan_l, list) {
716 struct sock *sk = chan->sk;
717
718 if (l2cap_pi(sk)->force_reliable)
719 sk->sk_err = err;
720 }
721
722 read_unlock(&conn->chan_lock);
723 }
724
725 static void l2cap_info_timeout(unsigned long arg)
726 {
727 struct l2cap_conn *conn = (void *) arg;
728
729 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
730 conn->info_ident = 0;
731
732 l2cap_conn_start(conn);
733 }
734
/* Create the L2CAP connection object for @hcon, or return the one
 * already attached.  Returns the existing conn unchanged when
 * @status != 0, and NULL on allocation failure. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* Prefer the controller's LE MTU on LE links when it is set */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* The INFO-request timeout only applies to BR/EDR links */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason (0x13: remote user terminated) */
	conn->disc_reason = 0x13;

	return conn;
}
774
/* Tear down the L2CAP connection attached to @hcon, killing every
 * channel on it with error @err and freeing the conn object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	/* Timer only armed once an INFO request was actually sent */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
803
/* Add @chan to @conn's channel list, taking the channel-list write
 * lock (BH-safe) around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
810
811 /* ---- Socket interface ---- */
812
/* Find socket with psm and source bdaddr.
 * Returns closest match (unlocked, unlike l2cap_get_sock_by_scid).
 */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		/* state == 0 means "any state" */
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL iff the loop broke out on an exact match;
	 * otherwise fall back to the wildcard (BDADDR_ANY) candidate */
	return node ? sk : sk1;
}
842
/* Establish an outgoing L2CAP connection for @sk: resolve the HCI
 * route, bring up (or reuse) the ACL/LE link, attach a channel and
 * start channel setup.  Returns 0 or a negative errno. */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct l2cap_chan *chan;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	/* The LE data CID selects an LE link; anything else uses ACL */
	if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_pi(sk)->chan = chan;

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: short-circuit channel setup */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
918
/* Wait (interruptibly, dropping the socket lock while asleep) until
 * every outstanding ERTM frame is acknowledged, the connection goes
 * away, the socket errors out, or a signal arrives.
 * Returns 0 or a negative errno. */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the poll interval if the previous one expired */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
949
/* ERTM monitor timer: the peer did not answer our poll in time.
 * Disconnect after remote_max_tx retries, otherwise poll again. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	/* Re-poll the peer with the P-bit set */
	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
969
/* ERTM retransmission timer: no ack arrived in time.  Start the
 * poll procedure (RR/RNR with P-bit) and hand over to the monitor
 * timer for subsequent retries. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* Expect an F-bit in the peer's reply */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
985
/* Release transmitted frames the peer has acknowledged: free every
 * queued frame older than expected_ack_seq and stop the retransmit
 * timer once nothing is left in flight. */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* Stop at the first frame still awaiting an ack */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1004
1005 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1006 {
1007 struct l2cap_pinfo *pi = l2cap_pi(sk);
1008 struct hci_conn *hcon = pi->conn->hcon;
1009 u16 flags;
1010
1011 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1012
1013 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1014 flags = ACL_START_NO_FLUSH;
1015 else
1016 flags = ACL_START;
1017
1018 hci_send_acl(hcon, skb, flags);
1019 }
1020
/* Streaming-mode transmit: send every queued frame immediately,
 * stamping TX sequence numbers (and the FCS when negotiated).  No
 * retransmission state is kept in this mode. */
void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		/* Patch the TX sequence number into the control field */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything except its own 2 bytes */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		/* 6-bit sequence space: wrap modulo 64 */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1042
1043 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1044 {
1045 struct l2cap_pinfo *pi = l2cap_pi(sk);
1046 struct sk_buff *skb, *tx_skb;
1047 u16 control, fcs;
1048
1049 skb = skb_peek(TX_QUEUE(sk));
1050 if (!skb)
1051 return;
1052
1053 do {
1054 if (bt_cb(skb)->tx_seq == tx_seq)
1055 break;
1056
1057 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1058 return;
1059
1060 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1061
1062 if (pi->remote_max_tx &&
1063 bt_cb(skb)->retries == pi->remote_max_tx) {
1064 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1065 return;
1066 }
1067
1068 tx_skb = skb_clone(skb, GFP_ATOMIC);
1069 bt_cb(skb)->retries++;
1070 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1071
1072 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1073 control |= L2CAP_CTRL_FINAL;
1074 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1075 }
1076
1077 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1078 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1079
1080 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1081
1082 if (pi->fcs == L2CAP_FCS_CRC16) {
1083 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1084 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1085 }
1086
1087 l2cap_do_send(sk, tx_skb);
1088 }
1089
1090 int l2cap_ertm_send(struct sock *sk)
1091 {
1092 struct sk_buff *skb, *tx_skb;
1093 struct l2cap_pinfo *pi = l2cap_pi(sk);
1094 u16 control, fcs;
1095 int nsent = 0;
1096
1097 if (sk->sk_state != BT_CONNECTED)
1098 return -ENOTCONN;
1099
1100 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1101
1102 if (pi->remote_max_tx &&
1103 bt_cb(skb)->retries == pi->remote_max_tx) {
1104 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1105 break;
1106 }
1107
1108 tx_skb = skb_clone(skb, GFP_ATOMIC);
1109
1110 bt_cb(skb)->retries++;
1111
1112 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1113 control &= L2CAP_CTRL_SAR;
1114
1115 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1116 control |= L2CAP_CTRL_FINAL;
1117 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1118 }
1119 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1120 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1121 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1122
1123
1124 if (pi->fcs == L2CAP_FCS_CRC16) {
1125 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1126 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1127 }
1128
1129 l2cap_do_send(sk, tx_skb);
1130
1131 __mod_retrans_timer();
1132
1133 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1134 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1135
1136 if (bt_cb(skb)->retries == 1)
1137 pi->unacked_frames++;
1138
1139 pi->frames_sent++;
1140
1141 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1142 sk->sk_send_head = NULL;
1143 else
1144 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1145
1146 nsent++;
1147 }
1148
1149 return nsent;
1150 }
1151
1152 static int l2cap_retransmit_frames(struct sock *sk)
1153 {
1154 struct l2cap_pinfo *pi = l2cap_pi(sk);
1155 int ret;
1156
1157 if (!skb_queue_empty(TX_QUEUE(sk)))
1158 sk->sk_send_head = TX_QUEUE(sk)->next;
1159
1160 pi->next_tx_seq = pi->expected_ack_seq;
1161 ret = l2cap_ertm_send(sk);
1162 return ret;
1163 }
1164
/* Acknowledge received I-frames: piggy-back the ack on pending TX
 * data when possible, otherwise send an explicit RR (or RNR while
 * locally busy). */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* Any I-frames just sent already carry the updated REQSEQ */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1185
1186 static void l2cap_send_srejtail(struct sock *sk)
1187 {
1188 struct srej_list *tail;
1189 u16 control;
1190
1191 control = L2CAP_SUPER_SELECT_REJECT;
1192 control |= L2CAP_CTRL_FINAL;
1193
1194 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1195 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1196
1197 l2cap_send_sframe(l2cap_pi(sk), control);
1198 }
1199
/* Copy @len bytes of user data from @msg into @skb: @count bytes go
 * into @skb itself, the remainder is split into continuation
 * fragments of at most conn->mtu bytes chained on skb's frag_list.
 * Returns total bytes consumed or a negative errno (partially built
 * fragments are freed along with @skb by the caller). */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1231
/* Build a connectionless-channel PDU: L2CAP header plus a 2-byte PSM,
 * followed by user data gathered from @msg.
 * Returns the skb or an ERR_PTR on failure. */
struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First skb holds the header + as much payload as fits the MTU;
	 * overflow goes to continuation fragments */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1260
/* Build a basic-mode PDU: bare L2CAP header followed by @len bytes of
 * user payload copied from @msg.  Returns the skb, or an ERR_PTR on
 * allocation/copy failure.
 */
struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* Fit as much payload as the ACL MTU allows in the first buffer;
	 * the rest is chained as fragments by the helper below. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1288
/* Build a single I-frame PDU (ERTM/streaming mode): L2CAP header,
 * 16-bit control field, optional 2-byte SDU length (only on the first
 * segment of a segmented SDU), payload, and an optional 2-byte FCS
 * trailer.  Returns the skb, or an ERR_PTR on failure.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for control field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SAR start frames also carry the total SDU length. */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the trailing frame check sequence. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Zero FCS placeholder; presumably the real CRC16 is computed
	 * in the transmit path -- NOTE(review): confirm against the
	 * send-side FCS code. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1333
/* Segment an SDU of @len bytes into a chain of I-frames no larger than
 * the remote MPS, splice the chain onto the channel TX queue and
 * return the number of payload bytes queued (or a negative errno).
 * On mid-stream failure all frames built so far are purged and nothing
 * is queued.
 */
int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* First segment carries the SAR start bits plus the SDU length. */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		/* Middle segments are a full MPS; the last one takes
		 * whatever remains. */
		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1379
1380 static void l2cap_chan_ready(struct sock *sk)
1381 {
1382 struct sock *parent = bt_sk(sk)->parent;
1383
1384 BT_DBG("sk %p, parent %p", sk, parent);
1385
1386 l2cap_pi(sk)->conf_state = 0;
1387 l2cap_sock_clear_timer(sk);
1388
1389 if (!parent) {
1390 /* Outgoing channel.
1391 * Wake up socket sleeping on connect.
1392 */
1393 sk->sk_state = BT_CONNECTED;
1394 sk->sk_state_change(sk);
1395 } else {
1396 /* Incoming channel.
1397 * Wake up socket sleeping on accept.
1398 */
1399 parent->sk_data_ready(parent, 0);
1400 }
1401 }
1402
/* Copy frame to all raw sockets on that connection, except the socket
 * the frame originated from.  Clones are queued with GFP_ATOMIC under
 * the connection's channel list read lock; a failed clone or a full
 * receive queue silently drops that socket's copy.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* sock_queue_rcv_skb() failure means the clone was not
		 * consumed; free it ourselves. */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}
1429
1430 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb for an L2CAP signalling command: L2CAP
 * header addressed to the (LE or BR/EDR) signalling CID, command
 * header, then @dlen bytes of @data, fragmented to the connection MTU.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling travels on a different fixed channel for LE links. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Headers already consumed part of the first buffer. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far, fragments included. */
	kfree_skb(skb);
	return NULL;
}
1493
1494 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1495 {
1496 struct l2cap_conf_opt *opt = *ptr;
1497 int len;
1498
1499 len = L2CAP_CONF_OPT_SIZE + opt->len;
1500 *ptr += len;
1501
1502 *type = opt->type;
1503 *olen = opt->len;
1504
1505 switch (opt->len) {
1506 case 1:
1507 *val = *((u8 *) opt->val);
1508 break;
1509
1510 case 2:
1511 *val = get_unaligned_le16(opt->val);
1512 break;
1513
1514 case 4:
1515 *val = get_unaligned_le32(opt->val);
1516 break;
1517
1518 default:
1519 *val = (unsigned long) opt->val;
1520 break;
1521 }
1522
1523 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1524 return len;
1525 }
1526
1527 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1528 {
1529 struct l2cap_conf_opt *opt = *ptr;
1530
1531 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1532
1533 opt->type = type;
1534 opt->len = len;
1535
1536 switch (len) {
1537 case 1:
1538 *((u8 *) opt->val) = val;
1539 break;
1540
1541 case 2:
1542 put_unaligned_le16(val, opt->val);
1543 break;
1544
1545 case 4:
1546 put_unaligned_le32(val, opt->val);
1547 break;
1548
1549 default:
1550 memcpy(opt->val, (void *) val, len);
1551 break;
1552 }
1553
1554 *ptr += L2CAP_CONF_OPT_SIZE + len;
1555 }
1556
/* Ack timer callback: flush any pending acknowledgment for the channel
 * under the socket lock.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
1565
/* Reset per-channel ERTM state and set up the retransmission, monitor
 * and ack timers plus the SREJ/busy queues for a channel entering
 * ERTM mode.  Also redirects backlog processing to the ERTM receive
 * path.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1588
1589 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1590 {
1591 switch (mode) {
1592 case L2CAP_MODE_STREAMING:
1593 case L2CAP_MODE_ERTM:
1594 if (l2cap_mode_supported(mode, remote_feat_mask))
1595 return mode;
1596 /* fall through */
1597 default:
1598 return L2CAP_MODE_BASIC;
1599 }
1600 }
1601
/* Build our L2CAP_CONF_REQ for @chan into @data and return its length.
 * On the very first configuration round this may downgrade pi->mode to
 * what the remote's feature mask supports.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct sock *sk = chan->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode (re)selection happens only before any config exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A device-mandated mode is never downgraded. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Advertise basic mode explicitly only when the remote
		 * understands the RFC option at all. */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Clamp MPS so a maximum-size PDU (payload + headers and
		 * trailer) still fits in the ACL MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request to drop the FCS when we don't want it and the
		 * remote already signalled no-FCS. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1700
1701 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1702 {
1703 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1704 struct l2cap_conf_rsp *rsp = data;
1705 void *ptr = rsp->data;
1706 void *req = chan->conf_req;
1707 int len = chan->conf_len;
1708 int type, hint, olen;
1709 unsigned long val;
1710 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1711 u16 mtu = L2CAP_DEFAULT_MTU;
1712 u16 result = L2CAP_CONF_SUCCESS;
1713
1714 BT_DBG("chan %p", chan);
1715
1716 while (len >= L2CAP_CONF_OPT_SIZE) {
1717 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1718
1719 hint = type & L2CAP_CONF_HINT;
1720 type &= L2CAP_CONF_MASK;
1721
1722 switch (type) {
1723 case L2CAP_CONF_MTU:
1724 mtu = val;
1725 break;
1726
1727 case L2CAP_CONF_FLUSH_TO:
1728 pi->flush_to = val;
1729 break;
1730
1731 case L2CAP_CONF_QOS:
1732 break;
1733
1734 case L2CAP_CONF_RFC:
1735 if (olen == sizeof(rfc))
1736 memcpy(&rfc, (void *) val, olen);
1737 break;
1738
1739 case L2CAP_CONF_FCS:
1740 if (val == L2CAP_FCS_NONE)
1741 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1742
1743 break;
1744
1745 default:
1746 if (hint)
1747 break;
1748
1749 result = L2CAP_CONF_UNKNOWN;
1750 *((u8 *) ptr++) = type;
1751 break;
1752 }
1753 }
1754
1755 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1756 goto done;
1757
1758 switch (pi->mode) {
1759 case L2CAP_MODE_STREAMING:
1760 case L2CAP_MODE_ERTM:
1761 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1762 pi->mode = l2cap_select_mode(rfc.mode,
1763 pi->conn->feat_mask);
1764 break;
1765 }
1766
1767 if (pi->mode != rfc.mode)
1768 return -ECONNREFUSED;
1769
1770 break;
1771 }
1772
1773 done:
1774 if (pi->mode != rfc.mode) {
1775 result = L2CAP_CONF_UNACCEPT;
1776 rfc.mode = pi->mode;
1777
1778 if (chan->num_conf_rsp == 1)
1779 return -ECONNREFUSED;
1780
1781 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1782 sizeof(rfc), (unsigned long) &rfc);
1783 }
1784
1785
1786 if (result == L2CAP_CONF_SUCCESS) {
1787 /* Configure output options and let the other side know
1788 * which ones we don't like. */
1789
1790 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1791 result = L2CAP_CONF_UNACCEPT;
1792 else {
1793 pi->omtu = mtu;
1794 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1795 }
1796 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1797
1798 switch (rfc.mode) {
1799 case L2CAP_MODE_BASIC:
1800 pi->fcs = L2CAP_FCS_NONE;
1801 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1802 break;
1803
1804 case L2CAP_MODE_ERTM:
1805 pi->remote_tx_win = rfc.txwin_size;
1806 pi->remote_max_tx = rfc.max_transmit;
1807
1808 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1809 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1810
1811 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1812
1813 rfc.retrans_timeout =
1814 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1815 rfc.monitor_timeout =
1816 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1817
1818 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1819
1820 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1821 sizeof(rfc), (unsigned long) &rfc);
1822
1823 break;
1824
1825 case L2CAP_MODE_STREAMING:
1826 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1827 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1828
1829 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1830
1831 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1832
1833 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1834 sizeof(rfc), (unsigned long) &rfc);
1835
1836 break;
1837
1838 default:
1839 result = L2CAP_CONF_UNACCEPT;
1840
1841 memset(&rfc, 0, sizeof(rfc));
1842 rfc.mode = pi->mode;
1843 }
1844
1845 if (result == L2CAP_CONF_SUCCESS)
1846 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1847 }
1848 rsp->scid = cpu_to_le16(pi->dcid);
1849 rsp->result = cpu_to_le16(result);
1850 rsp->flags = cpu_to_le16(0x0000);
1851
1852 return ptr - data;
1853 }
1854
1855 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1856 {
1857 struct l2cap_pinfo *pi = l2cap_pi(sk);
1858 struct l2cap_conf_req *req = data;
1859 void *ptr = req->data;
1860 int type, olen;
1861 unsigned long val;
1862 struct l2cap_conf_rfc rfc;
1863
1864 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1865
1866 while (len >= L2CAP_CONF_OPT_SIZE) {
1867 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1868
1869 switch (type) {
1870 case L2CAP_CONF_MTU:
1871 if (val < L2CAP_DEFAULT_MIN_MTU) {
1872 *result = L2CAP_CONF_UNACCEPT;
1873 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1874 } else
1875 pi->imtu = val;
1876 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1877 break;
1878
1879 case L2CAP_CONF_FLUSH_TO:
1880 pi->flush_to = val;
1881 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1882 2, pi->flush_to);
1883 break;
1884
1885 case L2CAP_CONF_RFC:
1886 if (olen == sizeof(rfc))
1887 memcpy(&rfc, (void *)val, olen);
1888
1889 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1890 rfc.mode != pi->mode)
1891 return -ECONNREFUSED;
1892
1893 pi->fcs = 0;
1894
1895 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1896 sizeof(rfc), (unsigned long) &rfc);
1897 break;
1898 }
1899 }
1900
1901 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1902 return -ECONNREFUSED;
1903
1904 pi->mode = rfc.mode;
1905
1906 if (*result == L2CAP_CONF_SUCCESS) {
1907 switch (rfc.mode) {
1908 case L2CAP_MODE_ERTM:
1909 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1910 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1911 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1912 break;
1913 case L2CAP_MODE_STREAMING:
1914 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1915 }
1916 }
1917
1918 req->dcid = cpu_to_le16(pi->dcid);
1919 req->flags = cpu_to_le16(0x0000);
1920
1921 return ptr - data;
1922 }
1923
1924 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1925 {
1926 struct l2cap_conf_rsp *rsp = data;
1927 void *ptr = rsp->data;
1928
1929 BT_DBG("sk %p", sk);
1930
1931 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1932 rsp->result = cpu_to_le16(result);
1933 rsp->flags = cpu_to_le16(flags);
1934
1935 return ptr - data;
1936 }
1937
/* Send the deferred (successful) L2CAP_CONN_RSP for a channel that was
 * parked in BT_CONNECT2 by the defer_setup socket option, then start
 * configuration unless a CONF_REQ was already sent.
 */
void __l2cap_connect_rsp_defer(struct sock *sk)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u8 buf[128];

	sk->sk_state = BT_CONFIG;

	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	/* Reply with the ident saved from the original CONN_REQ. */
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
		return;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
1962
1963 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1964 {
1965 struct l2cap_pinfo *pi = l2cap_pi(sk);
1966 int type, olen;
1967 unsigned long val;
1968 struct l2cap_conf_rfc rfc;
1969
1970 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1971
1972 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1973 return;
1974
1975 while (len >= L2CAP_CONF_OPT_SIZE) {
1976 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1977
1978 switch (type) {
1979 case L2CAP_CONF_RFC:
1980 if (olen == sizeof(rfc))
1981 memcpy(&rfc, (void *)val, olen);
1982 goto done;
1983 }
1984 }
1985
1986 done:
1987 switch (rfc.mode) {
1988 case L2CAP_MODE_ERTM:
1989 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1990 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1991 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1992 break;
1993 case L2CAP_MODE_STREAMING:
1994 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1995 }
1996 }
1997
1998 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1999 {
2000 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2001
2002 if (rej->reason != 0x0000)
2003 return 0;
2004
2005 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2006 cmd->ident == conn->info_ident) {
2007 del_timer(&conn->info_timer);
2008
2009 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2010 conn->info_ident = 0;
2011
2012 l2cap_conn_start(conn);
2013 }
2014
2015 return 0;
2016 }
2017
/* Handle an incoming L2CAP_CONN_REQ: find a listener on the PSM,
 * create a child socket and channel, run the security checks, and
 * always answer with a CONN_RSP carrying the result/status decided
 * along the way.  Always returns 0.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto response;
	}

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Initialize the child from the listening parent and address it. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	l2cap_pi(sk)->chan = chan;

	/* Our source CID becomes the peer's destination CID. */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Remember the request ident for a possible deferred response. */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace decides via accept(). */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to complete. */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still outstanding. */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Kick off the feature-mask information exchange if needed. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Immediate success: start configuration right away. */
	if (chan && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2156
/* Handle an L2CAP_CONN_RSP: move the channel into configuration on
 * success, remember a pending result, or tear the channel down on
 * refusal.  Returns 0, or -EFAULT when no matching channel is found.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A pending response may not carry our scid yet; fall back to
	 * matching on the command ident.  The lookup helpers return
	 * with the socket lock held (released at the end). */
	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		chan->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2221
2222 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2223 {
2224 /* FCS is enabled only in ERTM or streaming mode, if one or both
2225 * sides request it.
2226 */
2227 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2228 pi->fcs = L2CAP_FCS_NONE;
2229 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2230 pi->fcs = L2CAP_FCS_CRC16;
2231 }
2232
/* Handle an L2CAP_CONF_REQ: accumulate (possibly multi-fragment)
 * options in chan->conf_req, and once the request is complete, parse
 * it, send our CONF_RSP, and promote the channel to connected when
 * both directions are configured.  Always returns 0 once a channel is
 * found; -ENOENT otherwise.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the socket lock held on success. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	/* Configuration is only legal in BT_CONFIG state. */
	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: bring the channel up. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered first; now send our own CONF_REQ if outstanding. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2326
2327 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2328 {
2329 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2330 u16 scid, flags, result;
2331 struct l2cap_chan *chan;
2332 struct sock *sk;
2333 int len = cmd->len - sizeof(*rsp);
2334
2335 scid = __le16_to_cpu(rsp->scid);
2336 flags = __le16_to_cpu(rsp->flags);
2337 result = __le16_to_cpu(rsp->result);
2338
2339 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2340 scid, flags, result);
2341
2342 chan = l2cap_get_chan_by_scid(conn, scid);
2343 if (!chan)
2344 return 0;
2345
2346 sk = chan->sk;
2347
2348 switch (result) {
2349 case L2CAP_CONF_SUCCESS:
2350 l2cap_conf_rfc_get(sk, rsp->data, len);
2351 break;
2352
2353 case L2CAP_CONF_UNACCEPT:
2354 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2355 char req[64];
2356
2357 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2358 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2359 goto done;
2360 }
2361
2362 /* throw out any old stored conf requests */
2363 result = L2CAP_CONF_SUCCESS;
2364 len = l2cap_parse_conf_rsp(sk, rsp->data,
2365 len, req, &result);
2366 if (len < 0) {
2367 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2368 goto done;
2369 }
2370
2371 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2372 L2CAP_CONF_REQ, len, req);
2373 chan->num_conf_req++;
2374 if (result != L2CAP_CONF_SUCCESS)
2375 goto done;
2376 break;
2377 }
2378
2379 default:
2380 sk->sk_err = ECONNRESET;
2381 l2cap_sock_set_timer(sk, HZ * 5);
2382 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2383 goto done;
2384 }
2385
2386 if (flags & 0x01)
2387 goto done;
2388
2389 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2390
2391 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2392 set_default_fcs(l2cap_pi(sk));
2393
2394 sk->sk_state = BT_CONNECTED;
2395 l2cap_pi(sk)->next_tx_seq = 0;
2396 l2cap_pi(sk)->expected_tx_seq = 0;
2397 __skb_queue_head_init(TX_QUEUE(sk));
2398 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2399 l2cap_ertm_init(sk);
2400
2401 l2cap_chan_ready(sk);
2402 }
2403
2404 done:
2405 bh_unlock_sock(sk);
2406 return 0;
2407 }
2408
/* Handle an L2CAP_DISCONN_REQ: acknowledge with a DISCONN_RSP, shut
 * the socket down, and delete the channel unless the socket is
 * currently locked by userspace (then defer via a short timer).
 * Always returns 0.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; lookup locks the socket. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2449
/* Handle an incoming L2CAP Disconnection Response: our earlier request was
 * acknowledged, so finish tearing the channel down.
 *
 * NOTE(review): as with the request handler, the bh_unlock_sock() calls
 * imply l2cap_get_chan_by_scid() returns with the socket locked — confirm.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Userspace is using the socket: defer via a short timer */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: this is a clean, locally-initiated disconnect */
	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2483
2484 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2485 {
2486 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2487 u16 type;
2488
2489 type = __le16_to_cpu(req->type);
2490
2491 BT_DBG("type 0x%4.4x", type);
2492
2493 if (type == L2CAP_IT_FEAT_MASK) {
2494 u8 buf[8];
2495 u32 feat_mask = l2cap_feat_mask;
2496 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2497 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2498 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2499 if (!disable_ertm)
2500 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2501 | L2CAP_FEAT_FCS;
2502 put_unaligned_le32(feat_mask, rsp->data);
2503 l2cap_send_cmd(conn, cmd->ident,
2504 L2CAP_INFO_RSP, sizeof(buf), buf);
2505 } else if (type == L2CAP_IT_FIXED_CHAN) {
2506 u8 buf[12];
2507 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2508 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2509 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2510 memcpy(buf + 4, l2cap_fixed_chan, 8);
2511 l2cap_send_cmd(conn, cmd->ident,
2512 L2CAP_INFO_RSP, sizeof(buf), buf);
2513 } else {
2514 struct l2cap_info_rsp rsp;
2515 rsp.type = cpu_to_le16(type);
2516 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2517 l2cap_send_cmd(conn, cmd->ident,
2518 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2519 }
2520
2521 return 0;
2522 }
2523
/* Handle an L2CAP Information Response during connection setup.
 *
 * A successful feature-mask response may trigger a follow-up fixed-channel
 * query; once the exchange is finished (or failed) the pending channels on
 * this connection are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* A matching response arrived in time: cancel the info timeout */
	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer rejected the query: mark done and start channels
		 * with whatever defaults we have */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: issue the second
			 * query before declaring the exchange done */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Final response of the exchange */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2576
2577 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2578 u16 to_multiplier)
2579 {
2580 u16 max_latency;
2581
2582 if (min > max || min < 6 || max > 3200)
2583 return -EINVAL;
2584
2585 if (to_multiplier < 10 || to_multiplier > 3200)
2586 return -EINVAL;
2587
2588 if (max >= to_multiplier * 8)
2589 return -EINVAL;
2590
2591 max_latency = (to_multiplier * 8 / max) - 1;
2592 if (latency > 499 || latency > max_latency)
2593 return -EINVAL;
2594
2595 return 0;
2596 }
2597
2598 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2599 struct l2cap_cmd_hdr *cmd, u8 *data)
2600 {
2601 struct hci_conn *hcon = conn->hcon;
2602 struct l2cap_conn_param_update_req *req;
2603 struct l2cap_conn_param_update_rsp rsp;
2604 u16 min, max, latency, to_multiplier, cmd_len;
2605 int err;
2606
2607 if (!(hcon->link_mode & HCI_LM_MASTER))
2608 return -EINVAL;
2609
2610 cmd_len = __le16_to_cpu(cmd->len);
2611 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2612 return -EPROTO;
2613
2614 req = (struct l2cap_conn_param_update_req *) data;
2615 min = __le16_to_cpu(req->min);
2616 max = __le16_to_cpu(req->max);
2617 latency = __le16_to_cpu(req->latency);
2618 to_multiplier = __le16_to_cpu(req->to_multiplier);
2619
2620 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2621 min, max, latency, to_multiplier);
2622
2623 memset(&rsp, 0, sizeof(rsp));
2624
2625 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2626 if (err)
2627 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2628 else
2629 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2630
2631 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2632 sizeof(rsp), &rsp);
2633
2634 if (!err)
2635 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2636
2637 return 0;
2638 }
2639
2640 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2641 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2642 {
2643 int err = 0;
2644
2645 switch (cmd->code) {
2646 case L2CAP_COMMAND_REJ:
2647 l2cap_command_rej(conn, cmd, data);
2648 break;
2649
2650 case L2CAP_CONN_REQ:
2651 err = l2cap_connect_req(conn, cmd, data);
2652 break;
2653
2654 case L2CAP_CONN_RSP:
2655 err = l2cap_connect_rsp(conn, cmd, data);
2656 break;
2657
2658 case L2CAP_CONF_REQ:
2659 err = l2cap_config_req(conn, cmd, cmd_len, data);
2660 break;
2661
2662 case L2CAP_CONF_RSP:
2663 err = l2cap_config_rsp(conn, cmd, data);
2664 break;
2665
2666 case L2CAP_DISCONN_REQ:
2667 err = l2cap_disconnect_req(conn, cmd, data);
2668 break;
2669
2670 case L2CAP_DISCONN_RSP:
2671 err = l2cap_disconnect_rsp(conn, cmd, data);
2672 break;
2673
2674 case L2CAP_ECHO_REQ:
2675 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2676 break;
2677
2678 case L2CAP_ECHO_RSP:
2679 break;
2680
2681 case L2CAP_INFO_REQ:
2682 err = l2cap_information_req(conn, cmd, data);
2683 break;
2684
2685 case L2CAP_INFO_RSP:
2686 err = l2cap_information_rsp(conn, cmd, data);
2687 break;
2688
2689 default:
2690 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2691 err = -EINVAL;
2692 break;
2693 }
2694
2695 return err;
2696 }
2697
2698 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2699 struct l2cap_cmd_hdr *cmd, u8 *data)
2700 {
2701 switch (cmd->code) {
2702 case L2CAP_COMMAND_REJ:
2703 return 0;
2704
2705 case L2CAP_CONN_PARAM_UPDATE_REQ:
2706 return l2cap_conn_param_update_req(conn, cmd, data);
2707
2708 case L2CAP_CONN_PARAM_UPDATE_RSP:
2709 return 0;
2710
2711 default:
2712 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2713 return -EINVAL;
2714 }
2715 }
2716
/* Process a frame received on the L2CAP signaling channel.
 *
 * The skb may carry several concatenated commands; each is copied into a
 * local header, validated and dispatched to the BR/EDR or LE handler.
 * Failures are answered with an L2CAP Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		/* Copy out: the header may be unaligned inside the skb */
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Stop on truncated payloads or the reserved ident 0 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload to the next one */
		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
2763
2764 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2765 {
2766 u16 our_fcs, rcv_fcs;
2767 int hdr_size = L2CAP_HDR_SIZE + 2;
2768
2769 if (pi->fcs == L2CAP_FCS_CRC16) {
2770 skb_trim(skb, skb->len - 2);
2771 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2772 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2773
2774 if (our_fcs != rcv_fcs)
2775 return -EBADMSG;
2776 }
2777 return 0;
2778 }
2779
/* Answer a poll (P-bit) by sending pending I-frames, or an RR/RNR
 * supervisory frame when there is nothing (or we are busy).
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Locally busy: tell the peer to stop sending */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* If nothing went out at all (and we are not busy), the poll still
	 * needs an answer: send a bare RR */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
2806
/* Insert an out-of-sequence I-frame into SREJ_QUEUE, keeping the queue
 * ordered by tx_seq distance from buffer_seq (mod 64).
 *
 * Returns 0 on success, -EINVAL if a frame with the same tx_seq is
 * already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequencing metadata in the skb control block */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of tx_seq from the receive window base, wrapped to
	 * 0..63 (the '%' of a negative int is negative in C) */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame farther from the base than ours marks
		 * the insertion point */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Farthest from the base so far: append */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
2849
/* Reassemble one ERTM I-frame into an SDU according to its SAR bits and
 * deliver complete SDUs to the socket receive queue.
 *
 * Returns 0 normally (including protocol-error paths, which disconnect);
 * -ENOMEM or the sock_queue_rcv_skb() error when delivery must be retried
 * later (local busy handling) — in that case the skb is NOT consumed.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Unsegmented frame while a segmented SDU is in progress
		 * is a protocol violation */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		/* Queueing failed (e.g. rcvbuf full): report to caller,
		 * which enters local-busy and retries later */
		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* Bound-check BEFORE copying so skb_put cannot overrun
		 * the allocated SDU buffer */
		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry after local busy the SDU is already assembled;
		 * skip the copy so it happens exactly once */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	/* Abandon the partial SDU, then fall through to disconnect:
	 * reassembly failures are fatal for an ERTM channel */
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
2957
/* Try to drain the local-busy backlog (BUSY_QUEUE) into the socket.
 *
 * Returns -EBUSY if the socket is still unable to accept frames (the
 * failing skb is requeued), or 0 after the backlog is empty and the
 * local-busy condition has been exited (RR with P-bit sent if we had
 * previously sent RNR).
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back for the next try */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer "receiver not ready" earlier: poll it with an
	 * RR + P-bit so transmission resumes, and wait for the F-bit */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
2997
/* Workqueue handler for the ERTM local-busy condition.
 *
 * Repeatedly sleeps and retries pushing the busy backlog to the socket
 * until it drains, the retry budget (L2CAP_LOCAL_BUSY_TRIES) is exhausted
 * (then the channel is disconnected), a signal arrives, or the socket
 * reports an error.  Runs in process context, so it may sleep.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	/* The pinfo is embedded at the start of the sock, hence the cast */
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the lock while sleeping so the receive path can
		 * make progress */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		/* 0 means the backlog drained and local busy was exited */
		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3044
3045 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3046 {
3047 struct l2cap_pinfo *pi = l2cap_pi(sk);
3048 int sctrl, err;
3049
3050 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3051 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3052 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3053 return l2cap_try_push_rx_skb(sk);
3054
3055
3056 }
3057
3058 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3059 if (err >= 0) {
3060 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3061 return err;
3062 }
3063
3064 /* Busy Condition */
3065 BT_DBG("sk %p, Enter local busy", sk);
3066
3067 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3068 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3069 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3070
3071 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3072 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3073 l2cap_send_sframe(pi, sctrl);
3074
3075 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3076
3077 del_timer(&pi->ack_timer);
3078
3079 queue_work(_busy_wq, &pi->busy_work);
3080
3081 return err;
3082 }
3083
3084 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3085 {
3086 struct l2cap_pinfo *pi = l2cap_pi(sk);
3087 struct sk_buff *_skb;
3088 int err = -EINVAL;
3089
3090 /*
3091 * TODO: We have to notify the userland if some data is lost with the
3092 * Streaming Mode.
3093 */
3094
3095 switch (control & L2CAP_CTRL_SAR) {
3096 case L2CAP_SDU_UNSEGMENTED:
3097 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3098 kfree_skb(pi->sdu);
3099 break;
3100 }
3101
3102 err = sock_queue_rcv_skb(sk, skb);
3103 if (!err)
3104 return 0;
3105
3106 break;
3107
3108 case L2CAP_SDU_START:
3109 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3110 kfree_skb(pi->sdu);
3111 break;
3112 }
3113
3114 pi->sdu_len = get_unaligned_le16(skb->data);
3115 skb_pull(skb, 2);
3116
3117 if (pi->sdu_len > pi->imtu) {
3118 err = -EMSGSIZE;
3119 break;
3120 }
3121
3122 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3123 if (!pi->sdu) {
3124 err = -ENOMEM;
3125 break;
3126 }
3127
3128 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3129
3130 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3131 pi->partial_sdu_len = skb->len;
3132 err = 0;
3133 break;
3134
3135 case L2CAP_SDU_CONTINUE:
3136 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3137 break;
3138
3139 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3140
3141 pi->partial_sdu_len += skb->len;
3142 if (pi->partial_sdu_len > pi->sdu_len)
3143 kfree_skb(pi->sdu);
3144 else
3145 err = 0;
3146
3147 break;
3148
3149 case L2CAP_SDU_END:
3150 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3151 break;
3152
3153 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3154
3155 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3156 pi->partial_sdu_len += skb->len;
3157
3158 if (pi->partial_sdu_len > pi->imtu)
3159 goto drop;
3160
3161 if (pi->partial_sdu_len == pi->sdu_len) {
3162 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3163 err = sock_queue_rcv_skb(sk, _skb);
3164 if (err < 0)
3165 kfree_skb(_skb);
3166 }
3167 err = 0;
3168
3169 drop:
3170 kfree_skb(pi->sdu);
3171 break;
3172 }
3173
3174 kfree_skb(skb);
3175 return err;
3176 }
3177
3178 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3179 {
3180 struct sk_buff *skb;
3181 u16 control;
3182
3183 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3184 if (bt_cb(skb)->tx_seq != tx_seq)
3185 break;
3186
3187 skb = skb_dequeue(SREJ_QUEUE(sk));
3188 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3189 l2cap_ertm_reassembly_sdu(sk, skb, control);
3190 l2cap_pi(sk)->buffer_seq_srej =
3191 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3192 tx_seq = (tx_seq + 1) % 64;
3193 }
3194 }
3195
/* The peer retransmitted a frame we had selectively rejected: remove its
 * entry from SREJ_LIST, re-sending the SREJ for every still-missing frame
 * listed before it (those entries are rotated to the list tail so the
 * list stays in request order).
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* Found the satisfied request: drop it and stop */
			list_del(&l->list);
			kfree(l);
			return;
		}
		/* Still missing: ask again and move to the tail */
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3215
3216 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3217 {
3218 struct l2cap_pinfo *pi = l2cap_pi(sk);
3219 struct srej_list *new;
3220 u16 control;
3221
3222 while (tx_seq != pi->expected_tx_seq) {
3223 control = L2CAP_SUPER_SELECT_REJECT;
3224 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3225 l2cap_send_sframe(pi, control);
3226
3227 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3228 new->tx_seq = pi->expected_tx_seq;
3229 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3230 list_add_tail(&new->list, SREJ_LIST(sk));
3231 }
3232 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3233 }
3234
3235 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3236 {
3237 struct l2cap_pinfo *pi = l2cap_pi(sk);
3238 u8 tx_seq = __get_txseq(rx_control);
3239 u8 req_seq = __get_reqseq(rx_control);
3240 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3241 int tx_seq_offset, expected_tx_seq_offset;
3242 int num_to_ack = (pi->tx_win/6) + 1;
3243 int err = 0;
3244
3245 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3246 rx_control);
3247
3248 if (L2CAP_CTRL_FINAL & rx_control &&
3249 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3250 del_timer(&pi->monitor_timer);
3251 if (pi->unacked_frames > 0)
3252 __mod_retrans_timer();
3253 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3254 }
3255
3256 pi->expected_ack_seq = req_seq;
3257 l2cap_drop_acked_frames(sk);
3258
3259 if (tx_seq == pi->expected_tx_seq)
3260 goto expected;
3261
3262 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3263 if (tx_seq_offset < 0)
3264 tx_seq_offset += 64;
3265
3266 /* invalid tx_seq */
3267 if (tx_seq_offset >= pi->tx_win) {
3268 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3269 goto drop;
3270 }
3271
3272 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3273 goto drop;
3274
3275 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3276 struct srej_list *first;
3277
3278 first = list_first_entry(SREJ_LIST(sk),
3279 struct srej_list, list);
3280 if (tx_seq == first->tx_seq) {
3281 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3282 l2cap_check_srej_gap(sk, tx_seq);
3283
3284 list_del(&first->list);
3285 kfree(first);
3286
3287 if (list_empty(SREJ_LIST(sk))) {
3288 pi->buffer_seq = pi->buffer_seq_srej;
3289 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3290 l2cap_send_ack(pi);
3291 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3292 }
3293 } else {
3294 struct srej_list *l;
3295
3296 /* duplicated tx_seq */
3297 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3298 goto drop;
3299
3300 list_for_each_entry(l, SREJ_LIST(sk), list) {
3301 if (l->tx_seq == tx_seq) {
3302 l2cap_resend_srejframe(sk, tx_seq);
3303 return 0;
3304 }
3305 }
3306 l2cap_send_srejframe(sk, tx_seq);
3307 }
3308 } else {
3309 expected_tx_seq_offset =
3310 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3311 if (expected_tx_seq_offset < 0)
3312 expected_tx_seq_offset += 64;
3313
3314 /* duplicated tx_seq */
3315 if (tx_seq_offset < expected_tx_seq_offset)
3316 goto drop;
3317
3318 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3319
3320 BT_DBG("sk %p, Enter SREJ", sk);
3321
3322 INIT_LIST_HEAD(SREJ_LIST(sk));
3323 pi->buffer_seq_srej = pi->buffer_seq;
3324
3325 __skb_queue_head_init(SREJ_QUEUE(sk));
3326 __skb_queue_head_init(BUSY_QUEUE(sk));
3327 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3328
3329 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3330
3331 l2cap_send_srejframe(sk, tx_seq);
3332
3333 del_timer(&pi->ack_timer);
3334 }
3335 return 0;
3336
3337 expected:
3338 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3339
3340 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3341 bt_cb(skb)->tx_seq = tx_seq;
3342 bt_cb(skb)->sar = sar;
3343 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3344 return 0;
3345 }
3346
3347 err = l2cap_push_rx_skb(sk, skb, rx_control);
3348 if (err < 0)
3349 return 0;
3350
3351 if (rx_control & L2CAP_CTRL_FINAL) {
3352 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3353 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3354 else
3355 l2cap_retransmit_frames(sk);
3356 }
3357
3358 __mod_ack_timer();
3359
3360 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3361 if (pi->num_acked == num_to_ack - 1)
3362 l2cap_send_ack(pi);
3363
3364 return 0;
3365
3366 drop:
3367 kfree_skb(skb);
3368 return 0;
3369 }
3370
/* Handle a Receiver Ready (RR) supervisory frame: acknowledge our frames
 * up to req_seq, then react to the P/F bits (answer polls, retransmit on
 * final when a REJ is outstanding, or resume sending).
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our answer must carry the F-bit */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit: peer answered our poll; retransmit unless this
		 * final was already consumed by REJ handling */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		/* Plain RR: the peer is no longer busy; keep sending */
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
3414
3415 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3416 {
3417 struct l2cap_pinfo *pi = l2cap_pi(sk);
3418 u8 tx_seq = __get_reqseq(rx_control);
3419
3420 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3421
3422 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3423
3424 pi->expected_ack_seq = tx_seq;
3425 l2cap_drop_acked_frames(sk);
3426
3427 if (rx_control & L2CAP_CTRL_FINAL) {
3428 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3429 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3430 else
3431 l2cap_retransmit_frames(sk);
3432 } else {
3433 l2cap_retransmit_frames(sk);
3434
3435 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3436 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3437 }
3438 }
/* Handle a Selective Reject (SREJ) supervisory frame: retransmit exactly
 * the requested frame, with P/F-bit bookkeeping mirroring the REJ logic
 * (SREJ_ACT prevents a duplicate retransmit when the F-bit answer for an
 * already-serviced poll arrives).
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to tx_seq */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit: only retransmit if this isn't the echo of an
		 * SREJ we already serviced */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3475
/* Handle a Receiver Not Ready (RNR) supervisory frame: the peer is busy,
 * so stop retransmissions, record its acknowledgement, and answer any
 * poll appropriately.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Nothing to selectively recover: pause retransmissions
		 * and answer a poll with RR+F */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery in progress: keep requesting the missing frames */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
3502
/* Dispatch an ERTM supervisory frame (RR/REJ/SREJ/RNR) to its handler.
 *
 * An F-bit answering our outstanding poll is consumed here, before
 * dispatch.  The skb carries no payload for S-frames and is freed.
 * Always returns 0.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		/* Our poll was answered: stop the monitor timer and fall
		 * back to the retransmission timer if frames are pending */
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
3536
/* Validate one incoming ERTM frame (FCS, payload length, req_seq window)
 * and route it to the I-frame or S-frame handler.  Invalid frames either
 * get dropped silently (FCS) or tear the channel down.  Always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* Information payload excludes the SAR length word and FCS */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	/* req_seq must acknowledge within [expected_ack_seq, next_tx_seq],
	 * computed as mod-64 offsets from expected_ack_seq */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len went negative: frame was too short for its headers */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames carry no information payload */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3606
3607 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3608 {
3609 struct l2cap_chan *chan;
3610 struct sock *sk;
3611 struct l2cap_pinfo *pi;
3612 u16 control;
3613 u8 tx_seq;
3614 int len;
3615
3616 chan = l2cap_get_chan_by_scid(conn, cid);
3617 if (!chan) {
3618 BT_DBG("unknown cid 0x%4.4x", cid);
3619 goto drop;
3620 }
3621
3622 sk = chan->sk;
3623 pi = l2cap_pi(sk);
3624
3625 BT_DBG("sk %p, len %d", sk, skb->len);
3626
3627 if (sk->sk_state != BT_CONNECTED)
3628 goto drop;
3629
3630 switch (pi->mode) {
3631 case L2CAP_MODE_BASIC:
3632 /* If socket recv buffers overflows we drop data here
3633 * which is *bad* because L2CAP has to be reliable.
3634 * But we don't have any other choice. L2CAP doesn't
3635 * provide flow control mechanism. */
3636
3637 if (pi->imtu < skb->len)
3638 goto drop;
3639
3640 if (!sock_queue_rcv_skb(sk, skb))
3641 goto done;
3642 break;
3643
3644 case L2CAP_MODE_ERTM:
3645 if (!sock_owned_by_user(sk)) {
3646 l2cap_ertm_data_rcv(sk, skb);
3647 } else {
3648 if (sk_add_backlog(sk, skb))
3649 goto drop;
3650 }
3651
3652 goto done;
3653
3654 case L2CAP_MODE_STREAMING:
3655 control = get_unaligned_le16(skb->data);
3656 skb_pull(skb, 2);
3657 len = skb->len;
3658
3659 if (l2cap_check_fcs(pi, skb))
3660 goto drop;
3661
3662 if (__is_sar_start(control))
3663 len -= 2;
3664
3665 if (pi->fcs == L2CAP_FCS_CRC16)
3666 len -= 2;
3667
3668 if (len > pi->mps || len < 0 || __is_sframe(control))
3669 goto drop;
3670
3671 tx_seq = __get_txseq(control);
3672
3673 if (pi->expected_tx_seq == tx_seq)
3674 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3675 else
3676 pi->expected_tx_seq = (tx_seq + 1) % 64;
3677
3678 l2cap_streaming_reassembly_sdu(sk, skb, control);
3679
3680 goto done;
3681
3682 default:
3683 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
3684 break;
3685 }
3686
3687 drop:
3688 kfree_skb(skb);
3689
3690 done:
3691 if (sk)
3692 bh_unlock_sock(sk);
3693
3694 return 0;
3695 }
3696
/*
 * Deliver a connectionless (G-frame) packet to the local socket bound
 * to @psm.  The skb is consumed: queued to the socket on success,
 * freed otherwise.  Always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	/* connectionless data is only accepted while bound or connected */
	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	/* drop packets exceeding the socket's incoming MTU */
	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)	/* sk is NULL when the PSM lookup failed above */
		bh_unlock_sock(sk);
	return 0;
}
3726
3727 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3728 {
3729 struct l2cap_hdr *lh = (void *) skb->data;
3730 u16 cid, len;
3731 __le16 psm;
3732
3733 skb_pull(skb, L2CAP_HDR_SIZE);
3734 cid = __le16_to_cpu(lh->cid);
3735 len = __le16_to_cpu(lh->len);
3736
3737 if (len != skb->len) {
3738 kfree_skb(skb);
3739 return;
3740 }
3741
3742 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3743
3744 switch (cid) {
3745 case L2CAP_CID_LE_SIGNALING:
3746 case L2CAP_CID_SIGNALING:
3747 l2cap_sig_channel(conn, skb);
3748 break;
3749
3750 case L2CAP_CID_CONN_LESS:
3751 psm = get_unaligned_le16(skb->data);
3752 skb_pull(skb, 2);
3753 l2cap_conless_channel(conn, psm, skb);
3754 break;
3755
3756 default:
3757 l2cap_data_channel(conn, cid, skb);
3758 break;
3759 }
3760 }
3761
3762 /* ---- L2CAP interface with lower layer (HCI) ---- */
3763
3764 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3765 {
3766 int exact = 0, lm1 = 0, lm2 = 0;
3767 register struct sock *sk;
3768 struct hlist_node *node;
3769
3770 if (type != ACL_LINK)
3771 return -EINVAL;
3772
3773 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3774
3775 /* Find listening sockets and check their link_mode */
3776 read_lock(&l2cap_sk_list.lock);
3777 sk_for_each(sk, node, &l2cap_sk_list.head) {
3778 if (sk->sk_state != BT_LISTEN)
3779 continue;
3780
3781 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3782 lm1 |= HCI_LM_ACCEPT;
3783 if (l2cap_pi(sk)->role_switch)
3784 lm1 |= HCI_LM_MASTER;
3785 exact++;
3786 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3787 lm2 |= HCI_LM_ACCEPT;
3788 if (l2cap_pi(sk)->role_switch)
3789 lm2 |= HCI_LM_MASTER;
3790 }
3791 }
3792 read_unlock(&l2cap_sk_list.lock);
3793
3794 return exact ? lm1 : lm2;
3795 }
3796
3797 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3798 {
3799 struct l2cap_conn *conn;
3800
3801 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3802
3803 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3804 return -EINVAL;
3805
3806 if (!status) {
3807 conn = l2cap_conn_add(hcon, status);
3808 if (conn)
3809 l2cap_conn_ready(conn);
3810 } else
3811 l2cap_conn_del(hcon, bt_err(status));
3812
3813 return 0;
3814 }
3815
/*
 * HCI callback: supply the reason code for a pending disconnection.
 * Without per-connection L2CAP state (or for non-ACL links) fall back
 * to 0x13 — presumably the HCI "Remote User Terminated Connection"
 * reason; confirm against the Bluetooth HCI error code table.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
3827
3828 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3829 {
3830 BT_DBG("hcon %p reason %d", hcon, reason);
3831
3832 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3833 return -EINVAL;
3834
3835 l2cap_conn_del(hcon, bt_err(reason));
3836
3837 return 0;
3838 }
3839
3840 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3841 {
3842 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3843 return;
3844
3845 if (encrypt == 0x00) {
3846 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3847 l2cap_sock_clear_timer(sk);
3848 l2cap_sock_set_timer(sk, HZ * 5);
3849 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3850 __l2cap_sock_close(sk, ECONNREFUSED);
3851 } else {
3852 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3853 l2cap_sock_clear_timer(sk);
3854 }
3855 }
3856
/*
 * HCI callback: a security (authentication/encryption) procedure on
 * @hcon finished with @status (0 = success) and new @encrypt state.
 *
 * Walk every channel of the connection and advance its state machine:
 *  - connected/configuring channels re-check encryption requirements;
 *  - channels waiting for security (BT_CONNECT) finally send their
 *    deferred Connection Request, or get a short failure timer;
 *  - incoming channels held for security (BT_CONNECT2) are answered
 *    with success or a "security block" Connection Response.
 *
 * Runs with conn->chan_lock read-held; each socket is bh-locked only
 * while its own state is touched.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* a connect is already pending for this channel; skip it */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* security done: send the deferred request */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				chan->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* security failed: let the timer clean up */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
3929
/*
 * HCI callback: an ACL data packet arrived for @hcon.
 *
 * Reassembles L2CAP frames that are fragmented across ACL packets:
 * a start packet (no ACL_CONT flag) carries the basic L2CAP header,
 * from which the total frame length is known; continuation packets
 * are appended to conn->rx_skb until rx_len reaches zero, then the
 * complete frame is handed to l2cap_recv_frame().
 *
 * The incoming skb is always consumed (copied then freed, or freed on
 * error).  Protocol violations mark the connection unreliable via
 * l2cap_conn_unreliable().  Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* lazily create the l2cap_conn on first data */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* a new start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		chan = l2cap_get_chan_by_scid(conn, cid);

		/* NOTE(review): l2cap_get_chan_by_scid appears to return
		 * with the socket bh-locked — every branch below unlocks
		 * it; confirm against its definition. */
		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			/* reject frames that exceed the channel's MTU
			 * before allocating a reassembly buffer */
			if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
						"MTU %d)", len,
						l2cap_pi(sk)->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* continuation without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* fragment would overflow the reassembly buffer */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4040
4041 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4042 {
4043 struct sock *sk;
4044 struct hlist_node *node;
4045
4046 read_lock_bh(&l2cap_sk_list.lock);
4047
4048 sk_for_each(sk, node, &l2cap_sk_list.head) {
4049 struct l2cap_pinfo *pi = l2cap_pi(sk);
4050
4051 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4052 batostr(&bt_sk(sk)->src),
4053 batostr(&bt_sk(sk)->dst),
4054 sk->sk_state, __le16_to_cpu(pi->psm),
4055 pi->scid, pi->dcid,
4056 pi->imtu, pi->omtu, pi->sec_level,
4057 pi->mode);
4058 }
4059
4060 read_unlock_bh(&l2cap_sk_list.lock);
4061
4062 return 0;
4063 }
4064
/* debugfs open: bind the seq_file show callback to this file */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4069
/* file operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* dentry of the debugfs file, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
4078
/* Hooks registered with the HCI core so it can deliver link events
 * and ACL data to the L2CAP layer. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4089
4090 int __init l2cap_init(void)
4091 {
4092 int err;
4093
4094 err = l2cap_init_sockets();
4095 if (err < 0)
4096 return err;
4097
4098 _busy_wq = create_singlethread_workqueue("l2cap");
4099 if (!_busy_wq) {
4100 err = -ENOMEM;
4101 goto error;
4102 }
4103
4104 err = hci_register_proto(&l2cap_hci_proto);
4105 if (err < 0) {
4106 BT_ERR("L2CAP protocol registration failed");
4107 bt_sock_unregister(BTPROTO_L2CAP);
4108 goto error;
4109 }
4110
4111 if (bt_debugfs) {
4112 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4113 bt_debugfs, NULL, &l2cap_debugfs_fops);
4114 if (!l2cap_debugfs)
4115 BT_ERR("Failed to create L2CAP debug file");
4116 }
4117
4118 return 0;
4119
4120 error:
4121 destroy_workqueue(_busy_wq);
4122 l2cap_cleanup_sockets();
4123 return err;
4124 }
4125
/* Module exit: tear down in reverse order of l2cap_init() —
 * debugfs entry, workqueue, HCI protocol hooks, socket family. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* drain pending busy work before destroying the queue */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4138
/* Expose disable_ertm as a writable (0644) module parameter */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");