Bluetooth: Add support for resuming socket when SMP is finished
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58
59 int disable_ertm;
60
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
63
64 static struct workqueue_struct *_busy_wq;
65
66 static LIST_HEAD(chan_list);
67 static DEFINE_RWLOCK(chan_list_lock);
68
69 static void l2cap_busy_work(struct work_struct *work);
70
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
74 void *data);
75 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
76 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
77 struct l2cap_chan *chan, int err);
78
79 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
80
81 /* ---- L2CAP channels ---- */
82
/* Take a reference on @c; paired with chan_put(). */
static inline void chan_hold(struct l2cap_chan *c)
{
	atomic_inc(&c->refcnt);
}
87
/* Drop a reference on @c; frees the channel when the count reaches zero. */
static inline void chan_put(struct l2cap_chan *c)
{
	if (atomic_dec_and_test(&c->refcnt))
		kfree(c);
}
93
94 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 {
96 struct l2cap_chan *c;
97
98 list_for_each_entry(c, &conn->chan_l, list) {
99 if (c->dcid == cid)
100 return c;
101 }
102 return NULL;
103
104 }
105
106 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 {
108 struct l2cap_chan *c;
109
110 list_for_each_entry(c, &conn->chan_l, list) {
111 if (c->scid == cid)
112 return c;
113 }
114 return NULL;
115 }
116
/* Find channel with given SCID.
 * Returns locked socket */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		bh_lock_sock(c->sk);	/* caller must bh_unlock_sock() */
	read_unlock(&conn->chan_lock);
	return c;
}
130
131 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
132 {
133 struct l2cap_chan *c;
134
135 list_for_each_entry(c, &conn->chan_l, list) {
136 if (c->ident == ident)
137 return c;
138 }
139 return NULL;
140 }
141
/* Find the channel owning command identifier @ident.  On success the
 * channel's socket is returned bh-locked; the caller must
 * bh_unlock_sock() it. */
static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
153
154 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
155 {
156 struct l2cap_chan *c;
157
158 list_for_each_entry(c, &chan_list, global_l) {
159 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
160 goto found;
161 }
162
163 c = NULL;
164 found:
165 return c;
166 }
167
/* Bind @chan to @psm on source address @src.  A zero @psm requests
 * allocation of a free dynamic PSM instead.  Returns 0 on success,
 * -EADDRINUSE if the PSM is taken, -EINVAL if the dynamic range is
 * exhausted. */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock_bh(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		/* Dynamic PSMs are the odd values 0x1001-0x10ff. */
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock_bh(&chan_list_lock);
	return err;
}
200
/* Assign fixed source CID @scid to @chan.  Always succeeds. */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock_bh(&chan_list_lock);

	chan->scid = scid;

	write_unlock_bh(&chan_list_lock);

	return 0;
}
211
212 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
213 {
214 u16 cid = L2CAP_CID_DYN_START;
215
216 for (; cid < L2CAP_CID_DYN_END; cid++) {
217 if (!__l2cap_get_chan_by_scid(conn, cid))
218 return cid;
219 }
220
221 return 0;
222 }
223
224 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
225 {
226 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
227
228 if (!mod_timer(timer, jiffies + timeout))
229 chan_hold(chan);
230 }
231
/* Stop @timer if pending and drop the reference l2cap_set_timer() took. */
static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
{
	BT_DBG("chan %p state %d", chan, chan->state);

	/* Only drop the ref if we actually deactivated the timer. */
	if (timer_pending(timer) && del_timer(timer))
		chan_put(chan);
}
239
/* Set the channel state and notify the upper layer through its
 * state_change callback. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
245
/* Channel timer expiry: close the channel, choosing the error reported
 * to the socket from its current state.  Runs in timer context; the
 * reference taken when the timer was armed is released on every path. */
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		chan_put(chan);
		return;
	}

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
			chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	/* Release the reference taken when the timer was armed. */
	chan_put(chan);
}
279
/* Allocate a new channel bound to @sk, add it to the global channel
 * list and set up its timer.  Returns NULL on allocation failure. */
struct l2cap_chan *l2cap_chan_create(struct sock *sk)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	chan->sk = sk;

	write_lock_bh(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock_bh(&chan_list_lock);

	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);

	chan->state = BT_OPEN;

	/* Initial reference; dropped by l2cap_chan_destroy(). */
	atomic_set(&chan->refcnt, 1);

	return chan;
}
302
/* Unlink @chan from the global channel list and drop the creation
 * reference taken in l2cap_chan_create(). */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock_bh(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock_bh(&chan_list_lock);

	chan_put(chan);
}
311
/* Attach @chan to @conn, choosing CIDs and outgoing MTU from the
 * channel and link type.  Caller must hold conn->chan_lock for write. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			chan->psm, chan->dcid);

	/* Default disconnect reason (0x13: remote user terminated). */
	conn->disc_reason = 0x13;

	chan->conn = conn;

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Reference held on behalf of the connection's channel list. */
	chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
348
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* Drop the list's reference; the caller still holds one. */
		chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* Queues only need purging once configuration completed in both
	 * directions. */
	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
			chan->conf_state & L2CAP_CONF_INPUT_DONE))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);
		skb_queue_purge(&chan->busy_q);

		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
406
/* Tear down every connection still queued on listening socket @parent
 * that userspace never accepted. */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
		__clear_chan_timer(chan);
		lock_sock(sk);
		l2cap_chan_close(chan, ECONNRESET);
		release_sock(sk);
		chan->ops->close(chan->data);
	}
}
423
/* Close @chan with error @reason, following the L2CAP state machine:
 * listeners drain their backlog, connected ACL channels send a
 * Disconnect request first, half-open incoming channels reject the
 * connection.  Must be called with the socket locked. */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);

	switch (chan->state) {
	case BT_LISTEN:
		l2cap_chan_cleanup_listen(sk);

		l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
				conn->hcon->type == ACL_LINK) {
			/* Orderly shutdown: request disconnection and let
			 * the channel timer bound the wait for a response. */
			__clear_chan_timer(chan);
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
				conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the pending incoming connection. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
					sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
483
484 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
485 {
486 if (chan->chan_type == L2CAP_CHAN_RAW) {
487 switch (chan->sec_level) {
488 case BT_SECURITY_HIGH:
489 return HCI_AT_DEDICATED_BONDING_MITM;
490 case BT_SECURITY_MEDIUM:
491 return HCI_AT_DEDICATED_BONDING;
492 default:
493 return HCI_AT_NO_BONDING;
494 }
495 } else if (chan->psm == cpu_to_le16(0x0001)) {
496 if (chan->sec_level == BT_SECURITY_LOW)
497 chan->sec_level = BT_SECURITY_SDP;
498
499 if (chan->sec_level == BT_SECURITY_HIGH)
500 return HCI_AT_NO_BONDING_MITM;
501 else
502 return HCI_AT_NO_BONDING;
503 } else {
504 switch (chan->sec_level) {
505 case BT_SECURITY_HIGH:
506 return HCI_AT_GENERAL_BONDING_MITM;
507 case BT_SECURITY_MEDIUM:
508 return HCI_AT_GENERAL_BONDING;
509 default:
510 return HCI_AT_NO_BONDING;
511 }
512 }
513 }
514
/* Service level security */
/* Request service-level security for @chan on its underlying link;
 * the result is whatever hci_conn_security() reports (NOTE(review):
 * appears to be nonzero when the link already satisfies the level —
 * confirm against hci_conn_security()). */
static inline int l2cap_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
525
526 static u8 l2cap_get_ident(struct l2cap_conn *conn)
527 {
528 u8 id;
529
530 /* Get next available identificator.
531 * 1 - 128 are used by kernel.
532 * 129 - 199 are reserved.
533 * 200 - 254 are used by utilities like l2ping, etc.
534 */
535
536 spin_lock_bh(&conn->lock);
537
538 if (++conn->tx_ident > 128)
539 conn->tx_ident = 1;
540
541 id = conn->tx_ident;
542
543 spin_unlock_bh(&conn->lock);
544
545 return id;
546 }
547
/* Build and transmit an L2CAP signalling command on @conn's link.
 * Silently drops the command if the skb cannot be allocated. */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Avoid flushing signalling traffic when the controller allows. */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;

	hci_send_acl(conn->hcon, skb, flags);
}
567
/* Build and send an ERTM supervisory (S-) frame carrying @control,
 * folding in any pending Final/Poll bit requests (which are cleared).
 * No-op unless the channel is connected. */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control field */
	u8 flags;

	if (chan->state != BT_CONNECTED)
		return;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = chan->force_active;

	hci_send_acl(chan->conn->hcon, skb, flags);
}
620
/* Send RR (Receiver Ready) or, when locally busy, RNR (Receiver Not
 * Ready), acknowledging up to the current buffer sequence number. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
{
	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		/* Remember we told the peer we are busy. */
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(chan, control);
}
633
/* True while no Connect Request from us is outstanding on @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
}
638
/* Begin connection establishment for @chan.  If the remote feature
 * mask exchange has been started, send the Connect Request once it is
 * done and security allows; otherwise start the exchange first. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait until the feature exchange finishes. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(chan->scid);
			req.psm = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
					sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the information response. */
		mod_timer(&conn->info_timer, jiffies +
				msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
				L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
673
674 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
675 {
676 u32 local_feat_mask = l2cap_feat_mask;
677 if (!disable_ertm)
678 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
679
680 switch (mode) {
681 case L2CAP_MODE_ERTM:
682 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
683 case L2CAP_MODE_STREAMING:
684 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
685 default:
686 return 0x00;
687 }
688 }
689
/* Send a Disconnect Request for @chan, stop its ERTM timers, move the
 * channel to BT_DISCONN and report @err on the socket. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk = chan->sk;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	l2cap_state_change(chan, BT_DISCONN);
	sk->sk_err = err;
}
714
715 /* ---- L2CAP connections ---- */
716 static void l2cap_conn_start(struct l2cap_conn *conn)
717 {
718 struct l2cap_chan *chan, *tmp;
719
720 BT_DBG("conn %p", conn);
721
722 read_lock(&conn->chan_lock);
723
724 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
725 struct sock *sk = chan->sk;
726
727 bh_lock_sock(sk);
728
729 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
730 bh_unlock_sock(sk);
731 continue;
732 }
733
734 if (chan->state == BT_CONNECT) {
735 struct l2cap_conn_req req;
736
737 if (!l2cap_check_security(chan) ||
738 !__l2cap_no_conn_pending(chan)) {
739 bh_unlock_sock(sk);
740 continue;
741 }
742
743 if (!l2cap_mode_supported(chan->mode,
744 conn->feat_mask)
745 && chan->conf_state &
746 L2CAP_CONF_STATE2_DEVICE) {
747 /* l2cap_chan_close() calls list_del(chan)
748 * so release the lock */
749 read_unlock_bh(&conn->chan_lock);
750 l2cap_chan_close(chan, ECONNRESET);
751 read_lock_bh(&conn->chan_lock);
752 bh_unlock_sock(sk);
753 continue;
754 }
755
756 req.scid = cpu_to_le16(chan->scid);
757 req.psm = chan->psm;
758
759 chan->ident = l2cap_get_ident(conn);
760 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
761
762 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
763 sizeof(req), &req);
764
765 } else if (chan->state == BT_CONNECT2) {
766 struct l2cap_conn_rsp rsp;
767 char buf[128];
768 rsp.scid = cpu_to_le16(chan->dcid);
769 rsp.dcid = cpu_to_le16(chan->scid);
770
771 if (l2cap_check_security(chan)) {
772 if (bt_sk(sk)->defer_setup) {
773 struct sock *parent = bt_sk(sk)->parent;
774 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
775 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
776 parent->sk_data_ready(parent, 0);
777
778 } else {
779 l2cap_state_change(chan, BT_CONFIG);
780 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
781 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
782 }
783 } else {
784 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
785 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
786 }
787
788 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
789 sizeof(rsp), &rsp);
790
791 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
792 rsp.result != L2CAP_CR_SUCCESS) {
793 bh_unlock_sock(sk);
794 continue;
795 }
796
797 chan->conf_state |= L2CAP_CONF_REQ_SENT;
798 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
799 l2cap_build_conf_req(chan, buf), buf);
800 chan->num_conf_req++;
801 }
802
803 bh_unlock_sock(sk);
804 }
805
806 read_unlock(&conn->chan_lock);
807 }
808
809 /* Find socket with cid and source bdaddr.
810 * Returns closest match, locked.
811 */
812 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
813 {
814 struct l2cap_chan *c, *c1 = NULL;
815
816 read_lock(&chan_list_lock);
817
818 list_for_each_entry(c, &chan_list, global_l) {
819 struct sock *sk = c->sk;
820
821 if (state && c->state != state)
822 continue;
823
824 if (c->scid == cid) {
825 /* Exact match. */
826 if (!bacmp(&bt_sk(sk)->src, src)) {
827 read_unlock(&chan_list_lock);
828 return c;
829 }
830
831 /* Closest match */
832 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
833 c1 = c;
834 }
835 }
836
837 read_unlock(&chan_list_lock);
838
839 return c1;
840 }
841
/* Incoming LE link: if a socket listens on the LE data CID for our
 * address, create a child channel, attach it to @conn and wake the
 * listener. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!pchan)
		return;

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	/* Keep the HCI link alive while this channel exists. */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

clean:
	/* The success path also lands here: only unlock the parent. */
	bh_unlock_sock(parent);
}
892
/* Mark the channel backing @sk fully connected and wake whoever waits
 * on it (the socket itself, or its listening parent if any). */
static void l2cap_chan_ready(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* Configuration finished; clear transient config flags. */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	sk->sk_state = BT_CONNECTED;
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);
}
909
/* The underlying HCI link is up: accept pending incoming LE
 * connections and push every attached channel forward (LE security
 * check, connectionless shortcut, or the normal connect sequence). */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* LE channels become ready once SMP reports the required
		 * security level is in place. */
		if (conn->hcon->type == LE_LINK)
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(sk);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
943
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		/* Only channels that demanded reliability see the error. */
		if (chan->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&conn->chan_lock);
}
962
963 static void l2cap_info_timeout(unsigned long arg)
964 {
965 struct l2cap_conn *conn = (void *) arg;
966
967 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
968 conn->info_ident = 0;
969
970 l2cap_conn_start(conn);
971 }
972
/* Return the L2CAP connection object for @hcon, creating one if
 * needed.  A nonzero @status suppresses creation.  Returns NULL on
 * allocation failure. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* Use the controller's LE MTU on LE links when it is set. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links have no feature-mask exchange, so no info timer. */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	conn->disc_reason = 0x13;

	return conn;
}
1012
/* Tear down the L2CAP connection attached to @hcon: close every
 * channel with error @err and free the connection state. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		chan->ops->close(chan->data);
	}

	/* The info timer only ever ran if the request was sent. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1041
/* Locked wrapper around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
1048
1049 /* ---- Socket interface ---- */
1050
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins, otherwise
 * the last channel bound to BDADDR_ANY, otherwise NULL.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src)) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1083
/* Connect @chan to the peer address stored in its socket: bring up an
 * ACL or LE link as appropriate, attach the channel and kick off the
 * L2CAP connect sequence.  Returns 0 on success or a negative errno. */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else is ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Raw/connectionless: ready once security holds. */
			__clear_chan_timer(chan);
			if (l2cap_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1149
/* Sleep until every outstanding ERTM frame has been acknowledged or
 * the channel is torn down.  Called with the socket locked; the lock
 * is released around each sleep.  Returns 0, a socket error, or an
 * interruption errno. */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((chan->unacked_frames > 0 && chan->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the poll interval after each full sleep. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1181
/* ERTM monitor timer: poll the peer again, or give up and disconnect
 * once remote_max_tx retries have been exhausted. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1202
/* ERTM retransmission timer: start the poll/monitor cycle by sending
 * an S-frame with the Poll bit set and waiting for the Final bit. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	chan->retry_count = 1;
	__set_monitor_timer(chan);

	/* From here on we expect an F-bit response from the peer. */
	chan->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1219
/* Free transmitted frames from the head of the tx queue up to (but
 * not including) the expected-acknowledgement sequence number. */
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&chan->tx_q)) &&
			chan->unacked_frames) {
		/* Stop at the first frame not yet acknowledged. */
		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
			break;

		skb = skb_dequeue(&chan->tx_q);
		kfree_skb(skb);

		chan->unacked_frames--;
	}

	/* Nothing left in flight: no need for the retransmit timer. */
	if (!chan->unacked_frames)
		__clear_retrans_timer(chan);
}
1238
/* Hand @skb to the HCI layer for transmission on @chan's link,
 * honouring the channel's flushable and force_active settings. */
void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);

	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = chan->force_active;
	hci_send_acl(hcon, skb, flags);
}
1254
/* Drain the tx queue in streaming mode: stamp each frame with the next
 * TxSeq (mod 64), recompute the FCS if enabled, and transmit.  No
 * acknowledgements or retransmissions in this mode. */
void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control, fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS goes in the last two bytes of the frame. */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	}
}
1275
1276 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1277 {
1278 struct sk_buff *skb, *tx_skb;
1279 u16 control, fcs;
1280
1281 skb = skb_peek(&chan->tx_q);
1282 if (!skb)
1283 return;
1284
1285 do {
1286 if (bt_cb(skb)->tx_seq == tx_seq)
1287 break;
1288
1289 if (skb_queue_is_last(&chan->tx_q, skb))
1290 return;
1291
1292 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1293
1294 if (chan->remote_max_tx &&
1295 bt_cb(skb)->retries == chan->remote_max_tx) {
1296 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1297 return;
1298 }
1299
1300 tx_skb = skb_clone(skb, GFP_ATOMIC);
1301 bt_cb(skb)->retries++;
1302 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1303 control &= L2CAP_CTRL_SAR;
1304
1305 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1306 control |= L2CAP_CTRL_FINAL;
1307 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1308 }
1309
1310 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1311 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1312
1313 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1314
1315 if (chan->fcs == L2CAP_FCS_CRC16) {
1316 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1317 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1318 }
1319
1320 l2cap_do_send(chan, tx_skb);
1321 }
1322
1323 int l2cap_ertm_send(struct l2cap_chan *chan)
1324 {
1325 struct sk_buff *skb, *tx_skb;
1326 u16 control, fcs;
1327 int nsent = 0;
1328
1329 if (chan->state != BT_CONNECTED)
1330 return -ENOTCONN;
1331
1332 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1333
1334 if (chan->remote_max_tx &&
1335 bt_cb(skb)->retries == chan->remote_max_tx) {
1336 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1337 break;
1338 }
1339
1340 tx_skb = skb_clone(skb, GFP_ATOMIC);
1341
1342 bt_cb(skb)->retries++;
1343
1344 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1345 control &= L2CAP_CTRL_SAR;
1346
1347 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1348 control |= L2CAP_CTRL_FINAL;
1349 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1350 }
1351 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1352 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1353 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1354
1355
1356 if (chan->fcs == L2CAP_FCS_CRC16) {
1357 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1358 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1359 }
1360
1361 l2cap_do_send(chan, tx_skb);
1362
1363 __set_retrans_timer(chan);
1364
1365 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1366 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1367
1368 if (bt_cb(skb)->retries == 1)
1369 chan->unacked_frames++;
1370
1371 chan->frames_sent++;
1372
1373 if (skb_queue_is_last(&chan->tx_q, skb))
1374 chan->tx_send_head = NULL;
1375 else
1376 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1377
1378 nsent++;
1379 }
1380
1381 return nsent;
1382 }
1383
1384 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1385 {
1386 int ret;
1387
1388 if (!skb_queue_empty(&chan->tx_q))
1389 chan->tx_send_head = chan->tx_q.next;
1390
1391 chan->next_tx_seq = chan->expected_ack_seq;
1392 ret = l2cap_ertm_send(chan);
1393 return ret;
1394 }
1395
/* Acknowledge received I-frames.  When locally busy an RNR is sent
 * instead; otherwise try to piggyback the ack on pending outgoing
 * data and fall back to an RR S-frame if nothing was sent.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	u16 control = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		/* Remember we told the peer we are busy so the matching
		 * RR can be sent once the busy condition clears. */
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(chan, control);
		return;
	}

	/* I-frames carry ReqSeq, so sending data acknowledges too */
	if (l2cap_ertm_send(chan) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(chan, control);
}
1415
1416 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1417 {
1418 struct srej_list *tail;
1419 u16 control;
1420
1421 control = L2CAP_SUPER_SELECT_REJECT;
1422 control |= L2CAP_CTRL_FINAL;
1423
1424 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1425 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1426
1427 l2cap_send_sframe(chan, control);
1428 }
1429
/* Copy up to 'len' bytes of user data from msg into skb, allocating
 * continuation buffers (chained on frag_list) whenever the data does
 * not fit in the first buffer; 'count' is the room available there.
 * Returns bytes copied or a negative errno.  On error the caller
 * frees skb, which also frees any fragments chained so far.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		/* Each fragment carries at most one ACL MTU */
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1461
/* Build a connectionless (G-frame) PDU: L2CAP header plus a 2-byte
 * PSM, followed by the user data.  Returns the skb or an ERR_PTR.
 */
struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First buffer holds at most one ACL MTU; the remainder goes
	 * into continuation fragments in l2cap_skbuff_fromiovec(). */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1491
/* Build a basic-mode B-frame PDU: a plain L2CAP header followed by
 * the user data.  Returns the skb or an ERR_PTR.
 */
struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First buffer is capped to the ACL MTU; the remainder is
	 * chained as fragments by l2cap_skbuff_fromiovec(). */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1520
/* Build an ERTM/streaming I-frame PDU.  'control' is the partially
 * filled control field (TxSeq is stamped at transmit time); 'sdulen'
 * is non-zero only for the first PDU of a segmented SDU and is placed
 * right after the control field.  When FCS is enabled two bytes are
 * reserved at the tail; the CRC itself is computed on transmission.
 * Returns the skb or an ERR_PTR.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* room for the SDU length field */

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Placeholder FCS, overwritten with the real CRC when sent */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1566
/* Segment an SDU larger than the remote MPS into a START PDU (which
 * carries the total SDU length), zero or more CONTINUE PDUs and a
 * final END PDU.  PDUs are built on a private queue first so that a
 * failure part-way leaves chan->tx_q untouched.  Returns the number
 * of bytes queued or a negative errno.
 */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop everything built so far */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1611
/* Entry point for sending an SDU on a channel.  Dispatches on channel
 * type and mode.  Returns the number of bytes accepted or a negative
 * errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
									0);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				return err;
		}

		/* Streaming mode flushes the queue immediately */
		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		/* Keep the data queued while the peer is busy and a
		 * poll (monitor) exchange is still outstanding */
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->conn_state & L2CAP_CONN_WAIT_F)) {
			err = len;
			break;
		}

		err = l2cap_ertm_send(chan);
		if (err >= 0)
			err = len;

		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
1690
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Each raw socket gets its own clone; delivery is best
		 * effort, so allocation failure just skips this one. */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Non-zero means the callback did not consume the skb */
		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}
1717
1718 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command: L2CAP header + command header + 'dlen'
 * bytes of payload, chaining payload that exceeds the ACL MTU on the
 * frag_list.  Returns the skb or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling CID */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room remaining in the first buffer */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
1781
1782 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1783 {
1784 struct l2cap_conf_opt *opt = *ptr;
1785 int len;
1786
1787 len = L2CAP_CONF_OPT_SIZE + opt->len;
1788 *ptr += len;
1789
1790 *type = opt->type;
1791 *olen = opt->len;
1792
1793 switch (opt->len) {
1794 case 1:
1795 *val = *((u8 *) opt->val);
1796 break;
1797
1798 case 2:
1799 *val = get_unaligned_le16(opt->val);
1800 break;
1801
1802 case 4:
1803 *val = get_unaligned_le32(opt->val);
1804 break;
1805
1806 default:
1807 *val = (unsigned long) opt->val;
1808 break;
1809 }
1810
1811 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1812 return len;
1813 }
1814
1815 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1816 {
1817 struct l2cap_conf_opt *opt = *ptr;
1818
1819 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1820
1821 opt->type = type;
1822 opt->len = len;
1823
1824 switch (len) {
1825 case 1:
1826 *((u8 *) opt->val) = val;
1827 break;
1828
1829 case 2:
1830 put_unaligned_le16(val, opt->val);
1831 break;
1832
1833 case 4:
1834 put_unaligned_le32(val, opt->val);
1835 break;
1836
1837 default:
1838 memcpy(opt->val, (void *) val, len);
1839 break;
1840 }
1841
1842 *ptr += L2CAP_CONF_OPT_SIZE + len;
1843 }
1844
1845 static void l2cap_ack_timeout(unsigned long arg)
1846 {
1847 struct l2cap_chan *chan = (void *) arg;
1848
1849 bh_lock_sock(chan->sk);
1850 l2cap_send_ack(chan);
1851 bh_unlock_sock(chan->sk);
1852 }
1853
/* Reset ERTM sequence state and set up the per-channel timers,
 * receive-side queues and SREJ list when a channel enters ERTM mode.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	/* srej_q holds out-of-order frames, busy_q frames received
	 * while locally busy */
	skb_queue_head_init(&chan->srej_q);
	skb_queue_head_init(&chan->busy_q);

	INIT_LIST_HEAD(&chan->srej_l);

	INIT_WORK(&chan->busy_work, l2cap_busy_work);

	/* ERTM frames queued on the socket backlog go through the
	 * ERTM-aware receive path */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1879
1880 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1881 {
1882 switch (mode) {
1883 case L2CAP_MODE_STREAMING:
1884 case L2CAP_MODE_ERTM:
1885 if (l2cap_mode_supported(mode, remote_feat_mask))
1886 return mode;
1887 /* fall through */
1888 default:
1889 return L2CAP_MODE_BASIC;
1890 }
1891 }
1892
/* Build our Configure Request into 'data': propose MTU, mode (RFC
 * option) and FCS based on local settings and the remote feature
 * mask.  Returns the request length.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* The mode is only (re)selected on the very first request */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A mode pinned by the user/device is never downgraded */
		if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC option when the
		 * remote understands ERTM or streaming at all */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = chan->tx_win;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* NOTE(review): the 10 appears to account for per-frame
		 * header/control/SDU-len/FCS overhead — confirm */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request "no FCS" when we or the remote opted out */
		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1989
1990 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1991 {
1992 struct l2cap_conf_rsp *rsp = data;
1993 void *ptr = rsp->data;
1994 void *req = chan->conf_req;
1995 int len = chan->conf_len;
1996 int type, hint, olen;
1997 unsigned long val;
1998 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1999 u16 mtu = L2CAP_DEFAULT_MTU;
2000 u16 result = L2CAP_CONF_SUCCESS;
2001
2002 BT_DBG("chan %p", chan);
2003
2004 while (len >= L2CAP_CONF_OPT_SIZE) {
2005 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2006
2007 hint = type & L2CAP_CONF_HINT;
2008 type &= L2CAP_CONF_MASK;
2009
2010 switch (type) {
2011 case L2CAP_CONF_MTU:
2012 mtu = val;
2013 break;
2014
2015 case L2CAP_CONF_FLUSH_TO:
2016 chan->flush_to = val;
2017 break;
2018
2019 case L2CAP_CONF_QOS:
2020 break;
2021
2022 case L2CAP_CONF_RFC:
2023 if (olen == sizeof(rfc))
2024 memcpy(&rfc, (void *) val, olen);
2025 break;
2026
2027 case L2CAP_CONF_FCS:
2028 if (val == L2CAP_FCS_NONE)
2029 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2030
2031 break;
2032
2033 default:
2034 if (hint)
2035 break;
2036
2037 result = L2CAP_CONF_UNKNOWN;
2038 *((u8 *) ptr++) = type;
2039 break;
2040 }
2041 }
2042
2043 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2044 goto done;
2045
2046 switch (chan->mode) {
2047 case L2CAP_MODE_STREAMING:
2048 case L2CAP_MODE_ERTM:
2049 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2050 chan->mode = l2cap_select_mode(rfc.mode,
2051 chan->conn->feat_mask);
2052 break;
2053 }
2054
2055 if (chan->mode != rfc.mode)
2056 return -ECONNREFUSED;
2057
2058 break;
2059 }
2060
2061 done:
2062 if (chan->mode != rfc.mode) {
2063 result = L2CAP_CONF_UNACCEPT;
2064 rfc.mode = chan->mode;
2065
2066 if (chan->num_conf_rsp == 1)
2067 return -ECONNREFUSED;
2068
2069 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2070 sizeof(rfc), (unsigned long) &rfc);
2071 }
2072
2073
2074 if (result == L2CAP_CONF_SUCCESS) {
2075 /* Configure output options and let the other side know
2076 * which ones we don't like. */
2077
2078 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2079 result = L2CAP_CONF_UNACCEPT;
2080 else {
2081 chan->omtu = mtu;
2082 chan->conf_state |= L2CAP_CONF_MTU_DONE;
2083 }
2084 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2085
2086 switch (rfc.mode) {
2087 case L2CAP_MODE_BASIC:
2088 chan->fcs = L2CAP_FCS_NONE;
2089 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2090 break;
2091
2092 case L2CAP_MODE_ERTM:
2093 chan->remote_tx_win = rfc.txwin_size;
2094 chan->remote_max_tx = rfc.max_transmit;
2095
2096 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2097 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2098
2099 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2100
2101 rfc.retrans_timeout =
2102 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2103 rfc.monitor_timeout =
2104 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2105
2106 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2107
2108 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2109 sizeof(rfc), (unsigned long) &rfc);
2110
2111 break;
2112
2113 case L2CAP_MODE_STREAMING:
2114 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2115 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2116
2117 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2118
2119 chan->conf_state |= L2CAP_CONF_MODE_DONE;
2120
2121 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2122 sizeof(rfc), (unsigned long) &rfc);
2123
2124 break;
2125
2126 default:
2127 result = L2CAP_CONF_UNACCEPT;
2128
2129 memset(&rfc, 0, sizeof(rfc));
2130 rfc.mode = chan->mode;
2131 }
2132
2133 if (result == L2CAP_CONF_SUCCESS)
2134 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2135 }
2136 rsp->scid = cpu_to_le16(chan->dcid);
2137 rsp->result = cpu_to_le16(result);
2138 rsp->flags = cpu_to_le16(0x0000);
2139
2140 return ptr - data;
2141 }
2142
2143 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2144 {
2145 struct l2cap_conf_req *req = data;
2146 void *ptr = req->data;
2147 int type, olen;
2148 unsigned long val;
2149 struct l2cap_conf_rfc rfc;
2150
2151 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2152
2153 while (len >= L2CAP_CONF_OPT_SIZE) {
2154 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2155
2156 switch (type) {
2157 case L2CAP_CONF_MTU:
2158 if (val < L2CAP_DEFAULT_MIN_MTU) {
2159 *result = L2CAP_CONF_UNACCEPT;
2160 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2161 } else
2162 chan->imtu = val;
2163 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2164 break;
2165
2166 case L2CAP_CONF_FLUSH_TO:
2167 chan->flush_to = val;
2168 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2169 2, chan->flush_to);
2170 break;
2171
2172 case L2CAP_CONF_RFC:
2173 if (olen == sizeof(rfc))
2174 memcpy(&rfc, (void *)val, olen);
2175
2176 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2177 rfc.mode != chan->mode)
2178 return -ECONNREFUSED;
2179
2180 chan->fcs = 0;
2181
2182 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2183 sizeof(rfc), (unsigned long) &rfc);
2184 break;
2185 }
2186 }
2187
2188 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2189 return -ECONNREFUSED;
2190
2191 chan->mode = rfc.mode;
2192
2193 if (*result == L2CAP_CONF_SUCCESS) {
2194 switch (rfc.mode) {
2195 case L2CAP_MODE_ERTM:
2196 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2197 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2198 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2199 break;
2200 case L2CAP_MODE_STREAMING:
2201 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2202 }
2203 }
2204
2205 req->dcid = cpu_to_le16(chan->dcid);
2206 req->flags = cpu_to_le16(0x0000);
2207
2208 return ptr - data;
2209 }
2210
2211 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2212 {
2213 struct l2cap_conf_rsp *rsp = data;
2214 void *ptr = rsp->data;
2215
2216 BT_DBG("chan %p", chan);
2217
2218 rsp->scid = cpu_to_le16(chan->dcid);
2219 rsp->result = cpu_to_le16(result);
2220 rsp->flags = cpu_to_le16(flags);
2221
2222 return ptr - data;
2223 }
2224
/* Send the deferred (successful) Connect Response for a channel whose
 * acceptance was held back, then kick off configuration unless our
 * Configure Request has already gone out.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Configuration already started, nothing more to do */
	if (chan->conf_state & L2CAP_CONF_REQ_SENT)
		return;

	chan->conf_state |= L2CAP_CONF_REQ_SENT;
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
2246
2247 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2248 {
2249 int type, olen;
2250 unsigned long val;
2251 struct l2cap_conf_rfc rfc;
2252
2253 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2254
2255 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2256 return;
2257
2258 while (len >= L2CAP_CONF_OPT_SIZE) {
2259 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2260
2261 switch (type) {
2262 case L2CAP_CONF_RFC:
2263 if (olen == sizeof(rfc))
2264 memcpy(&rfc, (void *)val, olen);
2265 goto done;
2266 }
2267 }
2268
2269 done:
2270 switch (rfc.mode) {
2271 case L2CAP_MODE_ERTM:
2272 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2273 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2274 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2275 break;
2276 case L2CAP_MODE_STREAMING:
2277 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2278 }
2279 }
2280
2281 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2282 {
2283 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2284
2285 if (rej->reason != 0x0000)
2286 return 0;
2287
2288 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2289 cmd->ident == conn->info_ident) {
2290 del_timer(&conn->info_timer);
2291
2292 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2293 conn->info_ident = 0;
2294
2295 l2cap_conn_start(conn);
2296 }
2297
2298 return 0;
2299 }
2300
/* Handle an incoming Connect Request: look up a listening channel for
 * the PSM, apply security and backlog checks, create the child
 * channel and reply with success, pending or a rejection.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Wait for user space to accept */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still in progress */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Kick off feature discovery if we have not learned the remote
	 * feature mask yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2434
/* Handle a Connect Response for an outgoing connection.  On success
 * move to BT_CONFIG and start configuration; on pending just record
 * the state; on failure tear the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* NOTE(review): the lookup helpers below appear to return with
	 * the channel's socket bh-locked (matching the bh_unlock_sock
	 * at the end) — confirm against their definitions. */
	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Only send the initial Configure Request once */
		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		chan->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			l2cap_state_change(chan, BT_DISCONN);
			__clear_chan_timer(chan);
			__set_chan_timer(chan, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2499
2500 static inline void set_default_fcs(struct l2cap_chan *chan)
2501 {
2502 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2503
2504 /* FCS is enabled only in ERTM or streaming mode, if one or both
2505 * sides request it.
2506 */
2507 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2508 chan->fcs = L2CAP_FCS_NONE;
2509 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2510 chan->fcs = L2CAP_FCS_CRC16;
2511 }
2512
/* Handle an incoming L2CAP Configure Request.
 *
 * Option data may be fragmented across several requests (continuation
 * flag 0x0001 in `flags`); fragments accumulate in chan->conf_req until
 * the final one arrives, then the whole set is parsed and answered.
 * Once both directions are configured (OUTPUT_DONE and INPUT_DONE) the
 * channel transitions to BT_CONNECTED.
 *
 * Returns 0, or -ENOENT when the destination CID is unknown.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): lookup appears to return with the socket bh-locked
	 * (paired with bh_unlock_sock at `unlock`) -- confirm. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	if (chan->state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		/* 0x0002 = invalid CID in request */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unparseable options: tear the channel down. */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: channel is now open. */
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our side has not sent its Configure Request yet: do so now. */
	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2606
2607 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2608 {
2609 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2610 u16 scid, flags, result;
2611 struct l2cap_chan *chan;
2612 struct sock *sk;
2613 int len = cmd->len - sizeof(*rsp);
2614
2615 scid = __le16_to_cpu(rsp->scid);
2616 flags = __le16_to_cpu(rsp->flags);
2617 result = __le16_to_cpu(rsp->result);
2618
2619 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2620 scid, flags, result);
2621
2622 chan = l2cap_get_chan_by_scid(conn, scid);
2623 if (!chan)
2624 return 0;
2625
2626 sk = chan->sk;
2627
2628 switch (result) {
2629 case L2CAP_CONF_SUCCESS:
2630 l2cap_conf_rfc_get(chan, rsp->data, len);
2631 break;
2632
2633 case L2CAP_CONF_UNACCEPT:
2634 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2635 char req[64];
2636
2637 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2638 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2639 goto done;
2640 }
2641
2642 /* throw out any old stored conf requests */
2643 result = L2CAP_CONF_SUCCESS;
2644 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2645 req, &result);
2646 if (len < 0) {
2647 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2648 goto done;
2649 }
2650
2651 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2652 L2CAP_CONF_REQ, len, req);
2653 chan->num_conf_req++;
2654 if (result != L2CAP_CONF_SUCCESS)
2655 goto done;
2656 break;
2657 }
2658
2659 default:
2660 sk->sk_err = ECONNRESET;
2661 __set_chan_timer(chan, HZ * 5);
2662 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2663 goto done;
2664 }
2665
2666 if (flags & 0x01)
2667 goto done;
2668
2669 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
2670
2671 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2672 set_default_fcs(chan);
2673
2674 l2cap_state_change(chan, BT_CONNECTED);
2675 chan->next_tx_seq = 0;
2676 chan->expected_tx_seq = 0;
2677 skb_queue_head_init(&chan->tx_q);
2678 if (chan->mode == L2CAP_MODE_ERTM)
2679 l2cap_ertm_init(chan);
2680
2681 l2cap_chan_ready(sk);
2682 }
2683
2684 done:
2685 bh_unlock_sock(sk);
2686 return 0;
2687 }
2688
/* Handle an incoming L2CAP Disconnect Request: acknowledge with a
 * Disconnect Response, shut the socket down, and delete the channel.
 * If the socket is currently owned by a user context the deletion is
 * deferred to the channel timer instead of racing with it.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* Echo the CID pair back, swapped into the peer's perspective. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
2729
2730 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2731 {
2732 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2733 u16 dcid, scid;
2734 struct l2cap_chan *chan;
2735 struct sock *sk;
2736
2737 scid = __le16_to_cpu(rsp->scid);
2738 dcid = __le16_to_cpu(rsp->dcid);
2739
2740 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2741
2742 chan = l2cap_get_chan_by_scid(conn, scid);
2743 if (!chan)
2744 return 0;
2745
2746 sk = chan->sk;
2747
2748 /* don't delete l2cap channel if sk is owned by user */
2749 if (sock_owned_by_user(sk)) {
2750 l2cap_state_change(chan,BT_DISCONN);
2751 __clear_chan_timer(chan);
2752 __set_chan_timer(chan, HZ / 5);
2753 bh_unlock_sock(sk);
2754 return 0;
2755 }
2756
2757 l2cap_chan_del(chan, 0);
2758 bh_unlock_sock(sk);
2759
2760 chan->ops->close(chan->data);
2761 return 0;
2762 }
2763
/* Handle an incoming L2CAP Information Request.
 * Supported queries: the extended feature mask and the fixed-channel
 * map; anything else is answered with "not supported".
 * Response layout: 4-byte l2cap_info_rsp header followed by the data
 * (4-byte feature mask -> buf[8]; 8-byte channel map -> buf[12]).
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS only when ERTM is enabled. */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* buf + 4 == rsp->data: copy the 8-byte fixed channel map. */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
2803
/* Handle an incoming L2CAP Information Response.
 * Drives the two-step discovery started at connection setup: first the
 * feature mask, then (if the peer supports them) the fixed-channel map.
 * Once discovery completes, pending channels are kicked off via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: give up on discovery but still start
		 * the pending channels. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Second step: query the fixed-channel map. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2856
/* Validate LE connection parameters from a Connection Parameter Update
 * Request.  Intervals are in 1.25 ms units, the supervision timeout in
 * 10 ms units.  Returns 0 if acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 lat_limit;

	/* Connection interval: min in [6, max], max at most 3200. */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* Supervision timeout multiplier must lie in [10, 3200]. */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The supervision timeout must exceed the maximum interval. */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	/* Slave latency is capped absolutely (499) and by the timeout. */
	lat_limit = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > lat_limit)
		return -EINVAL;

	return 0;
}
2877
/* Handle an LE Connection Parameter Update Request (slave -> master).
 * Only valid when we are master; validates the proposed parameters,
 * always answers with accept/reject, and on accept asks the controller
 * to apply them.
 * Returns 0, -EINVAL if we are not master, -EPROTO on a malformed PDU.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Only push the new parameters to the controller when valid. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2919
2920 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2921 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2922 {
2923 int err = 0;
2924
2925 switch (cmd->code) {
2926 case L2CAP_COMMAND_REJ:
2927 l2cap_command_rej(conn, cmd, data);
2928 break;
2929
2930 case L2CAP_CONN_REQ:
2931 err = l2cap_connect_req(conn, cmd, data);
2932 break;
2933
2934 case L2CAP_CONN_RSP:
2935 err = l2cap_connect_rsp(conn, cmd, data);
2936 break;
2937
2938 case L2CAP_CONF_REQ:
2939 err = l2cap_config_req(conn, cmd, cmd_len, data);
2940 break;
2941
2942 case L2CAP_CONF_RSP:
2943 err = l2cap_config_rsp(conn, cmd, data);
2944 break;
2945
2946 case L2CAP_DISCONN_REQ:
2947 err = l2cap_disconnect_req(conn, cmd, data);
2948 break;
2949
2950 case L2CAP_DISCONN_RSP:
2951 err = l2cap_disconnect_rsp(conn, cmd, data);
2952 break;
2953
2954 case L2CAP_ECHO_REQ:
2955 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2956 break;
2957
2958 case L2CAP_ECHO_RSP:
2959 break;
2960
2961 case L2CAP_INFO_REQ:
2962 err = l2cap_information_req(conn, cmd, data);
2963 break;
2964
2965 case L2CAP_INFO_RSP:
2966 err = l2cap_information_rsp(conn, cmd, data);
2967 break;
2968
2969 default:
2970 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2971 err = -EINVAL;
2972 break;
2973 }
2974
2975 return err;
2976 }
2977
2978 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2979 struct l2cap_cmd_hdr *cmd, u8 *data)
2980 {
2981 switch (cmd->code) {
2982 case L2CAP_COMMAND_REJ:
2983 return 0;
2984
2985 case L2CAP_CONN_PARAM_UPDATE_REQ:
2986 return l2cap_conn_param_update_req(conn, cmd, data);
2987
2988 case L2CAP_CONN_PARAM_UPDATE_RSP:
2989 return 0;
2990
2991 default:
2992 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2993 return -EINVAL;
2994 }
2995 }
2996
/* Process the signalling channel of a connection: deliver the raw frame
 * to any raw sockets, then parse and dispatch every command PDU packed
 * into the frame.  A failed handler is answered with a Command Reject;
 * a truncated PDU or a reserved ident of 0 aborts parsing.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
							struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		/* Copy the header out: `data` may be unaligned. */
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3043
/* Verify the 2-byte CRC16 FCS trailer of an ERTM/streaming frame.
 * skb_trim() only shrinks skb->len, so the trailer octets are still
 * readable at skb->data + skb->len afterwards.  The CRC covers the
 * L2CAP header (which sits just before skb->data at this point) plus
 * the payload.  Returns 0 on match or when FCS is disabled, -EBADMSG
 * on a CRC mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	/* Basic L2CAP header plus the 2-byte control field. */
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
3059
/* Answer a poll (P-bit) from the peer: send RNR when we are locally
 * busy, retransmit if the peer was busy, push any pending I-frames, and
 * -- if nothing at all was sent -- fall back to a plain RR so the poll
 * is always acknowledged.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing went out above: acknowledge the poll with an RR. */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
3085
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by each frame's distance from buffer_seq (modulo-64
 * sequence space).  Returns 0 on insertion, -EINVAL if a frame with the
 * same tx_seq is already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequencing info in the skb control block for reassembly. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Normalize the (possibly wrapped) distance into [0, 63]. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame further from buffer_seq than ours:
		 * insert in front of it to keep the queue sorted. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
3127
/* Reassemble an SDU from ERTM I-frames according to the frame's SAR
 * (segmentation and reassembly) bits and deliver complete SDUs upward
 * via chan->ops->recv().
 *
 * SAR protocol violations tear the channel down (disconnect); size
 * violations additionally discard the partial SDU first (drop, which
 * falls through to disconnect).  Returns 0, the upper layer's result
 * for unsegmented frames, or a negative errno; -ENOMEM and a failed
 * recv() leave state set (SAR_RETRY) so the same END frame can be
 * re-processed once the local busy condition clears.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Unsegmented frame while reassembling: protocol error. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		return chan->ops->recv(chan->data, skb);

	case L2CAP_SDU_START:
		/* START while a reassembly is in progress: protocol error. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload octets carry the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > chan->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* On a retry (previous -ENOMEM / recv failure) the final
		 * fragment was already appended: don't append it twice. */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > chan->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = chan->ops->recv(chan->data, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

	/* fall through: a dropped SDU is a protocol error in ERTM,
	 * so the channel is disconnected as well */
disconnect:
	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3230
/* Try to drain the local-busy backlog (busy_q) into the reassembly
 * layer.  If a frame still cannot be delivered it is put back and
 * -EBUSY is returned, leaving the busy condition in place.  On full
 * drain the local-busy state is cleared and, if an RNR had been sent,
 * the peer is polled with an RR so transmission resumes.
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* Still busy: requeue at the head and give up. */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer "not ready" earlier: poll it with RR+P so it
	 * starts sending again, and await the F-bit response. */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("chan %p, Exit local busy", chan);

	return 0;
}
3269
/* Workqueue handler for the ERTM local-busy condition: periodically
 * retries pushing the busy_q backlog upward until it drains, the socket
 * errors out, a signal arrives, or the retry budget
 * (L2CAP_LOCAL_BUSY_TRIES) is exhausted -- in which case the channel is
 * disconnected.  Runs in process context and sleeps between attempts.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_chan *chan =
		container_of(work, struct l2cap_chan, busy_work);
	struct sock *sk = chan->sk;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(&chan->busy_q))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the lock while sleeping so the receive path can
		 * make progress. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(chan) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3316
3317 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3318 {
3319 int sctrl, err;
3320
3321 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3322 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3323 __skb_queue_tail(&chan->busy_q, skb);
3324 return l2cap_try_push_rx_skb(chan);
3325
3326
3327 }
3328
3329 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3330 if (err >= 0) {
3331 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3332 return err;
3333 }
3334
3335 /* Busy Condition */
3336 BT_DBG("chan %p, Enter local busy", chan);
3337
3338 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3339 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3340 __skb_queue_tail(&chan->busy_q, skb);
3341
3342 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3343 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3344 l2cap_send_sframe(chan, sctrl);
3345
3346 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3347
3348 __clear_ack_timer(chan);
3349
3350 queue_work(_busy_wq, &chan->busy_work);
3351
3352 return err;
3353 }
3354
3355 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3356 {
3357 struct sk_buff *_skb;
3358 int err = -EINVAL;
3359
3360 /*
3361 * TODO: We have to notify the userland if some data is lost with the
3362 * Streaming Mode.
3363 */
3364
3365 switch (control & L2CAP_CTRL_SAR) {
3366 case L2CAP_SDU_UNSEGMENTED:
3367 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3368 kfree_skb(chan->sdu);
3369 break;
3370 }
3371
3372 err = chan->ops->recv(chan->data, skb);
3373 if (!err)
3374 return 0;
3375
3376 break;
3377
3378 case L2CAP_SDU_START:
3379 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3380 kfree_skb(chan->sdu);
3381 break;
3382 }
3383
3384 chan->sdu_len = get_unaligned_le16(skb->data);
3385 skb_pull(skb, 2);
3386
3387 if (chan->sdu_len > chan->imtu) {
3388 err = -EMSGSIZE;
3389 break;
3390 }
3391
3392 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3393 if (!chan->sdu) {
3394 err = -ENOMEM;
3395 break;
3396 }
3397
3398 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3399
3400 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3401 chan->partial_sdu_len = skb->len;
3402 err = 0;
3403 break;
3404
3405 case L2CAP_SDU_CONTINUE:
3406 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3407 break;
3408
3409 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3410
3411 chan->partial_sdu_len += skb->len;
3412 if (chan->partial_sdu_len > chan->sdu_len)
3413 kfree_skb(chan->sdu);
3414 else
3415 err = 0;
3416
3417 break;
3418
3419 case L2CAP_SDU_END:
3420 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3421 break;
3422
3423 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3424
3425 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3426 chan->partial_sdu_len += skb->len;
3427
3428 if (chan->partial_sdu_len > chan->imtu)
3429 goto drop;
3430
3431 if (chan->partial_sdu_len == chan->sdu_len) {
3432 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3433 err = chan->ops->recv(chan->data, _skb);
3434 if (err < 0)
3435 kfree_skb(_skb);
3436 }
3437 err = 0;
3438
3439 drop:
3440 kfree_skb(chan->sdu);
3441 break;
3442 }
3443
3444 kfree_skb(skb);
3445 return err;
3446 }
3447
/* After a missing frame arrives, flush the now-contiguous run of queued
 * out-of-sequence frames (starting at tx_seq) from the SREJ queue into
 * the reassembly layer, advancing buffer_seq_srej for each one.  Stops
 * at the first remaining gap.
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q))) {
		/* Queue is sorted by sequence distance: a mismatch at the
		 * head means the run of contiguous frames has ended. */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(chan, skb, control);
		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
3465
3466 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3467 {
3468 struct srej_list *l, *tmp;
3469 u16 control;
3470
3471 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3472 if (l->tx_seq == tx_seq) {
3473 list_del(&l->list);
3474 kfree(l);
3475 return;
3476 }
3477 control = L2CAP_SUPER_SELECT_REJECT;
3478 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3479 l2cap_send_sframe(chan, control);
3480 list_del(&l->list);
3481 list_add_tail(&l->list, &chan->srej_l);
3482 }
3483 }
3484
3485 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3486 {
3487 struct srej_list *new;
3488 u16 control;
3489
3490 while (tx_seq != chan->expected_tx_seq) {
3491 control = L2CAP_SUPER_SELECT_REJECT;
3492 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3493 l2cap_send_sframe(chan, control);
3494
3495 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3496 new->tx_seq = chan->expected_tx_seq;
3497 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3498 list_add_tail(&new->list, &chan->srej_l);
3499 }
3500 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3501 }
3502
/* Core ERTM receive state machine for I-frames.
 *
 * Handles, in order: F-bit completion of a WAIT_F poll exchange,
 * acknowledgement piggy-backed in req_seq, and then the tx_seq itself --
 * either the expected in-sequence frame (delivered, ack bookkeeping) or
 * an out-of-sequence one (SREJ-based recovery: queue the frame, request
 * the missing ones, detect duplicates).  Frames outside the transmit
 * window disconnect the channel.  Always returns 0; ownership of skb is
 * taken on every path.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	/* Send an acknowledgment roughly every sixth of the window. */
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
							tx_seq, rx_control);

	/* F-bit answers our earlier poll: leave the WAIT_F state. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			chan->conn_state & L2CAP_CONN_WAIT_F) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	/* Piggy-backed acknowledgement. */
	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	/* Out of sequence: normalize distance from buffer_seq to [0, 63]. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY)
		goto drop;

	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* The oldest missing frame arrived: flush any
			 * contiguous run from the SREJ queue. */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				/* Recovery complete: resynchronize. */
				chan->buffer_seq = chan->buffer_seq_srej;
				chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					/* Already SREJ'd: resend requests. */
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First gap detected: enter SREJ recovery. */
		chan->conn_state |= L2CAP_CONN_SREJ_SENT;

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		__skb_queue_head_init(&chan->busy_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		chan->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(chan, tx_seq);

		__clear_ack_timer(chan);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* In-order frame during recovery: hold it in the SREJ
		 * queue until the gap before it is filled. */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_push_rx_skb(chan, skb, rx_control);
	if (err < 0)
		return 0;

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	}

	__set_ack_timer(chan);

	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3637
/* Handle a Receiver Ready (RR) S-frame: acknowledge frames up to
 * req_seq, answer a poll (P-bit) appropriately, complete our own poll
 * exchange on the F-bit, or simply resume transmission.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our reply must carry the F-bit. */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit answers our poll. */
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3679
/* Handle a received REJ (Reject) S-frame: the peer asks for
 * retransmission of every I-frame starting at req_seq.
 *
 * Frames up to req_seq are implicitly acknowledged first. On an F-bit
 * REJ the retransmission is skipped if REJ_ACT marks it as already
 * done; otherwise retransmit and, while waiting for an F bit (WAIT_F),
 * set REJ_ACT so the later F-bit response does not retransmit again.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
        u8 tx_seq = __get_reqseq(rx_control);

        BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

        chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

        chan->expected_ack_seq = tx_seq;
        l2cap_drop_acked_frames(chan);

        if (rx_control & L2CAP_CTRL_FINAL) {
                if (chan->conn_state & L2CAP_CONN_REJ_ACT)
                        chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
                else
                        l2cap_retransmit_frames(chan);
        } else {
                l2cap_retransmit_frames(chan);

                if (chan->conn_state & L2CAP_CONN_WAIT_F)
                        chan->conn_state |= L2CAP_CONN_REJ_ACT;
        }
}
/* Handle a received SREJ (Selective Reject) S-frame: the peer asks for
 * retransmission of the single I-frame with tx_seq == req_seq.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
        u8 tx_seq = __get_reqseq(rx_control);

        BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

        chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

        if (rx_control & L2CAP_CTRL_POLL) {
                /* SREJ with P bit also acks frames up to req_seq and
                 * obliges us to answer with the F bit set */
                chan->expected_ack_seq = tx_seq;
                l2cap_drop_acked_frames(chan);

                chan->conn_state |= L2CAP_CONN_SEND_FBIT;
                l2cap_retransmit_one_frame(chan, tx_seq);

                l2cap_ertm_send(chan);

                /* Remember which frame we already resent while waiting
                 * for the F bit, so a later SREJ+F for the same seq is
                 * not retransmitted twice */
                if (chan->conn_state & L2CAP_CONN_WAIT_F) {
                        chan->srej_save_reqseq = tx_seq;
                        chan->conn_state |= L2CAP_CONN_SREJ_ACT;
                }
        } else if (rx_control & L2CAP_CTRL_FINAL) {
                /* Skip retransmission if this exact frame was already
                 * resent in response to the polled SREJ */
                if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
                                chan->srej_save_reqseq == tx_seq)
                        chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
                else
                        l2cap_retransmit_one_frame(chan, tx_seq);
        } else {
                l2cap_retransmit_one_frame(chan, tx_seq);
                if (chan->conn_state & L2CAP_CONN_WAIT_F) {
                        chan->srej_save_reqseq = tx_seq;
                        chan->conn_state |= L2CAP_CONN_SREJ_ACT;
                }
        }
}
3738
/* Handle a received RNR (Receiver Not Ready) S-frame: the peer cannot
 * accept more I-frames, so mark it busy, drop acked frames and throttle
 * transmission until it reports ready again.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
        u8 tx_seq = __get_reqseq(rx_control);

        BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

        chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
        chan->expected_ack_seq = tx_seq;
        l2cap_drop_acked_frames(chan);

        if (rx_control & L2CAP_CTRL_POLL)
                chan->conn_state |= L2CAP_CONN_SEND_FBIT;

        if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
                /* No SREJ recovery in progress: stop retransmitting and,
                 * if polled, answer immediately with the F bit set */
                __clear_retrans_timer(chan);
                if (rx_control & L2CAP_CTRL_POLL)
                        l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
                return;
        }

        /* SREJ recovery active: keep asking for the missing frames */
        if (rx_control & L2CAP_CTRL_POLL)
                l2cap_send_srejtail(chan);
        else
                l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3764
3765 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3766 {
3767 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3768
3769 if (L2CAP_CTRL_FINAL & rx_control &&
3770 chan->conn_state & L2CAP_CONN_WAIT_F) {
3771 __clear_monitor_timer(chan);
3772 if (chan->unacked_frames > 0)
3773 __set_retrans_timer(chan);
3774 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3775 }
3776
3777 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3778 case L2CAP_SUPER_RCV_READY:
3779 l2cap_data_channel_rrframe(chan, rx_control);
3780 break;
3781
3782 case L2CAP_SUPER_REJECT:
3783 l2cap_data_channel_rejframe(chan, rx_control);
3784 break;
3785
3786 case L2CAP_SUPER_SELECT_REJECT:
3787 l2cap_data_channel_srejframe(chan, rx_control);
3788 break;
3789
3790 case L2CAP_SUPER_RCV_NOT_READY:
3791 l2cap_data_channel_rnrframe(chan, rx_control);
3792 break;
3793 }
3794
3795 kfree_skb(skb);
3796 return 0;
3797 }
3798
/* Validate and dispatch one received ERTM PDU for @sk's channel.
 *
 * Strips the 16-bit control field, checks FCS and length, validates
 * req_seq against the modulo-64 send window, then hands the frame to
 * the I-frame or S-frame handler. Bad-FCS frames are silently dropped;
 * protocol violations tear the connection down.
 * Always consumes @skb; always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct l2cap_chan *chan = l2cap_pi(sk)->chan;
        u16 control;
        u8 req_seq;
        int len, next_tx_seq_offset, req_seq_offset;

        control = get_unaligned_le16(skb->data);
        skb_pull(skb, 2);
        len = skb->len;

        /*
         * We can just drop the corrupted I-frame here.
         * Receiver will miss it and start proper recovery
         * procedures and ask retransmission.
         */
        if (l2cap_check_fcs(chan, skb))
                goto drop;

        /* A start-of-SDU I-frame carries a 2-byte SDU length field */
        if (__is_sar_start(control) && __is_iframe(control))
                len -= 2;

        /* FCS adds a 2-byte trailer that is not payload */
        if (chan->fcs == L2CAP_FCS_CRC16)
                len -= 2;

        if (len > chan->mps) {
                l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
                goto drop;
        }

        req_seq = __get_reqseq(control);
        req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
        if (req_seq_offset < 0)
                req_seq_offset += 64;

        next_tx_seq_offset =
                (chan->next_tx_seq - chan->expected_ack_seq) % 64;
        if (next_tx_seq_offset < 0)
                next_tx_seq_offset += 64;

        /* check for invalid req-seq */
        if (req_seq_offset > next_tx_seq_offset) {
                l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
                goto drop;
        }

        if (__is_iframe(control)) {
                /* negative len: frame shorter than its mandatory fields */
                if (len < 0) {
                        l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
                        goto drop;
                }

                l2cap_data_channel_iframe(chan, control, skb);
        } else {
                /* S-frames must not carry any payload */
                if (len != 0) {
                        BT_ERR("%d", len);
                        l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
                        goto drop;
                }

                l2cap_data_channel_sframe(chan, control, skb);
        }

        return 0;

drop:
        kfree_skb(skb);
        return 0;
}
3868
/* Deliver an incoming data frame on @cid to its channel according to
 * the channel mode (basic / ERTM / streaming).
 *
 * NOTE(review): the socket is unlocked at 'done' with no visible
 * bh_lock_sock(), which implies l2cap_get_chan_by_scid() returns with
 * the socket bh-locked -- confirm against its definition.
 * Always consumes @skb; always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
        struct l2cap_chan *chan;
        struct sock *sk = NULL;
        u16 control;
        u8 tx_seq;
        int len;

        chan = l2cap_get_chan_by_scid(conn, cid);
        if (!chan) {
                BT_DBG("unknown cid 0x%4.4x", cid);
                goto drop;
        }

        sk = chan->sk;

        BT_DBG("chan %p, len %d", chan, skb->len);

        if (chan->state != BT_CONNECTED)
                goto drop;

        switch (chan->mode) {
        case L2CAP_MODE_BASIC:
                /* If socket recv buffers overflows we drop data here
                 * which is *bad* because L2CAP has to be reliable.
                 * But we don't have any other choice. L2CAP doesn't
                 * provide flow control mechanism. */

                if (chan->imtu < skb->len)
                        goto drop;

                if (!chan->ops->recv(chan->data, skb))
                        goto done;
                break;

        case L2CAP_MODE_ERTM:
                /* Process directly only when no user context owns the
                 * socket; otherwise defer the frame via the backlog */
                if (!sock_owned_by_user(sk)) {
                        l2cap_ertm_data_rcv(sk, skb);
                } else {
                        if (sk_add_backlog(sk, skb))
                                goto drop;
                }

                goto done;

        case L2CAP_MODE_STREAMING:
                control = get_unaligned_le16(skb->data);
                skb_pull(skb, 2);
                len = skb->len;

                if (l2cap_check_fcs(chan, skb))
                        goto drop;

                /* SAR-start frames carry a 2-byte SDU length field */
                if (__is_sar_start(control))
                        len -= 2;

                if (chan->fcs == L2CAP_FCS_CRC16)
                        len -= 2;

                /* Streaming mode has no S-frames and no retransmission */
                if (len > chan->mps || len < 0 || __is_sframe(control))
                        goto drop;

                tx_seq = __get_txseq(control);

                /* Out-of-sequence frames are simply skipped over */
                if (chan->expected_tx_seq == tx_seq)
                        chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
                else
                        chan->expected_tx_seq = (tx_seq + 1) % 64;

                l2cap_streaming_reassembly_sdu(chan, skb, control);

                goto done;

        default:
                BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
                break;
        }

drop:
        kfree_skb(skb);

done:
        if (sk)
                bh_unlock_sock(sk);

        return 0;
}
3956
3957 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3958 {
3959 struct sock *sk = NULL;
3960 struct l2cap_chan *chan;
3961
3962 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3963 if (!chan)
3964 goto drop;
3965
3966 sk = chan->sk;
3967
3968 bh_lock_sock(sk);
3969
3970 BT_DBG("sk %p, len %d", sk, skb->len);
3971
3972 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3973 goto drop;
3974
3975 if (l2cap_pi(sk)->chan->imtu < skb->len)
3976 goto drop;
3977
3978 if (!chan->ops->recv(chan->data, skb))
3979 goto done;
3980
3981 drop:
3982 kfree_skb(skb);
3983
3984 done:
3985 if (sk)
3986 bh_unlock_sock(sk);
3987 return 0;
3988 }
3989
3990 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3991 {
3992 struct sock *sk = NULL;
3993 struct l2cap_chan *chan;
3994
3995 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3996 if (!chan)
3997 goto drop;
3998
3999 sk = chan->sk;
4000
4001 bh_lock_sock(sk);
4002
4003 BT_DBG("sk %p, len %d", sk, skb->len);
4004
4005 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4006 goto drop;
4007
4008 if (l2cap_pi(sk)->chan->imtu < skb->len)
4009 goto drop;
4010
4011 if (!chan->ops->recv(chan->data, skb))
4012 goto done;
4013
4014 drop:
4015 kfree_skb(skb);
4016
4017 done:
4018 if (sk)
4019 bh_unlock_sock(sk);
4020 return 0;
4021 }
4022
/* Demultiplex one complete L2CAP frame to the proper channel handler
 * based on its CID (signaling, connectionless, ATT/LE data, SMP or a
 * regular data channel). Frames whose header length disagrees with the
 * skb length are dropped. Always consumes @skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
        struct l2cap_hdr *lh = (void *) skb->data;
        u16 cid, len;
        __le16 psm;

        /* lh still points at the header; skb_pull() only advances the
         * data pointer past it */
        skb_pull(skb, L2CAP_HDR_SIZE);
        cid = __le16_to_cpu(lh->cid);
        len = __le16_to_cpu(lh->len);

        if (len != skb->len) {
                kfree_skb(skb);
                return;
        }

        BT_DBG("len %d, cid 0x%4.4x", len, cid);

        switch (cid) {
        case L2CAP_CID_LE_SIGNALING:
        case L2CAP_CID_SIGNALING:
                l2cap_sig_channel(conn, skb);
                break;

        case L2CAP_CID_CONN_LESS:
                /* Connectionless frames carry a 2-byte PSM first */
                psm = get_unaligned_le16(skb->data);
                skb_pull(skb, 2);
                l2cap_conless_channel(conn, psm, skb);
                break;

        case L2CAP_CID_LE_DATA:
                l2cap_att_channel(conn, cid, skb);
                break;

        case L2CAP_CID_SMP:
                /* A failed SMP exchange tears down the whole link */
                if (smp_sig_channel(conn, skb))
                        l2cap_conn_del(conn->hcon, EACCES);
                break;

        default:
                l2cap_data_channel(conn, cid, skb);
                break;
        }
}
4066
4067 /* ---- L2CAP interface with lower layer (HCI) ---- */
4068
/* HCI callback: decide whether to accept an incoming ACL connection
 * from @bdaddr based on the listening L2CAP channels.
 *
 * Returns an HCI_LM_* bitmask. Channels bound to the exact local
 * adapter address (lm1) take precedence over wildcard BDADDR_ANY
 * binds (lm2).
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        int exact = 0, lm1 = 0, lm2 = 0;
        struct l2cap_chan *c;

        if (type != ACL_LINK)
                return -EINVAL;

        BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

        /* Find listening sockets and check their link_mode */
        read_lock(&chan_list_lock);
        list_for_each_entry(c, &chan_list, global_l) {
                struct sock *sk = c->sk;

                if (c->state != BT_LISTEN)
                        continue;

                if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
                        lm1 |= HCI_LM_ACCEPT;
                        if (c->role_switch)
                                lm1 |= HCI_LM_MASTER;
                        exact++;
                } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
                        lm2 |= HCI_LM_ACCEPT;
                        if (c->role_switch)
                                lm2 |= HCI_LM_MASTER;
                }
        }
        read_unlock(&chan_list_lock);

        return exact ? lm1 : lm2;
}
4102
4103 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4104 {
4105 struct l2cap_conn *conn;
4106
4107 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4108
4109 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4110 return -EINVAL;
4111
4112 if (!status) {
4113 conn = l2cap_conn_add(hcon, status);
4114 if (conn)
4115 l2cap_conn_ready(conn);
4116 } else
4117 l2cap_conn_del(hcon, bt_err(status));
4118
4119 return 0;
4120 }
4121
/* HCI callback: report which HCI reason code to use when this link is
 * disconnected. 0x13 is "Remote User Terminated Connection", used as
 * the default when no L2CAP connection state exists.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
        struct l2cap_conn *conn = hcon->l2cap_data;

        BT_DBG("hcon %p", hcon);

        if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
                return 0x13;

        return conn->disc_reason;
}
4133
4134 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4135 {
4136 BT_DBG("hcon %p reason %d", hcon, reason);
4137
4138 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4139 return -EINVAL;
4140
4141 l2cap_conn_del(hcon, bt_err(reason));
4142
4143 return 0;
4144 }
4145
/* React to an encryption change on a connection-oriented channel.
 *
 * Encryption dropped: a medium-security channel gets a 5 second grace
 * timer before teardown; a high-security channel is closed at once.
 * Encryption (re)enabled: cancel a pending grace timer on a
 * medium-security channel.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
        if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
                return;

        if (encrypt == 0x00) {
                if (chan->sec_level == BT_SECURITY_MEDIUM) {
                        __clear_chan_timer(chan);
                        __set_chan_timer(chan, HZ * 5);
                } else if (chan->sec_level == BT_SECURITY_HIGH)
                        l2cap_chan_close(chan, ECONNREFUSED);
        } else {
                if (chan->sec_level == BT_SECURITY_MEDIUM)
                        __clear_chan_timer(chan);
        }
}
4162
/* HCI callback: a security procedure (authentication/encryption) on
 * @hcon finished with @status; @encrypt is the new encryption state.
 *
 * Walks every channel on the connection under chan_lock and advances
 * each channel's state machine: LE channels become ready once the link
 * is encrypted (e.g. after SMP finishes), BT_CONNECT channels send
 * their deferred connect request, and BT_CONNECT2 channels answer the
 * peer's pending connect request.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
        struct l2cap_conn *conn = hcon->l2cap_data;
        struct l2cap_chan *chan;

        if (!conn)
                return 0;

        BT_DBG("conn %p", conn);

        read_lock(&conn->chan_lock);

        list_for_each_entry(chan, &conn->chan_l, list) {
                struct sock *sk = chan->sk;

                bh_lock_sock(sk);

                BT_DBG("chan->scid %d", chan->scid);

                if (chan->scid == L2CAP_CID_LE_DATA) {
                        /* LE data channel becomes ready once the link
                         * is successfully encrypted */
                        if (!status && encrypt) {
                                chan->sec_level = hcon->sec_level;
                                l2cap_chan_ready(sk);
                        }

                        bh_unlock_sock(sk);
                        continue;
                }

                /* A connect request is already in flight for this
                 * channel: nothing more to do yet */
                if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
                        bh_unlock_sock(sk);
                        continue;
                }

                if (!status && (chan->state == BT_CONNECTED ||
                                                chan->state == BT_CONFIG)) {
                        l2cap_check_encryption(chan, encrypt);
                        bh_unlock_sock(sk);
                        continue;
                }

                if (chan->state == BT_CONNECT) {
                        if (!status) {
                                /* Security done: send the deferred
                                 * connect request now */
                                struct l2cap_conn_req req;
                                req.scid = cpu_to_le16(chan->scid);
                                req.psm = chan->psm;

                                chan->ident = l2cap_get_ident(conn);
                                chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

                                l2cap_send_cmd(conn, chan->ident,
                                        L2CAP_CONN_REQ, sizeof(req), &req);
                        } else {
                                /* Security failed: short timer tears
                                 * the channel down */
                                __clear_chan_timer(chan);
                                __set_chan_timer(chan, HZ / 10);
                        }
                } else if (chan->state == BT_CONNECT2) {
                        /* Incoming connection was waiting on security:
                         * answer the peer's connect request */
                        struct l2cap_conn_rsp rsp;
                        __u16 result;

                        if (!status) {
                                l2cap_state_change(chan, BT_CONFIG);
                                result = L2CAP_CR_SUCCESS;
                        } else {
                                l2cap_state_change(chan, BT_DISCONN);
                                __set_chan_timer(chan, HZ / 10);
                                result = L2CAP_CR_SEC_BLOCK;
                        }

                        rsp.scid = cpu_to_le16(chan->dcid);
                        rsp.dcid = cpu_to_le16(chan->scid);
                        rsp.result = cpu_to_le16(result);
                        rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
                        l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
                                                        sizeof(rsp), &rsp);
                }

                bh_unlock_sock(sk);
        }

        read_unlock(&conn->chan_lock);

        return 0;
}
4247
/* HCI callback: reassemble ACL data fragments into complete L2CAP
 * frames and feed them to l2cap_recv_frame().
 *
 * A start fragment (no ACL_CONT flag) carries the Basic L2CAP header
 * announcing the total frame length; continuation fragments are
 * appended to conn->rx_skb until conn->rx_len reaches zero.
 * Always consumes @skb; always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
        struct l2cap_conn *conn = hcon->l2cap_data;

        if (!conn)
                conn = l2cap_conn_add(hcon, 0);

        if (!conn)
                goto drop;

        BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

        if (!(flags & ACL_CONT)) {
                struct l2cap_hdr *hdr;
                struct l2cap_chan *chan;
                u16 cid;
                int len;

                /* A start fragment while reassembly is in progress
                 * means the previous frame was truncated: discard it */
                if (conn->rx_len) {
                        BT_ERR("Unexpected start frame (len %d)", skb->len);
                        kfree_skb(conn->rx_skb);
                        conn->rx_skb = NULL;
                        conn->rx_len = 0;
                        l2cap_conn_unreliable(conn, ECOMM);
                }

                /* Start fragment always begin with Basic L2CAP header */
                if (skb->len < L2CAP_HDR_SIZE) {
                        BT_ERR("Frame is too short (len %d)", skb->len);
                        l2cap_conn_unreliable(conn, ECOMM);
                        goto drop;
                }

                hdr = (struct l2cap_hdr *) skb->data;
                len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
                cid = __le16_to_cpu(hdr->cid);

                if (len == skb->len) {
                        /* Complete frame received */
                        l2cap_recv_frame(conn, skb);
                        return 0;
                }

                BT_DBG("Start: total len %d, frag len %d", len, skb->len);

                if (skb->len > len) {
                        BT_ERR("Frame is too long (len %d, expected len %d)",
                                skb->len, len);
                        l2cap_conn_unreliable(conn, ECOMM);
                        goto drop;
                }

                chan = l2cap_get_chan_by_scid(conn, cid);

                /* Reject frames exceeding the channel's incoming MTU
                 * before buffering the whole frame.
                 * NOTE(review): bh_unlock_sock() with no visible
                 * bh_lock_sock() implies l2cap_get_chan_by_scid()
                 * returns with the socket locked -- confirm. */
                if (chan && chan->sk) {
                        struct sock *sk = chan->sk;

                        if (chan->imtu < len - L2CAP_HDR_SIZE) {
                                BT_ERR("Frame exceeding recv MTU (len %d, "
                                                        "MTU %d)", len,
                                                        chan->imtu);
                                bh_unlock_sock(sk);
                                l2cap_conn_unreliable(conn, ECOMM);
                                goto drop;
                        }
                        bh_unlock_sock(sk);
                }

                /* Allocate skb for the complete frame (with header) */
                conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
                if (!conn->rx_skb)
                        goto drop;

                skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
                                        skb->len);
                conn->rx_len = len - skb->len;
        } else {
                BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

                if (!conn->rx_len) {
                        BT_ERR("Unexpected continuation frame (len %d)", skb->len);
                        l2cap_conn_unreliable(conn, ECOMM);
                        goto drop;
                }

                if (skb->len > conn->rx_len) {
                        BT_ERR("Fragment is too long (len %d, expected %d)",
                                        skb->len, conn->rx_len);
                        kfree_skb(conn->rx_skb);
                        conn->rx_skb = NULL;
                        conn->rx_len = 0;
                        l2cap_conn_unreliable(conn, ECOMM);
                        goto drop;
                }

                skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
                                        skb->len);
                conn->rx_len -= skb->len;

                if (!conn->rx_len) {
                        /* Complete frame received */
                        l2cap_recv_frame(conn, conn->rx_skb);
                        conn->rx_skb = NULL;
                }
        }

drop:
        kfree_skb(skb);
        return 0;
}
4358
4359 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4360 {
4361 struct l2cap_chan *c;
4362
4363 read_lock_bh(&chan_list_lock);
4364
4365 list_for_each_entry(c, &chan_list, global_l) {
4366 struct sock *sk = c->sk;
4367
4368 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4369 batostr(&bt_sk(sk)->src),
4370 batostr(&bt_sk(sk)->dst),
4371 c->state, __le16_to_cpu(c->psm),
4372 c->scid, c->dcid, c->imtu, c->omtu,
4373 c->sec_level, c->mode);
4374 }
4375
4376 read_unlock_bh(&chan_list_lock);
4377
4378 return 0;
4379 }
4380
/* debugfs open callback: wire the seq_file up to l2cap_debugfs_show() */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4385
/* File operations for the "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
        .open           = l2cap_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* Dentry of the debugfs entry, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;

/* L2CAP's registration with the HCI core: connection lifecycle,
 * security and ACL data callbacks */
static struct hci_proto l2cap_hci_proto = {
        .name           = "L2CAP",
        .id             = HCI_PROTO_L2CAP,
        .connect_ind    = l2cap_connect_ind,
        .connect_cfm    = l2cap_connect_cfm,
        .disconn_ind    = l2cap_disconn_ind,
        .disconn_cfm    = l2cap_disconn_cfm,
        .security_cfm   = l2cap_security_cfm,
        .recv_acldata   = l2cap_recv_acldata
};
4405
4406 int __init l2cap_init(void)
4407 {
4408 int err;
4409
4410 err = l2cap_init_sockets();
4411 if (err < 0)
4412 return err;
4413
4414 _busy_wq = create_singlethread_workqueue("l2cap");
4415 if (!_busy_wq) {
4416 err = -ENOMEM;
4417 goto error;
4418 }
4419
4420 err = hci_register_proto(&l2cap_hci_proto);
4421 if (err < 0) {
4422 BT_ERR("L2CAP protocol registration failed");
4423 bt_sock_unregister(BTPROTO_L2CAP);
4424 goto error;
4425 }
4426
4427 if (bt_debugfs) {
4428 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4429 bt_debugfs, NULL, &l2cap_debugfs_fops);
4430 if (!l2cap_debugfs)
4431 BT_ERR("Failed to create L2CAP debug file");
4432 }
4433
4434 return 0;
4435
4436 error:
4437 destroy_workqueue(_busy_wq);
4438 l2cap_cleanup_sockets();
4439 return err;
4440 }
4441
/* Unregister the L2CAP layer: debugfs entry, busy workqueue, HCI
 * protocol hooks and the socket family. Mirrors l2cap_init().
 */
void l2cap_exit(void)
{
        debugfs_remove(l2cap_debugfs);

        /* Let queued busy-work finish before tearing the queue down */
        flush_workqueue(_busy_wq);
        destroy_workqueue(_busy_wq);

        if (hci_unregister_proto(&l2cap_hci_proto) < 0)
                BT_ERR("L2CAP protocol unregistration failed");

        l2cap_cleanup_sockets();
}
4454
/* Module parameter: when set, ERTM is never offered/used for channels */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.154896 seconds and 5 git commands to generate.