net/bluetooth/l2cap_core.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP core. */
28
29#include <linux/module.h>
30
31#include <linux/types.h>
32#include <linux/capability.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
43#include <linux/list.h>
44#include <linux/device.h>
45#include <linux/debugfs.h>
46#include <linux/seq_file.h>
47#include <linux/uaccess.h>
48#include <linux/crc16.h>
49#include <net/sock.h>
50
51#include <asm/system.h>
52#include <asm/unaligned.h>
53
54#include <net/bluetooth/bluetooth.h>
55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h>
57#include <net/bluetooth/smp.h>
58
59int disable_ertm;
60
61static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62static u8 l2cap_fixed_chan[8] = { 0x02, };
63
64static LIST_HEAD(chan_list);
65static DEFINE_RWLOCK(chan_list_lock);
66
67static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 void *data);
71static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
74
75static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76
77/* ---- L2CAP channels ---- */
78
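/* Channel reference counting: chan_hold() takes a reference and
 * chan_put() drops it, freeing the channel when the count hits zero. */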
79static inline void chan_hold(struct l2cap_chan *c)
80{
81 atomic_inc(&c->refcnt);
82}
83
84static inline void chan_put(struct l2cap_chan *c)
85{
86 if (atomic_dec_and_test(&c->refcnt))
87 kfree(c);
88}
89
90static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
91{
92 struct l2cap_chan *c;
93
94 list_for_each_entry(c, &conn->chan_l, list) {
95 if (c->dcid == cid)
96 return c;
97 }
98 return NULL;
99
100}
101
102static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103{
104 struct l2cap_chan *c;
105
106 list_for_each_entry(c, &conn->chan_l, list) {
107 if (c->scid == cid)
108 return c;
109 }
110 return NULL;
111}
112
113/* Find channel with given SCID.
114 * Returns the channel with its socket locked */
115static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116{
117 struct l2cap_chan *c;
118
119 read_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c)
122 bh_lock_sock(c->sk);
123 read_unlock(&conn->chan_lock);
124 return c;
125}
126
127static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128{
129 struct l2cap_chan *c;
130
131 list_for_each_entry(c, &conn->chan_l, list) {
132 if (c->ident == ident)
133 return c;
134 }
135 return NULL;
136}
137
138static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
139{
140 struct l2cap_chan *c;
141
142 read_lock(&conn->chan_lock);
143 c = __l2cap_get_chan_by_ident(conn, ident);
144 if (c)
145 bh_lock_sock(c->sk);
146 read_unlock(&conn->chan_lock);
147 return c;
148}
149
150static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
151{
152 struct l2cap_chan *c;
153
154 list_for_each_entry(c, &chan_list, global_l) {
155 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
156 goto found;
157 }
158
159 c = NULL;
160found:
161 return c;
162}
163
164int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
165{
166 int err;
167
168 write_lock_bh(&chan_list_lock);
169
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
171 err = -EADDRINUSE;
172 goto done;
173 }
174
175 if (psm) {
176 chan->psm = psm;
177 chan->sport = psm;
178 err = 0;
179 } else {
180 u16 p;
181
182 err = -EINVAL;
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
187 err = 0;
188 break;
189 }
190 }
191
192done:
193 write_unlock_bh(&chan_list_lock);
194 return err;
195}
196
197int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
198{
199 write_lock_bh(&chan_list_lock);
200
201 chan->scid = scid;
202
203 write_unlock_bh(&chan_list_lock);
204
205 return 0;
206}
207
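/* Allocate the first unused dynamic CID on this connection.
 * Returns 0 if the dynamic range is exhausted. */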
208static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
209{
210 u16 cid = L2CAP_CID_DYN_START;
211
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
214 return cid;
215 }
216
217 return 0;
218}
219
220static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
221{
222 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
223
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
225 chan_hold(chan);
226}
227
228static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
229{
230 BT_DBG("chan %p state %d", chan, chan->state);
231
232 if (timer_pending(timer) && del_timer(timer))
233 chan_put(chan);
234}
235
236static void l2cap_state_change(struct l2cap_chan *chan, int state)
237{
238 chan->state = state;
239 chan->ops->state_change(chan->data, state);
240}
241
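/* Channel timer expiry: close the channel with ECONNREFUSED or
 * ETIMEDOUT depending on its state. If the socket is owned by user
 * context, re-arm the timer and retry shortly. */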
242static void l2cap_chan_timeout(unsigned long arg)
243{
244 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 struct sock *sk = chan->sk;
246 int reason;
247
248 BT_DBG("chan %p state %d", chan, chan->state);
249
250 bh_lock_sock(sk);
251
252 if (sock_owned_by_user(sk)) {
253 /* sk is owned by user. Try again later */
254 __set_chan_timer(chan, HZ / 5);
255 bh_unlock_sock(sk);
256 chan_put(chan);
257 return;
258 }
259
260 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 reason = ECONNREFUSED;
262 else if (chan->state == BT_CONNECT &&
263 chan->sec_level != BT_SECURITY_SDP)
264 reason = ECONNREFUSED;
265 else
266 reason = ETIMEDOUT;
267
268 l2cap_chan_close(chan, reason);
269
270 bh_unlock_sock(sk);
271
272 chan->ops->close(chan->data);
273 chan_put(chan);
274}
275
276struct l2cap_chan *l2cap_chan_create(struct sock *sk)
277{
278 struct l2cap_chan *chan;
279
280 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
281 if (!chan)
282 return NULL;
283
284 chan->sk = sk;
285
286 write_lock_bh(&chan_list_lock);
287 list_add(&chan->global_l, &chan_list);
288 write_unlock_bh(&chan_list_lock);
289
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
291
292 chan->state = BT_OPEN;
293
294 atomic_set(&chan->refcnt, 1);
295
296 return chan;
297}
298
299void l2cap_chan_destroy(struct l2cap_chan *chan)
300{
301 write_lock_bh(&chan_list_lock);
302 list_del(&chan->global_l);
303 write_unlock_bh(&chan_list_lock);
304
305 chan_put(chan);
306}
307
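/* Attach a channel to the connection: assign SCID/DCID and MTU based
 * on the channel type (connection-oriented, connectionless or raw)
 * and link it into the connection's channel list. */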
308static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
309{
310 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 chan->psm, chan->dcid);
312
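	/* HCI error 0x13 ("Remote User Terminated Connection") is used
	 * as the default disconnect reason. */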
313 conn->disc_reason = 0x13;
314
315 chan->conn = conn;
316
317 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 if (conn->hcon->type == LE_LINK) {
319 /* LE connection */
320 chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 chan->scid = L2CAP_CID_LE_DATA;
322 chan->dcid = L2CAP_CID_LE_DATA;
323 } else {
324 /* Alloc CID for connection-oriented socket */
325 chan->scid = l2cap_alloc_cid(conn);
326 chan->omtu = L2CAP_DEFAULT_MTU;
327 }
328 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 /* Connectionless socket */
330 chan->scid = L2CAP_CID_CONN_LESS;
331 chan->dcid = L2CAP_CID_CONN_LESS;
332 chan->omtu = L2CAP_DEFAULT_MTU;
333 } else {
334 /* Raw socket can send/recv signalling messages only */
335 chan->scid = L2CAP_CID_SIGNALING;
336 chan->dcid = L2CAP_CID_SIGNALING;
337 chan->omtu = L2CAP_DEFAULT_MTU;
338 }
339
340 chan_hold(chan);
341
342 list_add(&chan->list, &conn->chan_l);
343}
344
345/* Delete channel.
346 * Must be called on the locked socket. */
347static void l2cap_chan_del(struct l2cap_chan *chan, int err)
348{
349 struct sock *sk = chan->sk;
350 struct l2cap_conn *conn = chan->conn;
351 struct sock *parent = bt_sk(sk)->parent;
352
353 __clear_chan_timer(chan);
354
355 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
356
357 if (conn) {
358 /* Delete from channel list */
359 write_lock_bh(&conn->chan_lock);
360 list_del(&chan->list);
361 write_unlock_bh(&conn->chan_lock);
362 chan_put(chan);
363
364 chan->conn = NULL;
365 hci_conn_put(conn->hcon);
366 }
367
368 l2cap_state_change(chan, BT_CLOSED);
369 sock_set_flag(sk, SOCK_ZAPPED);
370
371 if (err)
372 sk->sk_err = err;
373
374 if (parent) {
375 bt_accept_unlink(sk);
376 parent->sk_data_ready(parent, 0);
377 } else
378 sk->sk_state_change(sk);
379
380 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
382 return;
383
384 skb_queue_purge(&chan->tx_q);
385
386 if (chan->mode == L2CAP_MODE_ERTM) {
387 struct srej_list *l, *tmp;
388
389 __clear_retrans_timer(chan);
390 __clear_monitor_timer(chan);
391 __clear_ack_timer(chan);
392
393 skb_queue_purge(&chan->srej_q);
394
395 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
396 list_del(&l->list);
397 kfree(l);
398 }
399 }
400}
401
402static void l2cap_chan_cleanup_listen(struct sock *parent)
403{
404 struct sock *sk;
405
406 BT_DBG("parent %p", parent);
407
408 /* Close not yet accepted channels */
409 while ((sk = bt_accept_dequeue(parent, NULL))) {
410 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 __clear_chan_timer(chan);
412 lock_sock(sk);
413 l2cap_chan_close(chan, ECONNRESET);
414 release_sock(sk);
415 chan->ops->close(chan->data);
416 }
417}
418
419void l2cap_chan_close(struct l2cap_chan *chan, int reason)
420{
421 struct l2cap_conn *conn = chan->conn;
422 struct sock *sk = chan->sk;
423
424 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
425
426 switch (chan->state) {
427 case BT_LISTEN:
428 l2cap_chan_cleanup_listen(sk);
429
430 l2cap_state_change(chan, BT_CLOSED);
431 sock_set_flag(sk, SOCK_ZAPPED);
432 break;
433
434 case BT_CONNECTED:
435 case BT_CONFIG:
436 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 conn->hcon->type == ACL_LINK) {
438 __clear_chan_timer(chan);
439 __set_chan_timer(chan, sk->sk_sndtimeo);
440 l2cap_send_disconn_req(conn, chan, reason);
441 } else
442 l2cap_chan_del(chan, reason);
443 break;
444
445 case BT_CONNECT2:
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 struct l2cap_conn_rsp rsp;
449 __u16 result;
450
451 if (bt_sk(sk)->defer_setup)
452 result = L2CAP_CR_SEC_BLOCK;
453 else
454 result = L2CAP_CR_BAD_PSM;
455 l2cap_state_change(chan, BT_DISCONN);
456
457 rsp.scid = cpu_to_le16(chan->dcid);
458 rsp.dcid = cpu_to_le16(chan->scid);
459 rsp.result = cpu_to_le16(result);
460 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
462 sizeof(rsp), &rsp);
463 }
464
465 l2cap_chan_del(chan, reason);
466 break;
467
468 case BT_CONNECT:
469 case BT_DISCONN:
470 l2cap_chan_del(chan, reason);
471 break;
472
473 default:
474 sock_set_flag(sk, SOCK_ZAPPED);
475 break;
476 }
477}
478
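/* Map the channel type, PSM and security level to the HCI
 * authentication requirement used when securing the link. */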
479static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
480{
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
482 switch (chan->sec_level) {
483 case BT_SECURITY_HIGH:
484 return HCI_AT_DEDICATED_BONDING_MITM;
485 case BT_SECURITY_MEDIUM:
486 return HCI_AT_DEDICATED_BONDING;
487 default:
488 return HCI_AT_NO_BONDING;
489 }
490 } else if (chan->psm == cpu_to_le16(0x0001)) {
491 if (chan->sec_level == BT_SECURITY_LOW)
492 chan->sec_level = BT_SECURITY_SDP;
493
494 if (chan->sec_level == BT_SECURITY_HIGH)
495 return HCI_AT_NO_BONDING_MITM;
496 else
497 return HCI_AT_NO_BONDING;
498 } else {
499 switch (chan->sec_level) {
500 case BT_SECURITY_HIGH:
501 return HCI_AT_GENERAL_BONDING_MITM;
502 case BT_SECURITY_MEDIUM:
503 return HCI_AT_GENERAL_BONDING;
504 default:
505 return HCI_AT_NO_BONDING;
506 }
507 }
508}
509
510/* Service level security */
511static inline int l2cap_check_security(struct l2cap_chan *chan)
512{
513 struct l2cap_conn *conn = chan->conn;
514 __u8 auth_type;
515
516 auth_type = l2cap_get_auth_type(chan);
517
518 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
519}
520
521static u8 l2cap_get_ident(struct l2cap_conn *conn)
522{
523 u8 id;
524
525 /* Get next available identifier.
526 * 1 - 128 are used by kernel.
527 * 129 - 199 are reserved.
528 * 200 - 254 are used by utilities like l2ping, etc.
529 */
530
531 spin_lock_bh(&conn->lock);
532
533 if (++conn->tx_ident > 128)
534 conn->tx_ident = 1;
535
536 id = conn->tx_ident;
537
538 spin_unlock_bh(&conn->lock);
539
540 return id;
541}
542
543static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
544{
545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
546 u8 flags;
547
548 BT_DBG("code 0x%2.2x", code);
549
550 if (!skb)
551 return;
552
553 if (lmp_no_flush_capable(conn->hcon->hdev))
554 flags = ACL_START_NO_FLUSH;
555 else
556 flags = ACL_START;
557
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
559
560 hci_send_acl(conn->hcon, skb, flags);
561}
562
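/* Build and send a supervisory (S) frame carrying the given control
 * field, setting the F and P bits when pending and appending an FCS
 * when configured. */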
563static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
564{
565 struct sk_buff *skb;
566 struct l2cap_hdr *lh;
567 struct l2cap_conn *conn = chan->conn;
568 int count, hlen = L2CAP_HDR_SIZE + 2;
569 u8 flags;
570
571 if (chan->state != BT_CONNECTED)
572 return;
573
574 if (chan->fcs == L2CAP_FCS_CRC16)
575 hlen += 2;
576
577 BT_DBG("chan %p, control 0x%2.2x", chan, control);
578
579 count = min_t(unsigned int, conn->mtu, hlen);
580 control |= L2CAP_CTRL_FRAME_TYPE;
581
582 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 control |= L2CAP_CTRL_FINAL;
584
585 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 control |= L2CAP_CTRL_POLL;
587
588 skb = bt_skb_alloc(count, GFP_ATOMIC);
589 if (!skb)
590 return;
591
592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 lh->cid = cpu_to_le16(chan->dcid);
595 put_unaligned_le16(control, skb_put(skb, 2));
596
597 if (chan->fcs == L2CAP_FCS_CRC16) {
598 u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 put_unaligned_le16(fcs, skb_put(skb, 2));
600 }
601
602 if (lmp_no_flush_capable(conn->hcon->hdev))
603 flags = ACL_START_NO_FLUSH;
604 else
605 flags = ACL_START;
606
607 bt_cb(skb)->force_active = chan->force_active;
608
609 hci_send_acl(chan->conn->hcon, skb, flags);
610}
611
612static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
613{
614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 control |= L2CAP_SUPER_RCV_NOT_READY;
616 set_bit(CONN_RNR_SENT, &chan->conn_state);
617 } else
618 control |= L2CAP_SUPER_RCV_READY;
619
620 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
621
622 l2cap_send_sframe(chan, control);
623}
624
625static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
626{
627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
628}
629
630static void l2cap_do_start(struct l2cap_chan *chan)
631{
632 struct l2cap_conn *conn = chan->conn;
633
634 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
635 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
636 return;
637
638 if (l2cap_check_security(chan) &&
639 __l2cap_no_conn_pending(chan)) {
640 struct l2cap_conn_req req;
641 req.scid = cpu_to_le16(chan->scid);
642 req.psm = chan->psm;
643
644 chan->ident = l2cap_get_ident(conn);
645 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
646
647 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
648 sizeof(req), &req);
649 }
650 } else {
651 struct l2cap_info_req req;
652 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
653
654 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 conn->info_ident = l2cap_get_ident(conn);
656
657 mod_timer(&conn->info_timer, jiffies +
658 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
659
660 l2cap_send_cmd(conn, conn->info_ident,
661 L2CAP_INFO_REQ, sizeof(req), &req);
662 }
663}
664
665static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
666{
667 u32 local_feat_mask = l2cap_feat_mask;
668 if (!disable_ertm)
669 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
670
671 switch (mode) {
672 case L2CAP_MODE_ERTM:
673 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 case L2CAP_MODE_STREAMING:
675 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
676 default:
677 return 0x00;
678 }
679}
680
681static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
682{
683 struct sock *sk;
684 struct l2cap_disconn_req req;
685
686 if (!conn)
687 return;
688
689 sk = chan->sk;
690
691 if (chan->mode == L2CAP_MODE_ERTM) {
692 __clear_retrans_timer(chan);
693 __clear_monitor_timer(chan);
694 __clear_ack_timer(chan);
695 }
696
697 req.dcid = cpu_to_le16(chan->dcid);
698 req.scid = cpu_to_le16(chan->scid);
699 l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 L2CAP_DISCONN_REQ, sizeof(req), &req);
701
702 l2cap_state_change(chan, BT_DISCONN);
703 sk->sk_err = err;
704}
705
706/* ---- L2CAP connections ---- */
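/* Walk all channels on the connection and move them forward: send a
 * Connection Request for channels in BT_CONNECT and a Connection
 * Response (plus the first Configure Request) for BT_CONNECT2. */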
707static void l2cap_conn_start(struct l2cap_conn *conn)
708{
709 struct l2cap_chan *chan, *tmp;
710
711 BT_DBG("conn %p", conn);
712
713 read_lock(&conn->chan_lock);
714
715 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 struct sock *sk = chan->sk;
717
718 bh_lock_sock(sk);
719
720 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
721 bh_unlock_sock(sk);
722 continue;
723 }
724
725 if (chan->state == BT_CONNECT) {
726 struct l2cap_conn_req req;
727
728 if (!l2cap_check_security(chan) ||
729 !__l2cap_no_conn_pending(chan)) {
730 bh_unlock_sock(sk);
731 continue;
732 }
733
734 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 && test_bit(CONF_STATE2_DEVICE,
736 &chan->conf_state)) {
737 /* l2cap_chan_close() calls list_del(chan)
738 * so release the lock */
739 read_unlock(&conn->chan_lock);
740 l2cap_chan_close(chan, ECONNRESET);
741 read_lock(&conn->chan_lock);
742 bh_unlock_sock(sk);
743 continue;
744 }
745
746 req.scid = cpu_to_le16(chan->scid);
747 req.psm = chan->psm;
748
749 chan->ident = l2cap_get_ident(conn);
750 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
751
752 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
753 sizeof(req), &req);
754
755 } else if (chan->state == BT_CONNECT2) {
756 struct l2cap_conn_rsp rsp;
757 char buf[128];
758 rsp.scid = cpu_to_le16(chan->dcid);
759 rsp.dcid = cpu_to_le16(chan->scid);
760
761 if (l2cap_check_security(chan)) {
762 if (bt_sk(sk)->defer_setup) {
763 struct sock *parent = bt_sk(sk)->parent;
764 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
766 if (parent)
767 parent->sk_data_ready(parent, 0);
768
769 } else {
770 l2cap_state_change(chan, BT_CONFIG);
771 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
772 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
773 }
774 } else {
775 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
776 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
777 }
778
779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
780 sizeof(rsp), &rsp);
781
782 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
783 rsp.result != L2CAP_CR_SUCCESS) {
784 bh_unlock_sock(sk);
785 continue;
786 }
787
788 set_bit(CONF_REQ_SENT, &chan->conf_state);
789 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
790 l2cap_build_conf_req(chan, buf), buf);
791 chan->num_conf_req++;
792 }
793
794 bh_unlock_sock(sk);
795 }
796
797 read_unlock(&conn->chan_lock);
798}
799
800/* Find channel with the given CID and source bdaddr.
801 * Returns the closest match.
802 */
803static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
804{
805 struct l2cap_chan *c, *c1 = NULL;
806
807 read_lock(&chan_list_lock);
808
809 list_for_each_entry(c, &chan_list, global_l) {
810 struct sock *sk = c->sk;
811
812 if (state && c->state != state)
813 continue;
814
815 if (c->scid == cid) {
816 /* Exact match. */
817 if (!bacmp(&bt_sk(sk)->src, src)) {
818 read_unlock(&chan_list_lock);
819 return c;
820 }
821
822 /* Closest match */
823 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
824 c1 = c;
825 }
826 }
827
828 read_unlock(&chan_list_lock);
829
830 return c1;
831}
832
833static void l2cap_le_conn_ready(struct l2cap_conn *conn)
834{
835 struct sock *parent, *sk;
836 struct l2cap_chan *chan, *pchan;
837
838 BT_DBG("");
839
840 /* Check if we have socket listening on cid */
841 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
842 conn->src);
843 if (!pchan)
844 return;
845
846 parent = pchan->sk;
847
848 bh_lock_sock(parent);
849
850 /* Check for backlog size */
851 if (sk_acceptq_is_full(parent)) {
852 BT_DBG("backlog full %d", parent->sk_ack_backlog);
853 goto clean;
854 }
855
856 chan = pchan->ops->new_connection(pchan->data);
857 if (!chan)
858 goto clean;
859
860 sk = chan->sk;
861
862 write_lock_bh(&conn->chan_lock);
863
864 hci_conn_hold(conn->hcon);
865
866 bacpy(&bt_sk(sk)->src, conn->src);
867 bacpy(&bt_sk(sk)->dst, conn->dst);
868
869 bt_accept_enqueue(parent, sk);
870
871 __l2cap_chan_add(conn, chan);
872
873 __set_chan_timer(chan, sk->sk_sndtimeo);
874
875 l2cap_state_change(chan, BT_CONNECTED);
876 parent->sk_data_ready(parent, 0);
877
878 write_unlock_bh(&conn->chan_lock);
879
880clean:
881 bh_unlock_sock(parent);
882}
883
884static void l2cap_chan_ready(struct sock *sk)
885{
886 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
887 struct sock *parent = bt_sk(sk)->parent;
888
889 BT_DBG("sk %p, parent %p", sk, parent);
890
891 chan->conf_state = 0;
892 __clear_chan_timer(chan);
893
894 l2cap_state_change(chan, BT_CONNECTED);
895 sk->sk_state_change(sk);
896
897 if (parent)
898 parent->sk_data_ready(parent, 0);
899}
900
901static void l2cap_conn_ready(struct l2cap_conn *conn)
902{
903 struct l2cap_chan *chan;
904
905 BT_DBG("conn %p", conn);
906
907 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
908 l2cap_le_conn_ready(conn);
909
910 if (conn->hcon->out && conn->hcon->type == LE_LINK)
911 smp_conn_security(conn, conn->hcon->pending_sec_level);
912
913 read_lock(&conn->chan_lock);
914
915 list_for_each_entry(chan, &conn->chan_l, list) {
916 struct sock *sk = chan->sk;
917
918 bh_lock_sock(sk);
919
920 if (conn->hcon->type == LE_LINK) {
921 if (smp_conn_security(conn, chan->sec_level))
922 l2cap_chan_ready(sk);
923
924 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
925 __clear_chan_timer(chan);
926 l2cap_state_change(chan, BT_CONNECTED);
927 sk->sk_state_change(sk);
928
929 } else if (chan->state == BT_CONNECT)
930 l2cap_do_start(chan);
931
932 bh_unlock_sock(sk);
933 }
934
935 read_unlock(&conn->chan_lock);
936}
937
938/* Notify sockets that we cannot guarantee reliability anymore */
939static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
940{
941 struct l2cap_chan *chan;
942
943 BT_DBG("conn %p", conn);
944
945 read_lock(&conn->chan_lock);
946
947 list_for_each_entry(chan, &conn->chan_l, list) {
948 struct sock *sk = chan->sk;
949
950 if (chan->force_reliable)
951 sk->sk_err = err;
952 }
953
954 read_unlock(&conn->chan_lock);
955}
956
957static void l2cap_info_timeout(unsigned long arg)
958{
959 struct l2cap_conn *conn = (void *) arg;
960
961 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
962 conn->info_ident = 0;
963
964 l2cap_conn_start(conn);
965}
966
967static void l2cap_conn_del(struct hci_conn *hcon, int err)
968{
969 struct l2cap_conn *conn = hcon->l2cap_data;
970 struct l2cap_chan *chan, *l;
971 struct sock *sk;
972
973 if (!conn)
974 return;
975
976 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
977
978 kfree_skb(conn->rx_skb);
979
980 /* Kill channels */
981 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
982 sk = chan->sk;
983 bh_lock_sock(sk);
984 l2cap_chan_del(chan, err);
985 bh_unlock_sock(sk);
986 chan->ops->close(chan->data);
987 }
988
989 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
990 del_timer_sync(&conn->info_timer);
991
992 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
993 del_timer(&conn->security_timer);
994 smp_chan_destroy(conn);
995 }
996
997 hcon->l2cap_data = NULL;
998 kfree(conn);
999}
1000
1001static void security_timeout(unsigned long arg)
1002{
1003 struct l2cap_conn *conn = (void *) arg;
1004
1005 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1006}
1007
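/* Allocate and initialise the L2CAP connection object for an HCI
 * connection: pick the MTU from the LE or ACL buffer size and arm the
 * security or info timer depending on the link type. */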
1008static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1009{
1010 struct l2cap_conn *conn = hcon->l2cap_data;
1011
1012 if (conn || status)
1013 return conn;
1014
1015 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1016 if (!conn)
1017 return NULL;
1018
1019 hcon->l2cap_data = conn;
1020 conn->hcon = hcon;
1021
1022 BT_DBG("hcon %p conn %p", hcon, conn);
1023
1024 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1025 conn->mtu = hcon->hdev->le_mtu;
1026 else
1027 conn->mtu = hcon->hdev->acl_mtu;
1028
1029 conn->src = &hcon->hdev->bdaddr;
1030 conn->dst = &hcon->dst;
1031
1032 conn->feat_mask = 0;
1033
1034 spin_lock_init(&conn->lock);
1035 rwlock_init(&conn->chan_lock);
1036
1037 INIT_LIST_HEAD(&conn->chan_l);
1038
1039 if (hcon->type == LE_LINK)
1040 setup_timer(&conn->security_timer, security_timeout,
1041 (unsigned long) conn);
1042 else
1043 setup_timer(&conn->info_timer, l2cap_info_timeout,
1044 (unsigned long) conn);
1045
1046 conn->disc_reason = 0x13;
1047
1048 return conn;
1049}
1050
1051static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1052{
1053 write_lock_bh(&conn->chan_lock);
1054 __l2cap_chan_add(conn, chan);
1055 write_unlock_bh(&conn->chan_lock);
1056}
1057
1058/* ---- Socket interface ---- */
1059
1060/* Find channel with the given PSM and source bdaddr.
1061 * Returns closest match.
1062 */
1063static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1064{
1065 struct l2cap_chan *c, *c1 = NULL;
1066
1067 read_lock(&chan_list_lock);
1068
1069 list_for_each_entry(c, &chan_list, global_l) {
1070 struct sock *sk = c->sk;
1071
1072 if (state && c->state != state)
1073 continue;
1074
1075 if (c->psm == psm) {
1076 /* Exact match. */
1077 if (!bacmp(&bt_sk(sk)->src, src)) {
1078 read_unlock(&chan_list_lock);
1079 return c;
1080 }
1081
1082 /* Closest match */
1083 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1084 c1 = c;
1085 }
1086 }
1087
1088 read_unlock(&chan_list_lock);
1089
1090 return c1;
1091}
1092
1093int l2cap_chan_connect(struct l2cap_chan *chan)
1094{
1095 struct sock *sk = chan->sk;
1096 bdaddr_t *src = &bt_sk(sk)->src;
1097 bdaddr_t *dst = &bt_sk(sk)->dst;
1098 struct l2cap_conn *conn;
1099 struct hci_conn *hcon;
1100 struct hci_dev *hdev;
1101 __u8 auth_type;
1102 int err;
1103
1104 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1105 chan->psm);
1106
1107 hdev = hci_get_route(dst, src);
1108 if (!hdev)
1109 return -EHOSTUNREACH;
1110
1111 hci_dev_lock_bh(hdev);
1112
1113 auth_type = l2cap_get_auth_type(chan);
1114
1115 if (chan->dcid == L2CAP_CID_LE_DATA)
1116 hcon = hci_connect(hdev, LE_LINK, dst,
1117 chan->sec_level, auth_type);
1118 else
1119 hcon = hci_connect(hdev, ACL_LINK, dst,
1120 chan->sec_level, auth_type);
1121
1122 if (IS_ERR(hcon)) {
1123 err = PTR_ERR(hcon);
1124 goto done;
1125 }
1126
1127 conn = l2cap_conn_add(hcon, 0);
1128 if (!conn) {
1129 hci_conn_put(hcon);
1130 err = -ENOMEM;
1131 goto done;
1132 }
1133
1134 /* Update source addr of the socket */
1135 bacpy(src, conn->src);
1136
1137 l2cap_chan_add(conn, chan);
1138
1139 l2cap_state_change(chan, BT_CONNECT);
1140 __set_chan_timer(chan, sk->sk_sndtimeo);
1141
1142 if (hcon->state == BT_CONNECTED) {
1143 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1144 __clear_chan_timer(chan);
1145 if (l2cap_check_security(chan))
1146 l2cap_state_change(chan, BT_CONNECTED);
1147 } else
1148 l2cap_do_start(chan);
1149 }
1150
1151 err = 0;
1152
1153done:
1154 hci_dev_unlock_bh(hdev);
1155 hci_dev_put(hdev);
1156 return err;
1157}
1158
1159int __l2cap_wait_ack(struct sock *sk)
1160{
1161 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1162 DECLARE_WAITQUEUE(wait, current);
1163 int err = 0;
1164 int timeo = HZ/5;
1165
1166 add_wait_queue(sk_sleep(sk), &wait);
1167 set_current_state(TASK_INTERRUPTIBLE);
1168 while (chan->unacked_frames > 0 && chan->conn) {
1169 if (!timeo)
1170 timeo = HZ/5;
1171
1172 if (signal_pending(current)) {
1173 err = sock_intr_errno(timeo);
1174 break;
1175 }
1176
1177 release_sock(sk);
1178 timeo = schedule_timeout(timeo);
1179 lock_sock(sk);
1180 set_current_state(TASK_INTERRUPTIBLE);
1181
1182 err = sock_error(sk);
1183 if (err)
1184 break;
1185 }
1186 set_current_state(TASK_RUNNING);
1187 remove_wait_queue(sk_sleep(sk), &wait);
1188 return err;
1189}
1190
1191static void l2cap_monitor_timeout(unsigned long arg)
1192{
1193 struct l2cap_chan *chan = (void *) arg;
1194 struct sock *sk = chan->sk;
1195
1196 BT_DBG("chan %p", chan);
1197
1198 bh_lock_sock(sk);
1199 if (chan->retry_count >= chan->remote_max_tx) {
1200 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1201 bh_unlock_sock(sk);
1202 return;
1203 }
1204
1205 chan->retry_count++;
1206 __set_monitor_timer(chan);
1207
1208 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1209 bh_unlock_sock(sk);
1210}
1211
1212static void l2cap_retrans_timeout(unsigned long arg)
1213{
1214 struct l2cap_chan *chan = (void *) arg;
1215 struct sock *sk = chan->sk;
1216
1217 BT_DBG("chan %p", chan);
1218
1219 bh_lock_sock(sk);
1220 chan->retry_count = 1;
1221 __set_monitor_timer(chan);
1222
1223 set_bit(CONN_WAIT_F, &chan->conn_state);
1224
1225 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1226 bh_unlock_sock(sk);
1227}
1228
1229static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1230{
1231 struct sk_buff *skb;
1232
1233 while ((skb = skb_peek(&chan->tx_q)) &&
1234 chan->unacked_frames) {
1235 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1236 break;
1237
1238 skb = skb_dequeue(&chan->tx_q);
1239 kfree_skb(skb);
1240
1241 chan->unacked_frames--;
1242 }
1243
1244 if (!chan->unacked_frames)
1245 __clear_retrans_timer(chan);
1246}
1247
1248void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1249{
1250 struct hci_conn *hcon = chan->conn->hcon;
1251 u16 flags;
1252
1253 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1254
1255 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1256 flags = ACL_START_NO_FLUSH;
1257 else
1258 flags = ACL_START;
1259
1260 bt_cb(skb)->force_active = chan->force_active;
1261 hci_send_acl(hcon, skb, flags);
1262}
1263
1264void l2cap_streaming_send(struct l2cap_chan *chan)
1265{
1266 struct sk_buff *skb;
1267 u16 control, fcs;
1268
1269 while ((skb = skb_dequeue(&chan->tx_q))) {
1270 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1271 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1272 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1273
1274 if (chan->fcs == L2CAP_FCS_CRC16) {
1275 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1276 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1277 }
1278
1279 l2cap_do_send(chan, skb);
1280
1281 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1282 }
1283}
1284
1285static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1286{
1287 struct sk_buff *skb, *tx_skb;
1288 u16 control, fcs;
1289
1290 skb = skb_peek(&chan->tx_q);
1291 if (!skb)
1292 return;
1293
1294 do {
1295 if (bt_cb(skb)->tx_seq == tx_seq)
1296 break;
1297
1298 if (skb_queue_is_last(&chan->tx_q, skb))
1299 return;
1300
1301 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1302
1303 if (chan->remote_max_tx &&
1304 bt_cb(skb)->retries == chan->remote_max_tx) {
1305 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1306 return;
1307 }
1308
1309 tx_skb = skb_clone(skb, GFP_ATOMIC);
1310 bt_cb(skb)->retries++;
1311 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1312 control &= L2CAP_CTRL_SAR;
1313
1314 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1315 control |= L2CAP_CTRL_FINAL;
1316
1317 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1318 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1319
1320 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1321
1322 if (chan->fcs == L2CAP_FCS_CRC16) {
1323 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1324 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1325 }
1326
1327 l2cap_do_send(chan, tx_skb);
1328}
1329
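/* Transmit queued I-frames while the transmit window has room,
 * stamping sequence numbers, bumping retry counters and re-arming the
 * retransmission timer. Returns the number of frames sent, or
 * -ENOTCONN if the channel is not connected. */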
1330int l2cap_ertm_send(struct l2cap_chan *chan)
1331{
1332 struct sk_buff *skb, *tx_skb;
1333 u16 control, fcs;
1334 int nsent = 0;
1335
1336 if (chan->state != BT_CONNECTED)
1337 return -ENOTCONN;
1338
1339 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1340
1341 if (chan->remote_max_tx &&
1342 bt_cb(skb)->retries == chan->remote_max_tx) {
1343 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1344 break;
1345 }
1346
1347 tx_skb = skb_clone(skb, GFP_ATOMIC);
1348
1349 bt_cb(skb)->retries++;
1350
1351 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1352 control &= L2CAP_CTRL_SAR;
1353
1354 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1355 control |= L2CAP_CTRL_FINAL;
1356
1357 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1358 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1359 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1360
1361
1362 if (chan->fcs == L2CAP_FCS_CRC16) {
1363 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1364 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1365 }
1366
1367 l2cap_do_send(chan, tx_skb);
1368
1369 __set_retrans_timer(chan);
1370
1371 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1372 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1373
1374 if (bt_cb(skb)->retries == 1)
1375 chan->unacked_frames++;
1376
1377 chan->frames_sent++;
1378
1379 if (skb_queue_is_last(&chan->tx_q, skb))
1380 chan->tx_send_head = NULL;
1381 else
1382 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1383
1384 nsent++;
1385 }
1386
1387 return nsent;
1388}
1389
1390static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1391{
1392 int ret;
1393
1394 if (!skb_queue_empty(&chan->tx_q))
1395 chan->tx_send_head = chan->tx_q.next;
1396
1397 chan->next_tx_seq = chan->expected_ack_seq;
1398 ret = l2cap_ertm_send(chan);
1399 return ret;
1400}
1401
1402static void l2cap_send_ack(struct l2cap_chan *chan)
1403{
1404 u16 control = 0;
1405
1406 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1407
1408 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1409 control |= L2CAP_SUPER_RCV_NOT_READY;
1410 set_bit(CONN_RNR_SENT, &chan->conn_state);
1411 l2cap_send_sframe(chan, control);
1412 return;
1413 }
1414
1415 if (l2cap_ertm_send(chan) > 0)
1416 return;
1417
1418 control |= L2CAP_SUPER_RCV_READY;
1419 l2cap_send_sframe(chan, control);
1420}
1421
1422static void l2cap_send_srejtail(struct l2cap_chan *chan)
1423{
1424 struct srej_list *tail;
1425 u16 control;
1426
1427 control = L2CAP_SUPER_SELECT_REJECT;
1428 control |= L2CAP_CTRL_FINAL;
1429
1430 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1431 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1432
1433 l2cap_send_sframe(chan, control);
1434}
1435
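/* Copy the user iovec into the skb, chaining continuation fragments
 * of at most conn->mtu bytes each. Returns the number of bytes copied
 * or a negative error. */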
1436static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1437{
1438 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1439 struct sk_buff **frag;
1440 int err, sent = 0;
1441
1442 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1443 return -EFAULT;
1444
1445 sent += count;
1446 len -= count;
1447
1448 /* Continuation fragments (no L2CAP header) */
1449 frag = &skb_shinfo(skb)->frag_list;
1450 while (len) {
1451 count = min_t(unsigned int, conn->mtu, len);
1452
1453 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1454 if (!*frag)
1455 return err;
1456 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1457 return -EFAULT;
1458
1459 sent += count;
1460 len -= count;
1461
1462 frag = &(*frag)->next;
1463 }
1464
1465 return sent;
1466}
1467
1468struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1469{
1470 struct sock *sk = chan->sk;
1471 struct l2cap_conn *conn = chan->conn;
1472 struct sk_buff *skb;
1473 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1474 struct l2cap_hdr *lh;
1475
1476 BT_DBG("sk %p len %d", sk, (int)len);
1477
1478 count = min_t(unsigned int, (conn->mtu - hlen), len);
1479 skb = bt_skb_send_alloc(sk, count + hlen,
1480 msg->msg_flags & MSG_DONTWAIT, &err);
1481 if (!skb)
1482 return ERR_PTR(err);
1483
1484 /* Create L2CAP header */
1485 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1486 lh->cid = cpu_to_le16(chan->dcid);
1487 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1488 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1489
1490 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1491 if (unlikely(err < 0)) {
1492 kfree_skb(skb);
1493 return ERR_PTR(err);
1494 }
1495 return skb;
1496}
1497
1498struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1499{
1500 struct sock *sk = chan->sk;
1501 struct l2cap_conn *conn = chan->conn;
1502 struct sk_buff *skb;
1503 int err, count, hlen = L2CAP_HDR_SIZE;
1504 struct l2cap_hdr *lh;
1505
1506 BT_DBG("sk %p len %d", sk, (int)len);
1507
1508 count = min_t(unsigned int, (conn->mtu - hlen), len);
1509 skb = bt_skb_send_alloc(sk, count + hlen,
1510 msg->msg_flags & MSG_DONTWAIT, &err);
1511 if (!skb)
1512 return ERR_PTR(err);
1513
1514 /* Create L2CAP header */
1515 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1516 lh->cid = cpu_to_le16(chan->dcid);
1517 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1518
1519 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1520 if (unlikely(err < 0)) {
1521 kfree_skb(skb);
1522 return ERR_PTR(err);
1523 }
1524 return skb;
1525}
1526
1527struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1528{
1529 struct sock *sk = chan->sk;
1530 struct l2cap_conn *conn = chan->conn;
1531 struct sk_buff *skb;
1532 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1533 struct l2cap_hdr *lh;
1534
1535 BT_DBG("sk %p len %d", sk, (int)len);
1536
1537 if (!conn)
1538 return ERR_PTR(-ENOTCONN);
1539
1540 if (sdulen)
1541 hlen += 2;
1542
1543 if (chan->fcs == L2CAP_FCS_CRC16)
1544 hlen += 2;
1545
1546 count = min_t(unsigned int, (conn->mtu - hlen), len);
1547 skb = bt_skb_send_alloc(sk, count + hlen,
1548 msg->msg_flags & MSG_DONTWAIT, &err);
1549 if (!skb)
1550 return ERR_PTR(err);
1551
1552 /* Create L2CAP header */
1553 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1554 lh->cid = cpu_to_le16(chan->dcid);
1555 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1556 put_unaligned_le16(control, skb_put(skb, 2));
1557 if (sdulen)
1558 put_unaligned_le16(sdulen, skb_put(skb, 2));
1559
1560 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1561 if (unlikely(err < 0)) {
1562 kfree_skb(skb);
1563 return ERR_PTR(err);
1564 }
1565
1566 if (chan->fcs == L2CAP_FCS_CRC16)
1567 put_unaligned_le16(0, skb_put(skb, 2));
1568
1569 bt_cb(skb)->retries = 0;
1570 return skb;
1571}
1572
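/* Segment an SDU into start/continuation/end I-frame PDUs of at most
 * remote_mps bytes each and append them to the transmit queue.
 * Returns the total number of bytes queued. */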
1573int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1574{
1575 struct sk_buff *skb;
1576 struct sk_buff_head sar_queue;
1577 u16 control;
1578 size_t size = 0;
1579
1580 skb_queue_head_init(&sar_queue);
1581 control = L2CAP_SDU_START;
1582 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1583 if (IS_ERR(skb))
1584 return PTR_ERR(skb);
1585
1586 __skb_queue_tail(&sar_queue, skb);
1587 len -= chan->remote_mps;
1588 size += chan->remote_mps;
1589
1590 while (len > 0) {
1591 size_t buflen;
1592
1593 if (len > chan->remote_mps) {
1594 control = L2CAP_SDU_CONTINUE;
1595 buflen = chan->remote_mps;
1596 } else {
1597 control = L2CAP_SDU_END;
1598 buflen = len;
1599 }
1600
1601 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1602 if (IS_ERR(skb)) {
1603 skb_queue_purge(&sar_queue);
1604 return PTR_ERR(skb);
1605 }
1606
1607 __skb_queue_tail(&sar_queue, skb);
1608 len -= buflen;
1609 size += buflen;
1610 }
1611 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1612 if (chan->tx_send_head == NULL)
1613 chan->tx_send_head = sar_queue.next;
1614
1615 return size;
1616}
1617
1618int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1619{
1620 struct sk_buff *skb;
1621 u16 control;
1622 int err;
1623
1624 /* Connectionless channel */
1625 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1626 skb = l2cap_create_connless_pdu(chan, msg, len);
1627 if (IS_ERR(skb))
1628 return PTR_ERR(skb);
1629
1630 l2cap_do_send(chan, skb);
1631 return len;
1632 }
1633
1634 switch (chan->mode) {
1635 case L2CAP_MODE_BASIC:
1636 /* Check outgoing MTU */
1637 if (len > chan->omtu)
1638 return -EMSGSIZE;
1639
1640 /* Create a basic PDU */
1641 skb = l2cap_create_basic_pdu(chan, msg, len);
1642 if (IS_ERR(skb))
1643 return PTR_ERR(skb);
1644
1645 l2cap_do_send(chan, skb);
1646 err = len;
1647 break;
1648
1649 case L2CAP_MODE_ERTM:
1650 case L2CAP_MODE_STREAMING:
1651 /* Entire SDU fits into one PDU */
1652 if (len <= chan->remote_mps) {
1653 control = L2CAP_SDU_UNSEGMENTED;
1654 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1655 0);
1656 if (IS_ERR(skb))
1657 return PTR_ERR(skb);
1658
1659 __skb_queue_tail(&chan->tx_q, skb);
1660
1661 if (chan->tx_send_head == NULL)
1662 chan->tx_send_head = skb;
1663
1664 } else {
1665 /* Segment SDU into multiple PDUs */
1666 err = l2cap_sar_segment_sdu(chan, msg, len);
1667 if (err < 0)
1668 return err;
1669 }
1670
1671 if (chan->mode == L2CAP_MODE_STREAMING) {
1672 l2cap_streaming_send(chan);
1673 err = len;
1674 break;
1675 }
1676
1677 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1678 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1679 err = len;
1680 break;
1681 }
1682
1683 err = l2cap_ertm_send(chan);
1684 if (err >= 0)
1685 err = len;
1686
1687 break;
1688
1689 default:
1690 BT_DBG("bad state %1.1x", chan->mode);
1691 err = -EBADFD;
1692 }
1693
1694 return err;
1695}
1696
1697/* Copy frame to all raw sockets on that connection */
1698static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1699{
1700 struct sk_buff *nskb;
1701 struct l2cap_chan *chan;
1702
1703 BT_DBG("conn %p", conn);
1704
1705 read_lock(&conn->chan_lock);
1706 list_for_each_entry(chan, &conn->chan_l, list) {
1707 struct sock *sk = chan->sk;
1708 if (chan->chan_type != L2CAP_CHAN_RAW)
1709 continue;
1710
1711 /* Don't send frame to the socket it came from */
1712 if (skb->sk == sk)
1713 continue;
1714 nskb = skb_clone(skb, GFP_ATOMIC);
1715 if (!nskb)
1716 continue;
1717
1718 if (chan->ops->recv(chan->data, nskb))
1719 kfree_skb(nskb);
1720 }
1721 read_unlock(&conn->chan_lock);
1722}
1723
1724/* ---- L2CAP signalling commands ---- */
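/* Build a signalling PDU (L2CAP header, command header and payload)
 * on the BR/EDR or LE signalling CID, fragmenting the payload to
 * conn->mtu. */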
1725static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1726 u8 code, u8 ident, u16 dlen, void *data)
1727{
1728 struct sk_buff *skb, **frag;
1729 struct l2cap_cmd_hdr *cmd;
1730 struct l2cap_hdr *lh;
1731 int len, count;
1732
1733 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1734 conn, code, ident, dlen);
1735
1736 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1737 count = min_t(unsigned int, conn->mtu, len);
1738
1739 skb = bt_skb_alloc(count, GFP_ATOMIC);
1740 if (!skb)
1741 return NULL;
1742
1743 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1744 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1745
1746 if (conn->hcon->type == LE_LINK)
1747 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1748 else
1749 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1750
1751 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1752 cmd->code = code;
1753 cmd->ident = ident;
1754 cmd->len = cpu_to_le16(dlen);
1755
1756 if (dlen) {
1757 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1758 memcpy(skb_put(skb, count), data, count);
1759 data += count;
1760 }
1761
1762 len -= skb->len;
1763
1764 /* Continuation fragments (no L2CAP header) */
1765 frag = &skb_shinfo(skb)->frag_list;
1766 while (len) {
1767 count = min_t(unsigned int, conn->mtu, len);
1768
1769 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1770 if (!*frag)
1771 goto fail;
1772
1773 memcpy(skb_put(*frag, count), data, count);
1774
1775 len -= count;
1776 data += count;
1777
1778 frag = &(*frag)->next;
1779 }
1780
1781 return skb;
1782
1783fail:
1784 kfree_skb(skb);
1785 return NULL;
1786}
1787
1788static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1789{
1790 struct l2cap_conf_opt *opt = *ptr;
1791 int len;
1792
1793 len = L2CAP_CONF_OPT_SIZE + opt->len;
1794 *ptr += len;
1795
1796 *type = opt->type;
1797 *olen = opt->len;
1798
1799 switch (opt->len) {
1800 case 1:
1801 *val = *((u8 *) opt->val);
1802 break;
1803
1804 case 2:
1805 *val = get_unaligned_le16(opt->val);
1806 break;
1807
1808 case 4:
1809 *val = get_unaligned_le32(opt->val);
1810 break;
1811
1812 default:
1813 *val = (unsigned long) opt->val;
1814 break;
1815 }
1816
1817 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1818 return len;
1819}
1820
1821static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1822{
1823 struct l2cap_conf_opt *opt = *ptr;
1824
1825 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1826
1827 opt->type = type;
1828 opt->len = len;
1829
1830 switch (len) {
1831 case 1:
1832 *((u8 *) opt->val) = val;
1833 break;
1834
1835 case 2:
1836 put_unaligned_le16(val, opt->val);
1837 break;
1838
1839 case 4:
1840 put_unaligned_le32(val, opt->val);
1841 break;
1842
1843 default:
1844 memcpy(opt->val, (void *) val, len);
1845 break;
1846 }
1847
1848 *ptr += L2CAP_CONF_OPT_SIZE + len;
1849}
1850
1851static void l2cap_ack_timeout(unsigned long arg)
1852{
1853 struct l2cap_chan *chan = (void *) arg;
1854
1855 bh_lock_sock(chan->sk);
1856 l2cap_send_ack(chan);
1857 bh_unlock_sock(chan->sk);
1858}
1859
1860static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1861{
1862 struct sock *sk = chan->sk;
1863
1864 chan->expected_ack_seq = 0;
1865 chan->unacked_frames = 0;
1866 chan->buffer_seq = 0;
1867 chan->num_acked = 0;
1868 chan->frames_sent = 0;
1869
1870 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1871 (unsigned long) chan);
1872 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1873 (unsigned long) chan);
1874 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1875
1876 skb_queue_head_init(&chan->srej_q);
1877
1878 INIT_LIST_HEAD(&chan->srej_l);
1879
1880
1881 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1882}
1883
1884static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1885{
1886 switch (mode) {
1887 case L2CAP_MODE_STREAMING:
1888 case L2CAP_MODE_ERTM:
1889 if (l2cap_mode_supported(mode, remote_feat_mask))
1890 return mode;
1891 /* fall through */
1892 default:
1893 return L2CAP_MODE_BASIC;
1894 }
1895}
1896
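/* Build our Configure Request: settle on a channel mode from the
 * local and remote feature masks, then add the MTU, RFC and optional
 * FCS options. Returns the length of the request. */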
1897static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1898{
1899 struct l2cap_conf_req *req = data;
1900 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1901 void *ptr = req->data;
1902
1903 BT_DBG("chan %p", chan);
1904
1905 if (chan->num_conf_req || chan->num_conf_rsp)
1906 goto done;
1907
1908 switch (chan->mode) {
1909 case L2CAP_MODE_STREAMING:
1910 case L2CAP_MODE_ERTM:
1911 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1912 break;
1913
1914 /* fall through */
1915 default:
1916 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1917 break;
1918 }
1919
1920done:
1921 if (chan->imtu != L2CAP_DEFAULT_MTU)
1922 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1923
1924 switch (chan->mode) {
1925 case L2CAP_MODE_BASIC:
1926 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1927 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1928 break;
1929
1930 rfc.mode = L2CAP_MODE_BASIC;
1931 rfc.txwin_size = 0;
1932 rfc.max_transmit = 0;
1933 rfc.retrans_timeout = 0;
1934 rfc.monitor_timeout = 0;
1935 rfc.max_pdu_size = 0;
1936
1937 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1938 (unsigned long) &rfc);
1939 break;
1940
1941 case L2CAP_MODE_ERTM:
1942 rfc.mode = L2CAP_MODE_ERTM;
1943 rfc.txwin_size = chan->tx_win;
1944 rfc.max_transmit = chan->max_tx;
1945 rfc.retrans_timeout = 0;
1946 rfc.monitor_timeout = 0;
1947 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1948 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1949 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1950
1951 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1952 (unsigned long) &rfc);
1953
1954 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1955 break;
1956
1957 if (chan->fcs == L2CAP_FCS_NONE ||
1958 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1959 chan->fcs = L2CAP_FCS_NONE;
1960 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1961 }
1962 break;
1963
1964 case L2CAP_MODE_STREAMING:
1965 rfc.mode = L2CAP_MODE_STREAMING;
1966 rfc.txwin_size = 0;
1967 rfc.max_transmit = 0;
1968 rfc.retrans_timeout = 0;
1969 rfc.monitor_timeout = 0;
1970 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1971 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1972 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1973
1974 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1975 (unsigned long) &rfc);
1976
1977 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1978 break;
1979
1980 if (chan->fcs == L2CAP_FCS_NONE ||
1981 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1982 chan->fcs = L2CAP_FCS_NONE;
1983 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1984 }
1985 break;
1986 }
1987
1988 req->dcid = cpu_to_le16(chan->dcid);
1989 req->flags = cpu_to_le16(0);
1990
1991 return ptr - data;
1992}
1993
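/* Parse the peer's Configure Request options (MTU, flush timeout,
 * RFC, FCS), flag unknown non-hint options, and build our Configure
 * Response. Returns the response length, or -ECONNREFUSED if the
 * requested mode cannot be accepted. */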
1994static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1995{
1996 struct l2cap_conf_rsp *rsp = data;
1997 void *ptr = rsp->data;
1998 void *req = chan->conf_req;
1999 int len = chan->conf_len;
2000 int type, hint, olen;
2001 unsigned long val;
2002 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2003 u16 mtu = L2CAP_DEFAULT_MTU;
2004 u16 result = L2CAP_CONF_SUCCESS;
2005
2006 BT_DBG("chan %p", chan);
2007
2008 while (len >= L2CAP_CONF_OPT_SIZE) {
2009 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2010
2011 hint = type & L2CAP_CONF_HINT;
2012 type &= L2CAP_CONF_MASK;
2013
2014 switch (type) {
2015 case L2CAP_CONF_MTU:
2016 mtu = val;
2017 break;
2018
2019 case L2CAP_CONF_FLUSH_TO:
2020 chan->flush_to = val;
2021 break;
2022
2023 case L2CAP_CONF_QOS:
2024 break;
2025
2026 case L2CAP_CONF_RFC:
2027 if (olen == sizeof(rfc))
2028 memcpy(&rfc, (void *) val, olen);
2029 break;
2030
2031 case L2CAP_CONF_FCS:
2032 if (val == L2CAP_FCS_NONE)
2033 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2034
2035 break;
2036
2037 default:
2038 if (hint)
2039 break;
2040
2041 result = L2CAP_CONF_UNKNOWN;
2042 *((u8 *) ptr++) = type;
2043 break;
2044 }
2045 }
2046
2047 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2048 goto done;
2049
2050 switch (chan->mode) {
2051 case L2CAP_MODE_STREAMING:
2052 case L2CAP_MODE_ERTM:
2053 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2054 chan->mode = l2cap_select_mode(rfc.mode,
2055 chan->conn->feat_mask);
2056 break;
2057 }
2058
2059 if (chan->mode != rfc.mode)
2060 return -ECONNREFUSED;
2061
2062 break;
2063 }
2064
2065done:
2066 if (chan->mode != rfc.mode) {
2067 result = L2CAP_CONF_UNACCEPT;
2068 rfc.mode = chan->mode;
2069
2070 if (chan->num_conf_rsp == 1)
2071 return -ECONNREFUSED;
2072
2073 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2074 sizeof(rfc), (unsigned long) &rfc);
2075 }
2076
2077
2078 if (result == L2CAP_CONF_SUCCESS) {
2079 /* Configure output options and let the other side know
2080 * which ones we don't like. */
2081
2082 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2083 result = L2CAP_CONF_UNACCEPT;
2084 else {
2085 chan->omtu = mtu;
2086 set_bit(CONF_MTU_DONE, &chan->conf_state);
2087 }
2088 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2089
2090 switch (rfc.mode) {
2091 case L2CAP_MODE_BASIC:
2092 chan->fcs = L2CAP_FCS_NONE;
2093 set_bit(CONF_MODE_DONE, &chan->conf_state);
2094 break;
2095
2096 case L2CAP_MODE_ERTM:
2097 chan->remote_tx_win = rfc.txwin_size;
2098 chan->remote_max_tx = rfc.max_transmit;
2099
2100 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2101 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2102
2103 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2104
2105 rfc.retrans_timeout =
2106 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2107 rfc.monitor_timeout =
2108 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2109
2110 set_bit(CONF_MODE_DONE, &chan->conf_state);
2111
2112 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2113 sizeof(rfc), (unsigned long) &rfc);
2114
2115 break;
2116
2117 case L2CAP_MODE_STREAMING:
2118 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2119 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2120
2121 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2122
2123 set_bit(CONF_MODE_DONE, &chan->conf_state);
2124
2125 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2126 sizeof(rfc), (unsigned long) &rfc);
2127
2128 break;
2129
2130 default:
2131 result = L2CAP_CONF_UNACCEPT;
2132
2133 memset(&rfc, 0, sizeof(rfc));
2134 rfc.mode = chan->mode;
2135 }
2136
2137 if (result == L2CAP_CONF_SUCCESS)
2138 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2139 }
2140 rsp->scid = cpu_to_le16(chan->dcid);
2141 rsp->result = cpu_to_le16(result);
2142 rsp->flags = cpu_to_le16(0x0000);
2143
2144 return ptr - data;
2145}
2146
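/* Process the peer's Configure Response and build the follow-up
 * Configure Request, adopting the negotiated MTU, flush timeout and
 * RFC parameters. Returns the new request length or -ECONNREFUSED. */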
2147static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2148{
2149 struct l2cap_conf_req *req = data;
2150 void *ptr = req->data;
2151 int type, olen;
2152 unsigned long val;
2153 struct l2cap_conf_rfc rfc;
2154
2155 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2156
2157 while (len >= L2CAP_CONF_OPT_SIZE) {
2158 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2159
2160 switch (type) {
2161 case L2CAP_CONF_MTU:
2162 if (val < L2CAP_DEFAULT_MIN_MTU) {
2163 *result = L2CAP_CONF_UNACCEPT;
2164 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2165 } else
2166 chan->imtu = val;
2167 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2168 break;
2169
2170 case L2CAP_CONF_FLUSH_TO:
2171 chan->flush_to = val;
2172 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2173 2, chan->flush_to);
2174 break;
2175
2176 case L2CAP_CONF_RFC:
2177 if (olen == sizeof(rfc))
2178 memcpy(&rfc, (void *)val, olen);
2179
2180 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2181 rfc.mode != chan->mode)
2182 return -ECONNREFUSED;
2183
2184 chan->fcs = 0;
2185
2186 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2187 sizeof(rfc), (unsigned long) &rfc);
2188 break;
2189 }
2190 }
2191
2192 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2193 return -ECONNREFUSED;
2194
2195 chan->mode = rfc.mode;
2196
2197 if (*result == L2CAP_CONF_SUCCESS) {
2198 switch (rfc.mode) {
2199 case L2CAP_MODE_ERTM:
2200 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2201 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2202 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2203 break;
2204 case L2CAP_MODE_STREAMING:
2205 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2206 }
2207 }
2208
2209 req->dcid = cpu_to_le16(chan->dcid);
2210 req->flags = cpu_to_le16(0x0000);
2211
2212 return ptr - data;
2213}
2214
2215static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2216{
2217 struct l2cap_conf_rsp *rsp = data;
2218 void *ptr = rsp->data;
2219
2220 BT_DBG("chan %p", chan);
2221
2222 rsp->scid = cpu_to_le16(chan->dcid);
2223 rsp->result = cpu_to_le16(result);
2224 rsp->flags = cpu_to_le16(flags);
2225
2226 return ptr - data;
2227}
2228
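/* Send the Connect Response that was deferred while user space decided
 * whether to accept the channel, then start configuration if a
 * Configure Request has not been sent yet.
 */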
2229void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2230{
2231 struct l2cap_conn_rsp rsp;
2232 struct l2cap_conn *conn = chan->conn;
2233 u8 buf[128];
2234
2235 rsp.scid = cpu_to_le16(chan->dcid);
2236 rsp.dcid = cpu_to_le16(chan->scid);
2237 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2238 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2239 l2cap_send_cmd(conn, chan->ident,
2240 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2241
2242 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2243 return;
2244
2245 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2246 l2cap_build_conf_req(chan, buf), buf);
2247 chan->num_conf_req++;
2248}
2249
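/* Extract the RFC option from a successful Configure Response and cache
 * the negotiated retransmission/monitor timeouts and MPS for ERTM or
 * streaming mode channels.
 */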
2250static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2251{
2252 int type, olen;
2253 unsigned long val;
 2254 struct l2cap_conf_rfc rfc = {
 .mode = chan->mode,
 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
 .max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE),
 };
2255
2256 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2257
2258 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2259 return;
2260
2261 while (len >= L2CAP_CONF_OPT_SIZE) {
2262 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2263
2264 switch (type) {
2265 case L2CAP_CONF_RFC:
2266 if (olen == sizeof(rfc))
2267 memcpy(&rfc, (void *)val, olen);
2268 goto done;
2269 }
2270 }
2271
2272done:
2273 switch (rfc.mode) {
2274 case L2CAP_MODE_ERTM:
2275 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2276 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2277 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2278 break;
2279 case L2CAP_MODE_STREAMING:
2280 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2281 }
2282}
2283
2284static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2285{
2286 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2287
2288 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2289 return 0;
2290
2291 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2292 cmd->ident == conn->info_ident) {
2293 del_timer(&conn->info_timer);
2294
2295 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2296 conn->info_ident = 0;
2297
2298 l2cap_conn_start(conn);
2299 }
2300
2301 return 0;
2302}
2303
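/* Handle an incoming Connect Request: look up a listening channel for
 * the PSM, enforce the security and backlog limits, create the child
 * channel and answer with a Connect Response. If the remote features
 * are still unknown, an Information Request is sent as well.
 */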
2304static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2305{
2306 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2307 struct l2cap_conn_rsp rsp;
2308 struct l2cap_chan *chan = NULL, *pchan;
2309 struct sock *parent, *sk = NULL;
2310 int result, status = L2CAP_CS_NO_INFO;
2311
2312 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2313 __le16 psm = req->psm;
2314
2315 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2316
2317 /* Check if we have socket listening on psm */
2318 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2319 if (!pchan) {
2320 result = L2CAP_CR_BAD_PSM;
2321 goto sendresp;
2322 }
2323
2324 parent = pchan->sk;
2325
2326 bh_lock_sock(parent);
2327
2328 /* Check if the ACL is secure enough (if not SDP) */
2329 if (psm != cpu_to_le16(0x0001) &&
2330 !hci_conn_check_link_mode(conn->hcon)) {
2331 conn->disc_reason = 0x05;
2332 result = L2CAP_CR_SEC_BLOCK;
2333 goto response;
2334 }
2335
2336 result = L2CAP_CR_NO_MEM;
2337
2338 /* Check for backlog size */
2339 if (sk_acceptq_is_full(parent)) {
2340 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2341 goto response;
2342 }
2343
2344 chan = pchan->ops->new_connection(pchan->data);
2345 if (!chan)
2346 goto response;
2347
2348 sk = chan->sk;
2349
2350 write_lock_bh(&conn->chan_lock);
2351
2352 /* Check if we already have channel with that dcid */
2353 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2354 write_unlock_bh(&conn->chan_lock);
2355 sock_set_flag(sk, SOCK_ZAPPED);
2356 chan->ops->close(chan->data);
2357 goto response;
2358 }
2359
2360 hci_conn_hold(conn->hcon);
2361
2362 bacpy(&bt_sk(sk)->src, conn->src);
2363 bacpy(&bt_sk(sk)->dst, conn->dst);
2364 chan->psm = psm;
2365 chan->dcid = scid;
2366
2367 bt_accept_enqueue(parent, sk);
2368
2369 __l2cap_chan_add(conn, chan);
2370
2371 dcid = chan->scid;
2372
2373 __set_chan_timer(chan, sk->sk_sndtimeo);
2374
2375 chan->ident = cmd->ident;
2376
2377 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2378 if (l2cap_check_security(chan)) {
2379 if (bt_sk(sk)->defer_setup) {
2380 l2cap_state_change(chan, BT_CONNECT2);
2381 result = L2CAP_CR_PEND;
2382 status = L2CAP_CS_AUTHOR_PEND;
2383 parent->sk_data_ready(parent, 0);
2384 } else {
2385 l2cap_state_change(chan, BT_CONFIG);
2386 result = L2CAP_CR_SUCCESS;
2387 status = L2CAP_CS_NO_INFO;
2388 }
2389 } else {
2390 l2cap_state_change(chan, BT_CONNECT2);
2391 result = L2CAP_CR_PEND;
2392 status = L2CAP_CS_AUTHEN_PEND;
2393 }
2394 } else {
2395 l2cap_state_change(chan, BT_CONNECT2);
2396 result = L2CAP_CR_PEND;
2397 status = L2CAP_CS_NO_INFO;
2398 }
2399
2400 write_unlock_bh(&conn->chan_lock);
2401
2402response:
2403 bh_unlock_sock(parent);
2404
2405sendresp:
2406 rsp.scid = cpu_to_le16(scid);
2407 rsp.dcid = cpu_to_le16(dcid);
2408 rsp.result = cpu_to_le16(result);
2409 rsp.status = cpu_to_le16(status);
2410 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2411
2412 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2413 struct l2cap_info_req info;
2414 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2415
2416 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2417 conn->info_ident = l2cap_get_ident(conn);
2418
2419 mod_timer(&conn->info_timer, jiffies +
2420 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2421
2422 l2cap_send_cmd(conn, conn->info_ident,
2423 L2CAP_INFO_REQ, sizeof(info), &info);
2424 }
2425
2426 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2427 result == L2CAP_CR_SUCCESS) {
2428 u8 buf[128];
2429 set_bit(CONF_REQ_SENT, &chan->conf_state);
2430 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2431 l2cap_build_conf_req(chan, buf), buf);
2432 chan->num_conf_req++;
2433 }
2434
2435 return 0;
2436}
2437
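/* Handle the Connect Response to an outgoing request. On success the
 * channel moves to BT_CONFIG and the first Configure Request is sent; a
 * pending result only marks the connection as pending, anything else
 * tears the channel down.
 */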
2438static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2439{
2440 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2441 u16 scid, dcid, result, status;
2442 struct l2cap_chan *chan;
2443 struct sock *sk;
2444 u8 req[128];
2445
2446 scid = __le16_to_cpu(rsp->scid);
2447 dcid = __le16_to_cpu(rsp->dcid);
2448 result = __le16_to_cpu(rsp->result);
2449 status = __le16_to_cpu(rsp->status);
2450
2451 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2452
2453 if (scid) {
2454 chan = l2cap_get_chan_by_scid(conn, scid);
2455 if (!chan)
2456 return -EFAULT;
2457 } else {
2458 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2459 if (!chan)
2460 return -EFAULT;
2461 }
2462
2463 sk = chan->sk;
2464
2465 switch (result) {
2466 case L2CAP_CR_SUCCESS:
2467 l2cap_state_change(chan, BT_CONFIG);
2468 chan->ident = 0;
2469 chan->dcid = dcid;
2470 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2471
2472 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2473 break;
2474
2475 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2476 l2cap_build_conf_req(chan, req), req);
2477 chan->num_conf_req++;
2478 break;
2479
2480 case L2CAP_CR_PEND:
2481 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2482 break;
2483
2484 default:
2485 /* don't delete l2cap channel if sk is owned by user */
2486 if (sock_owned_by_user(sk)) {
2487 l2cap_state_change(chan, BT_DISCONN);
2488 __clear_chan_timer(chan);
2489 __set_chan_timer(chan, HZ / 5);
2490 break;
2491 }
2492
2493 l2cap_chan_del(chan, ECONNREFUSED);
2494 break;
2495 }
2496
2497 bh_unlock_sock(sk);
2498 return 0;
2499}
2500
2501static inline void set_default_fcs(struct l2cap_chan *chan)
2502{
2503 /* FCS is enabled only in ERTM or streaming mode, if one or both
2504 * sides request it.
2505 */
2506 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2507 chan->fcs = L2CAP_FCS_NONE;
2508 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2509 chan->fcs = L2CAP_FCS_CRC16;
2510}
2511
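/* Handle a Configure Request. Option data is accumulated across
 * continuation fragments, parsed once complete and answered with a
 * Configure Response; when both directions are configured the channel
 * is marked connected.
 */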
2512static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2513{
2514 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2515 u16 dcid, flags;
2516 u8 rsp[64];
2517 struct l2cap_chan *chan;
2518 struct sock *sk;
2519 int len;
2520
2521 dcid = __le16_to_cpu(req->dcid);
2522 flags = __le16_to_cpu(req->flags);
2523
2524 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2525
2526 chan = l2cap_get_chan_by_scid(conn, dcid);
2527 if (!chan)
2528 return -ENOENT;
2529
2530 sk = chan->sk;
2531
2532 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2533 struct l2cap_cmd_rej_cid rej;
2534
2535 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2536 rej.scid = cpu_to_le16(chan->scid);
2537 rej.dcid = cpu_to_le16(chan->dcid);
2538
2539 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2540 sizeof(rej), &rej);
2541 goto unlock;
2542 }
2543
2544 /* Reject if config buffer is too small. */
2545 len = cmd_len - sizeof(*req);
2546 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2547 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2548 l2cap_build_conf_rsp(chan, rsp,
2549 L2CAP_CONF_REJECT, flags), rsp);
2550 goto unlock;
2551 }
2552
2553 /* Store config. */
2554 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2555 chan->conf_len += len;
2556
2557 if (flags & 0x0001) {
2558 /* Incomplete config. Send empty response. */
2559 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2560 l2cap_build_conf_rsp(chan, rsp,
2561 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2562 goto unlock;
2563 }
2564
2565 /* Complete config. */
2566 len = l2cap_parse_conf_req(chan, rsp);
2567 if (len < 0) {
2568 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2569 goto unlock;
2570 }
2571
2572 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2573 chan->num_conf_rsp++;
2574
2575 /* Reset config buffer. */
2576 chan->conf_len = 0;
2577
2578 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2579 goto unlock;
2580
2581 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2582 set_default_fcs(chan);
2583
2584 l2cap_state_change(chan, BT_CONNECTED);
2585
2586 chan->next_tx_seq = 0;
2587 chan->expected_tx_seq = 0;
2588 skb_queue_head_init(&chan->tx_q);
2589 if (chan->mode == L2CAP_MODE_ERTM)
2590 l2cap_ertm_init(chan);
2591
2592 l2cap_chan_ready(sk);
2593 goto unlock;
2594 }
2595
2596 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2597 u8 buf[64];
2598 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2599 l2cap_build_conf_req(chan, buf), buf);
2600 chan->num_conf_req++;
2601 }
2602
2603unlock:
2604 bh_unlock_sock(sk);
2605 return 0;
2606}
2607
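/* Handle a Configure Response. Successful responses have their RFC
 * values applied, unacceptable ones trigger a renegotiation with a new
 * Configure Request, and anything else disconnects the channel.
 */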
2608static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2609{
2610 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2611 u16 scid, flags, result;
2612 struct l2cap_chan *chan;
2613 struct sock *sk;
 2614 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2615
2616 scid = __le16_to_cpu(rsp->scid);
2617 flags = __le16_to_cpu(rsp->flags);
2618 result = __le16_to_cpu(rsp->result);
2619
2620 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2621 scid, flags, result);
2622
2623 chan = l2cap_get_chan_by_scid(conn, scid);
2624 if (!chan)
2625 return 0;
2626
2627 sk = chan->sk;
2628
2629 switch (result) {
2630 case L2CAP_CONF_SUCCESS:
2631 l2cap_conf_rfc_get(chan, rsp->data, len);
2632 break;
2633
2634 case L2CAP_CONF_UNACCEPT:
2635 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2636 char req[64];
2637
2638 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2639 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2640 goto done;
2641 }
2642
2643 /* throw out any old stored conf requests */
2644 result = L2CAP_CONF_SUCCESS;
2645 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2646 req, &result);
2647 if (len < 0) {
2648 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2649 goto done;
2650 }
2651
2652 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2653 L2CAP_CONF_REQ, len, req);
2654 chan->num_conf_req++;
2655 if (result != L2CAP_CONF_SUCCESS)
2656 goto done;
2657 break;
2658 }
2659
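 /* fall through when the maximum number of configuration attempts has been exceeded */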
2660 default:
2661 sk->sk_err = ECONNRESET;
2662 __set_chan_timer(chan, HZ * 5);
2663 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2664 goto done;
2665 }
2666
2667 if (flags & 0x01)
2668 goto done;
2669
2670 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2671
2672 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2673 set_default_fcs(chan);
2674
2675 l2cap_state_change(chan, BT_CONNECTED);
2676 chan->next_tx_seq = 0;
2677 chan->expected_tx_seq = 0;
2678 skb_queue_head_init(&chan->tx_q);
2679 if (chan->mode == L2CAP_MODE_ERTM)
2680 l2cap_ertm_init(chan);
2681
2682 l2cap_chan_ready(sk);
2683 }
2684
2685done:
2686 bh_unlock_sock(sk);
2687 return 0;
2688}
2689
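/* Handle a Disconnect Request: acknowledge it with a Disconnect
 * Response and tear the channel down, deferring the teardown if the
 * socket is currently locked by user space.
 */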
2690static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2691{
2692 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2693 struct l2cap_disconn_rsp rsp;
2694 u16 dcid, scid;
2695 struct l2cap_chan *chan;
2696 struct sock *sk;
2697
2698 scid = __le16_to_cpu(req->scid);
2699 dcid = __le16_to_cpu(req->dcid);
2700
2701 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2702
2703 chan = l2cap_get_chan_by_scid(conn, dcid);
2704 if (!chan)
2705 return 0;
2706
2707 sk = chan->sk;
2708
2709 rsp.dcid = cpu_to_le16(chan->scid);
2710 rsp.scid = cpu_to_le16(chan->dcid);
2711 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2712
2713 sk->sk_shutdown = SHUTDOWN_MASK;
2714
2715 /* don't delete l2cap channel if sk is owned by user */
2716 if (sock_owned_by_user(sk)) {
2717 l2cap_state_change(chan, BT_DISCONN);
2718 __clear_chan_timer(chan);
2719 __set_chan_timer(chan, HZ / 5);
2720 bh_unlock_sock(sk);
2721 return 0;
2722 }
2723
2724 l2cap_chan_del(chan, ECONNRESET);
2725 bh_unlock_sock(sk);
2726
2727 chan->ops->close(chan->data);
2728 return 0;
2729}
2730
2731static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2732{
2733 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2734 u16 dcid, scid;
2735 struct l2cap_chan *chan;
2736 struct sock *sk;
2737
2738 scid = __le16_to_cpu(rsp->scid);
2739 dcid = __le16_to_cpu(rsp->dcid);
2740
2741 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2742
2743 chan = l2cap_get_chan_by_scid(conn, scid);
2744 if (!chan)
2745 return 0;
2746
2747 sk = chan->sk;
2748
2749 /* don't delete l2cap channel if sk is owned by user */
2750 if (sock_owned_by_user(sk)) {
 2751 l2cap_state_change(chan, BT_DISCONN);
2752 __clear_chan_timer(chan);
2753 __set_chan_timer(chan, HZ / 5);
2754 bh_unlock_sock(sk);
2755 return 0;
2756 }
2757
2758 l2cap_chan_del(chan, 0);
2759 bh_unlock_sock(sk);
2760
2761 chan->ops->close(chan->data);
2762 return 0;
2763}
2764
2765static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2766{
2767 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2768 u16 type;
2769
2770 type = __le16_to_cpu(req->type);
2771
2772 BT_DBG("type 0x%4.4x", type);
2773
2774 if (type == L2CAP_IT_FEAT_MASK) {
2775 u8 buf[8];
2776 u32 feat_mask = l2cap_feat_mask;
2777 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2778 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2779 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2780 if (!disable_ertm)
2781 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2782 | L2CAP_FEAT_FCS;
2783 put_unaligned_le32(feat_mask, rsp->data);
2784 l2cap_send_cmd(conn, cmd->ident,
2785 L2CAP_INFO_RSP, sizeof(buf), buf);
2786 } else if (type == L2CAP_IT_FIXED_CHAN) {
2787 u8 buf[12];
2788 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2789 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2790 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2791 memcpy(buf + 4, l2cap_fixed_chan, 8);
2792 l2cap_send_cmd(conn, cmd->ident,
2793 L2CAP_INFO_RSP, sizeof(buf), buf);
2794 } else {
2795 struct l2cap_info_rsp rsp;
2796 rsp.type = cpu_to_le16(type);
2797 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2798 l2cap_send_cmd(conn, cmd->ident,
2799 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2800 }
2801
2802 return 0;
2803}
2804
2805static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2806{
2807 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2808 u16 type, result;
2809
2810 type = __le16_to_cpu(rsp->type);
2811 result = __le16_to_cpu(rsp->result);
2812
2813 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2814
2815 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2816 if (cmd->ident != conn->info_ident ||
2817 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2818 return 0;
2819
2820 del_timer(&conn->info_timer);
2821
2822 if (result != L2CAP_IR_SUCCESS) {
2823 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2824 conn->info_ident = 0;
2825
2826 l2cap_conn_start(conn);
2827
2828 return 0;
2829 }
2830
2831 if (type == L2CAP_IT_FEAT_MASK) {
2832 conn->feat_mask = get_unaligned_le32(rsp->data);
2833
2834 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2835 struct l2cap_info_req req;
2836 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2837
2838 conn->info_ident = l2cap_get_ident(conn);
2839
2840 l2cap_send_cmd(conn, conn->info_ident,
2841 L2CAP_INFO_REQ, sizeof(req), &req);
2842 } else {
2843 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2844 conn->info_ident = 0;
2845
2846 l2cap_conn_start(conn);
2847 }
2848 } else if (type == L2CAP_IT_FIXED_CHAN) {
2849 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2850 conn->info_ident = 0;
2851
2852 l2cap_conn_start(conn);
2853 }
2854
2855 return 0;
2856}
2857
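/* Validate requested LE connection parameters: the connection interval,
 * slave latency and supervision timeout must lie within the ranges
 * allowed by the specification and be consistent with each other.
 */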
2858static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2859 u16 to_multiplier)
2860{
2861 u16 max_latency;
2862
2863 if (min > max || min < 6 || max > 3200)
2864 return -EINVAL;
2865
2866 if (to_multiplier < 10 || to_multiplier > 3200)
2867 return -EINVAL;
2868
2869 if (max >= to_multiplier * 8)
2870 return -EINVAL;
2871
2872 max_latency = (to_multiplier * 8 / max) - 1;
2873 if (latency > 499 || latency > max_latency)
2874 return -EINVAL;
2875
2876 return 0;
2877}
2878
2879static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2880 struct l2cap_cmd_hdr *cmd, u8 *data)
2881{
2882 struct hci_conn *hcon = conn->hcon;
2883 struct l2cap_conn_param_update_req *req;
2884 struct l2cap_conn_param_update_rsp rsp;
2885 u16 min, max, latency, to_multiplier, cmd_len;
2886 int err;
2887
2888 if (!(hcon->link_mode & HCI_LM_MASTER))
2889 return -EINVAL;
2890
2891 cmd_len = __le16_to_cpu(cmd->len);
2892 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2893 return -EPROTO;
2894
2895 req = (struct l2cap_conn_param_update_req *) data;
2896 min = __le16_to_cpu(req->min);
2897 max = __le16_to_cpu(req->max);
2898 latency = __le16_to_cpu(req->latency);
2899 to_multiplier = __le16_to_cpu(req->to_multiplier);
2900
2901 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2902 min, max, latency, to_multiplier);
2903
2904 memset(&rsp, 0, sizeof(rsp));
2905
2906 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2907 if (err)
2908 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2909 else
2910 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2911
2912 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2913 sizeof(rsp), &rsp);
2914
2915 if (!err)
2916 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2917
2918 return 0;
2919}
2920
2921static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2922 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2923{
2924 int err = 0;
2925
2926 switch (cmd->code) {
2927 case L2CAP_COMMAND_REJ:
2928 l2cap_command_rej(conn, cmd, data);
2929 break;
2930
2931 case L2CAP_CONN_REQ:
2932 err = l2cap_connect_req(conn, cmd, data);
2933 break;
2934
2935 case L2CAP_CONN_RSP:
2936 err = l2cap_connect_rsp(conn, cmd, data);
2937 break;
2938
2939 case L2CAP_CONF_REQ:
2940 err = l2cap_config_req(conn, cmd, cmd_len, data);
2941 break;
2942
2943 case L2CAP_CONF_RSP:
2944 err = l2cap_config_rsp(conn, cmd, data);
2945 break;
2946
2947 case L2CAP_DISCONN_REQ:
2948 err = l2cap_disconnect_req(conn, cmd, data);
2949 break;
2950
2951 case L2CAP_DISCONN_RSP:
2952 err = l2cap_disconnect_rsp(conn, cmd, data);
2953 break;
2954
2955 case L2CAP_ECHO_REQ:
2956 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2957 break;
2958
2959 case L2CAP_ECHO_RSP:
2960 break;
2961
2962 case L2CAP_INFO_REQ:
2963 err = l2cap_information_req(conn, cmd, data);
2964 break;
2965
2966 case L2CAP_INFO_RSP:
2967 err = l2cap_information_rsp(conn, cmd, data);
2968 break;
2969
2970 default:
2971 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2972 err = -EINVAL;
2973 break;
2974 }
2975
2976 return err;
2977}
2978
2979static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2980 struct l2cap_cmd_hdr *cmd, u8 *data)
2981{
2982 switch (cmd->code) {
2983 case L2CAP_COMMAND_REJ:
2984 return 0;
2985
2986 case L2CAP_CONN_PARAM_UPDATE_REQ:
2987 return l2cap_conn_param_update_req(conn, cmd, data);
2988
2989 case L2CAP_CONN_PARAM_UPDATE_RSP:
2990 return 0;
2991
2992 default:
2993 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2994 return -EINVAL;
2995 }
2996}
2997
2998static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2999 struct sk_buff *skb)
3000{
3001 u8 *data = skb->data;
3002 int len = skb->len;
3003 struct l2cap_cmd_hdr cmd;
3004 int err;
3005
3006 l2cap_raw_recv(conn, skb);
3007
3008 while (len >= L2CAP_CMD_HDR_SIZE) {
3009 u16 cmd_len;
3010 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3011 data += L2CAP_CMD_HDR_SIZE;
3012 len -= L2CAP_CMD_HDR_SIZE;
3013
3014 cmd_len = le16_to_cpu(cmd.len);
3015
3016 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3017
3018 if (cmd_len > len || !cmd.ident) {
3019 BT_DBG("corrupted command");
3020 break;
3021 }
3022
3023 if (conn->hcon->type == LE_LINK)
3024 err = l2cap_le_sig_cmd(conn, &cmd, data);
3025 else
3026 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3027
3028 if (err) {
3029 struct l2cap_cmd_rej_unk rej;
3030
3031 BT_ERR("Wrong link type (%d)", err);
3032
3033 /* FIXME: Map err to a valid reason */
3034 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3035 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3036 }
3037
3038 data += cmd_len;
3039 len -= cmd_len;
3040 }
3041
3042 kfree_skb(skb);
3043}
3044
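/* When CRC16 FCS is in use, strip the trailing checksum from the frame
 * and verify it against a locally computed CRC over the L2CAP header
 * and payload.
 */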
3045static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3046{
3047 u16 our_fcs, rcv_fcs;
3048 int hdr_size = L2CAP_HDR_SIZE + 2;
3049
3050 if (chan->fcs == L2CAP_FCS_CRC16) {
3051 skb_trim(skb, skb->len - 2);
3052 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3053 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3054
3055 if (our_fcs != rcv_fcs)
3056 return -EBADMSG;
3057 }
3058 return 0;
3059}
3060
3061static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3062{
3063 u16 control = 0;
3064
3065 chan->frames_sent = 0;
3066
3067 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3068
3069 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3070 control |= L2CAP_SUPER_RCV_NOT_READY;
3071 l2cap_send_sframe(chan, control);
3072 set_bit(CONN_RNR_SENT, &chan->conn_state);
3073 }
3074
3075 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3076 l2cap_retransmit_frames(chan);
3077
3078 l2cap_ertm_send(chan);
3079
3080 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3081 chan->frames_sent == 0) {
3082 control |= L2CAP_SUPER_RCV_READY;
3083 l2cap_send_sframe(chan, control);
3084 }
3085}
3086
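/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq relative to buffer_seq. Duplicate sequence
 * numbers are rejected with -EINVAL.
 */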
3087static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3088{
3089 struct sk_buff *next_skb;
3090 int tx_seq_offset, next_tx_seq_offset;
3091
3092 bt_cb(skb)->tx_seq = tx_seq;
3093 bt_cb(skb)->sar = sar;
3094
3095 next_skb = skb_peek(&chan->srej_q);
3096 if (!next_skb) {
3097 __skb_queue_tail(&chan->srej_q, skb);
3098 return 0;
3099 }
3100
3101 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3102 if (tx_seq_offset < 0)
3103 tx_seq_offset += 64;
3104
3105 do {
3106 if (bt_cb(next_skb)->tx_seq == tx_seq)
3107 return -EINVAL;
3108
3109 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3110 chan->buffer_seq) % 64;
3111 if (next_tx_seq_offset < 0)
3112 next_tx_seq_offset += 64;
3113
3114 if (next_tx_seq_offset > tx_seq_offset) {
3115 __skb_queue_before(&chan->srej_q, next_skb, skb);
3116 return 0;
3117 }
3118
3119 if (skb_queue_is_last(&chan->srej_q, next_skb))
3120 break;
3121
3122 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3123
3124 __skb_queue_tail(&chan->srej_q, skb);
3125
3126 return 0;
3127}
3128
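/* Reassemble SAR-segmented SDUs in ERTM mode. Unsegmented frames are
 * passed straight to the upper layer; start, continuation and end
 * frames are collected in chan->sdu until the SDU is complete.
 */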
3129static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3130{
3131 struct sk_buff *_skb;
3132 int err;
3133
3134 switch (control & L2CAP_CTRL_SAR) {
3135 case L2CAP_SDU_UNSEGMENTED:
3136 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3137 goto drop;
3138
3139 return chan->ops->recv(chan->data, skb);
3140
3141 case L2CAP_SDU_START:
3142 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3143 goto drop;
3144
3145 chan->sdu_len = get_unaligned_le16(skb->data);
3146
3147 if (chan->sdu_len > chan->imtu)
3148 goto disconnect;
3149
3150 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3151 if (!chan->sdu)
3152 return -ENOMEM;
3153
3154 /* pull sdu_len bytes only after alloc, because of Local Busy
3155 * condition we have to be sure that this will be executed
3156 * only once, i.e., when alloc does not fail */
3157 skb_pull(skb, 2);
3158
3159 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3160
3161 set_bit(CONN_SAR_SDU, &chan->conn_state);
3162 chan->partial_sdu_len = skb->len;
3163 break;
3164
3165 case L2CAP_SDU_CONTINUE:
3166 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3167 goto disconnect;
3168
3169 if (!chan->sdu)
3170 goto disconnect;
3171
3172 chan->partial_sdu_len += skb->len;
3173 if (chan->partial_sdu_len > chan->sdu_len)
3174 goto drop;
3175
3176 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3177
3178 break;
3179
3180 case L2CAP_SDU_END:
3181 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3182 goto disconnect;
3183
3184 if (!chan->sdu)
3185 goto disconnect;
3186
3187 chan->partial_sdu_len += skb->len;
3188
3189 if (chan->partial_sdu_len > chan->imtu)
3190 goto drop;
3191
3192 if (chan->partial_sdu_len != chan->sdu_len)
3193 goto drop;
3194
3195 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3196
3197 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
 3198 if (!_skb)
 3199 return -ENOMEM;
3201
3202 err = chan->ops->recv(chan->data, _skb);
3203 if (err < 0) {
3204 kfree_skb(_skb);
3205 return err;
3206 }
3207
3208 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3209
3210 kfree_skb(chan->sdu);
3211 break;
3212 }
3213
3214 kfree_skb(skb);
3215 return 0;
3216
3217drop:
3218 kfree_skb(chan->sdu);
3219 chan->sdu = NULL;
3220
3221disconnect:
3222 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3223 kfree_skb(skb);
3224 return 0;
3225}
3226
3227static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3228{
3229 u16 control;
3230
3231 BT_DBG("chan %p, Enter local busy", chan);
3232
3233 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3234
3235 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3236 control |= L2CAP_SUPER_RCV_NOT_READY;
3237 l2cap_send_sframe(chan, control);
3238
3239 set_bit(CONN_RNR_SENT, &chan->conn_state);
3240
3241 __clear_ack_timer(chan);
3242}
3243
3244static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3245{
3246 u16 control;
3247
3248 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3249 goto done;
3250
3251 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3252 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3253 l2cap_send_sframe(chan, control);
3254 chan->retry_count = 1;
3255
3256 __clear_retrans_timer(chan);
3257 __set_monitor_timer(chan);
3258
3259 set_bit(CONN_WAIT_F, &chan->conn_state);
3260
3261done:
3262 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3263 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3264
3265 BT_DBG("chan %p, Exit local busy", chan);
3266}
3267
3268void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3269{
3270 if (chan->mode == L2CAP_MODE_ERTM) {
3271 if (busy)
3272 l2cap_ertm_enter_local_busy(chan);
3273 else
3274 l2cap_ertm_exit_local_busy(chan);
3275 }
3276}
3277
3278static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3279{
3280 struct sk_buff *_skb;
3281 int err = -EINVAL;
3282
3283 /*
3284 * TODO: We have to notify the userland if some data is lost with the
3285 * Streaming Mode.
3286 */
3287
3288 switch (control & L2CAP_CTRL_SAR) {
3289 case L2CAP_SDU_UNSEGMENTED:
3290 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3291 kfree_skb(chan->sdu);
3292 break;
3293 }
3294
3295 err = chan->ops->recv(chan->data, skb);
3296 if (!err)
3297 return 0;
3298
3299 break;
3300
3301 case L2CAP_SDU_START:
3302 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3303 kfree_skb(chan->sdu);
3304 break;
3305 }
3306
3307 chan->sdu_len = get_unaligned_le16(skb->data);
3308 skb_pull(skb, 2);
3309
3310 if (chan->sdu_len > chan->imtu) {
3311 err = -EMSGSIZE;
3312 break;
3313 }
3314
3315 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3316 if (!chan->sdu) {
3317 err = -ENOMEM;
3318 break;
3319 }
3320
3321 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3322
3323 set_bit(CONN_SAR_SDU, &chan->conn_state);
3324 chan->partial_sdu_len = skb->len;
3325 err = 0;
3326 break;
3327
3328 case L2CAP_SDU_CONTINUE:
3329 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3330 break;
3331
3332 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3333
3334 chan->partial_sdu_len += skb->len;
3335 if (chan->partial_sdu_len > chan->sdu_len)
3336 kfree_skb(chan->sdu);
3337 else
3338 err = 0;
3339
3340 break;
3341
3342 case L2CAP_SDU_END:
3343 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3344 break;
3345
3346 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3347
3348 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3349 chan->partial_sdu_len += skb->len;
3350
3351 if (chan->partial_sdu_len > chan->imtu)
3352 goto drop;
3353
3354 if (chan->partial_sdu_len == chan->sdu_len) {
 3355 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
 3356 if (_skb) {
 3357 err = chan->ops->recv(chan->data, _skb);
 3358 if (err < 0)
 3359 kfree_skb(_skb);
 3360 }
3359 }
3360 err = 0;
3361
3362drop:
3363 kfree_skb(chan->sdu);
3364 break;
3365 }
3366
3367 kfree_skb(skb);
3368 return err;
3369}
3370
3371static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3372{
3373 struct sk_buff *skb;
3374 u16 control;
3375
3376 while ((skb = skb_peek(&chan->srej_q)) &&
3377 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3378 int err;
3379
3380 if (bt_cb(skb)->tx_seq != tx_seq)
3381 break;
3382
3383 skb = skb_dequeue(&chan->srej_q);
3384 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3385 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3386
3387 if (err < 0) {
3388 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3389 break;
3390 }
3391
3392 chan->buffer_seq_srej =
3393 (chan->buffer_seq_srej + 1) % 64;
3394 tx_seq = (tx_seq + 1) % 64;
3395 }
3396}
3397
3398static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3399{
3400 struct srej_list *l, *tmp;
3401 u16 control;
3402
3403 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3404 if (l->tx_seq == tx_seq) {
3405 list_del(&l->list);
3406 kfree(l);
3407 return;
3408 }
3409 control = L2CAP_SUPER_SELECT_REJECT;
3410 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3411 l2cap_send_sframe(chan, control);
3412 list_del(&l->list);
3413 list_add_tail(&l->list, &chan->srej_l);
3414 }
3415}
3416
3417static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3418{
3419 struct srej_list *new;
3420 u16 control;
3421
3422 while (tx_seq != chan->expected_tx_seq) {
3423 control = L2CAP_SUPER_SELECT_REJECT;
3424 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3425 l2cap_send_sframe(chan, control);
3426
3427 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3428 new->tx_seq = chan->expected_tx_seq;
3429 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3430 list_add_tail(&new->list, &chan->srej_l);
3431 }
3432 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3433}
3434
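/* Process a received ERTM I-frame: acknowledge outstanding frames,
 * validate the sequence number against the transmit window, run SREJ
 * recovery for gaps and hand in-order frames to reassembly.
 */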
3435static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3436{
3437 u8 tx_seq = __get_txseq(rx_control);
3438 u8 req_seq = __get_reqseq(rx_control);
3439 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3440 int tx_seq_offset, expected_tx_seq_offset;
3441 int num_to_ack = (chan->tx_win/6) + 1;
3442 int err = 0;
3443
3444 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3445 tx_seq, rx_control);
3446
3447 if (L2CAP_CTRL_FINAL & rx_control &&
3448 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3449 __clear_monitor_timer(chan);
3450 if (chan->unacked_frames > 0)
3451 __set_retrans_timer(chan);
3452 clear_bit(CONN_WAIT_F, &chan->conn_state);
3453 }
3454
3455 chan->expected_ack_seq = req_seq;
3456 l2cap_drop_acked_frames(chan);
3457
3458 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3459 if (tx_seq_offset < 0)
3460 tx_seq_offset += 64;
3461
3462 /* invalid tx_seq */
3463 if (tx_seq_offset >= chan->tx_win) {
3464 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3465 goto drop;
3466 }
3467
3468 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3469 goto drop;
3470
3471 if (tx_seq == chan->expected_tx_seq)
3472 goto expected;
3473
3474 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3475 struct srej_list *first;
3476
3477 first = list_first_entry(&chan->srej_l,
3478 struct srej_list, list);
3479 if (tx_seq == first->tx_seq) {
3480 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3481 l2cap_check_srej_gap(chan, tx_seq);
3482
3483 list_del(&first->list);
3484 kfree(first);
3485
3486 if (list_empty(&chan->srej_l)) {
3487 chan->buffer_seq = chan->buffer_seq_srej;
3488 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3489 l2cap_send_ack(chan);
3490 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3491 }
3492 } else {
3493 struct srej_list *l;
3494
3495 /* duplicated tx_seq */
3496 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3497 goto drop;
3498
3499 list_for_each_entry(l, &chan->srej_l, list) {
3500 if (l->tx_seq == tx_seq) {
3501 l2cap_resend_srejframe(chan, tx_seq);
3502 return 0;
3503 }
3504 }
3505 l2cap_send_srejframe(chan, tx_seq);
3506 }
3507 } else {
3508 expected_tx_seq_offset =
3509 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3510 if (expected_tx_seq_offset < 0)
3511 expected_tx_seq_offset += 64;
3512
3513 /* duplicated tx_seq */
3514 if (tx_seq_offset < expected_tx_seq_offset)
3515 goto drop;
3516
3517 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3518
3519 BT_DBG("chan %p, Enter SREJ", chan);
3520
3521 INIT_LIST_HEAD(&chan->srej_l);
3522 chan->buffer_seq_srej = chan->buffer_seq;
3523
3524 __skb_queue_head_init(&chan->srej_q);
3525 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3526
3527 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3528
3529 l2cap_send_srejframe(chan, tx_seq);
3530
3531 __clear_ack_timer(chan);
3532 }
3533 return 0;
3534
3535expected:
3536 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3537
3538 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3539 bt_cb(skb)->tx_seq = tx_seq;
3540 bt_cb(skb)->sar = sar;
3541 __skb_queue_tail(&chan->srej_q, skb);
3542 return 0;
3543 }
3544
3545 err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
3546 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3547 if (err < 0) {
3548 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3549 return err;
3550 }
3551
3552 if (rx_control & L2CAP_CTRL_FINAL) {
3553 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3554 l2cap_retransmit_frames(chan);
3555 }
3556
3557 __set_ack_timer(chan);
3558
3559 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3560 if (chan->num_acked == num_to_ack - 1)
3561 l2cap_send_ack(chan);
3562
3563 return 0;
3564
3565drop:
3566 kfree_skb(skb);
3567 return 0;
3568}
3569
3570static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3571{
3572 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3573 rx_control);
3574
3575 chan->expected_ack_seq = __get_reqseq(rx_control);
3576 l2cap_drop_acked_frames(chan);
3577
3578 if (rx_control & L2CAP_CTRL_POLL) {
3579 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3580 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3581 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3582 (chan->unacked_frames > 0))
3583 __set_retrans_timer(chan);
3584
3585 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3586 l2cap_send_srejtail(chan);
3587 } else {
3588 l2cap_send_i_or_rr_or_rnr(chan);
3589 }
3590
3591 } else if (rx_control & L2CAP_CTRL_FINAL) {
3592 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3593
3594 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3595 l2cap_retransmit_frames(chan);
3596
3597 } else {
3598 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3599 (chan->unacked_frames > 0))
3600 __set_retrans_timer(chan);
3601
3602 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3603 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3604 l2cap_send_ack(chan);
3605 else
3606 l2cap_ertm_send(chan);
3607 }
3608}
3609
3610static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3611{
3612 u8 tx_seq = __get_reqseq(rx_control);
3613
3614 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3615
3616 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3617
3618 chan->expected_ack_seq = tx_seq;
3619 l2cap_drop_acked_frames(chan);
3620
3621 if (rx_control & L2CAP_CTRL_FINAL) {
3622 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3623 l2cap_retransmit_frames(chan);
3624 } else {
3625 l2cap_retransmit_frames(chan);
3626
3627 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3628 set_bit(CONN_REJ_ACT, &chan->conn_state);
3629 }
3630}

 3631 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3632{
3633 u8 tx_seq = __get_reqseq(rx_control);
3634
3635 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3636
3637 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3638
3639 if (rx_control & L2CAP_CTRL_POLL) {
3640 chan->expected_ack_seq = tx_seq;
3641 l2cap_drop_acked_frames(chan);
3642
3643 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3644 l2cap_retransmit_one_frame(chan, tx_seq);
3645
3646 l2cap_ertm_send(chan);
3647
3648 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3649 chan->srej_save_reqseq = tx_seq;
3650 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3651 }
3652 } else if (rx_control & L2CAP_CTRL_FINAL) {
3653 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3654 chan->srej_save_reqseq == tx_seq)
3655 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3656 else
3657 l2cap_retransmit_one_frame(chan, tx_seq);
3658 } else {
3659 l2cap_retransmit_one_frame(chan, tx_seq);
3660 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3661 chan->srej_save_reqseq = tx_seq;
3662 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3663 }
3664 }
3665}
3666
3667static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3668{
3669 u8 tx_seq = __get_reqseq(rx_control);
3670
3671 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3672
3673 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3674 chan->expected_ack_seq = tx_seq;
3675 l2cap_drop_acked_frames(chan);
3676
3677 if (rx_control & L2CAP_CTRL_POLL)
3678 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3679
3680 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3681 __clear_retrans_timer(chan);
3682 if (rx_control & L2CAP_CTRL_POLL)
3683 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3684 return;
3685 }
3686
3687 if (rx_control & L2CAP_CTRL_POLL)
3688 l2cap_send_srejtail(chan);
3689 else
3690 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
3691}
3692
3693static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3694{
3695 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3696
3697 if (L2CAP_CTRL_FINAL & rx_control &&
3698 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3699 __clear_monitor_timer(chan);
3700 if (chan->unacked_frames > 0)
3701 __set_retrans_timer(chan);
3702 clear_bit(CONN_WAIT_F, &chan->conn_state);
3703 }
3704
3705 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3706 case L2CAP_SUPER_RCV_READY:
3707 l2cap_data_channel_rrframe(chan, rx_control);
3708 break;
3709
3710 case L2CAP_SUPER_REJECT:
3711 l2cap_data_channel_rejframe(chan, rx_control);
3712 break;
3713
3714 case L2CAP_SUPER_SELECT_REJECT:
3715 l2cap_data_channel_srejframe(chan, rx_control);
3716 break;
3717
3718 case L2CAP_SUPER_RCV_NOT_READY:
3719 l2cap_data_channel_rnrframe(chan, rx_control);
3720 break;
3721 }
3722
3723 kfree_skb(skb);
3724 return 0;
3725}
3726
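/* Entry point for ERTM data: verify the FCS, length and req_seq of the
 * incoming frame and dispatch it to the I-frame or S-frame handler.
 */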
3727static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3728{
3729 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3730 u16 control;
3731 u8 req_seq;
3732 int len, next_tx_seq_offset, req_seq_offset;
3733
3734 control = get_unaligned_le16(skb->data);
3735 skb_pull(skb, 2);
3736 len = skb->len;
3737
3738 /*
3739 * We can just drop the corrupted I-frame here.
3740 * Receiver will miss it and start proper recovery
3741 * procedures and ask retransmission.
3742 */
3743 if (l2cap_check_fcs(chan, skb))
3744 goto drop;
3745
3746 if (__is_sar_start(control) && __is_iframe(control))
3747 len -= 2;
3748
3749 if (chan->fcs == L2CAP_FCS_CRC16)
3750 len -= 2;
3751
3752 if (len > chan->mps) {
3753 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3754 goto drop;
3755 }
3756
3757 req_seq = __get_reqseq(control);
3758 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3759 if (req_seq_offset < 0)
3760 req_seq_offset += 64;
3761
3762 next_tx_seq_offset =
3763 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3764 if (next_tx_seq_offset < 0)
3765 next_tx_seq_offset += 64;
3766
3767 /* check for invalid req-seq */
3768 if (req_seq_offset > next_tx_seq_offset) {
3769 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3770 goto drop;
3771 }
3772
3773 if (__is_iframe(control)) {
3774 if (len < 0) {
3775 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3776 goto drop;
3777 }
3778
3779 l2cap_data_channel_iframe(chan, control, skb);
3780 } else {
3781 if (len != 0) {
3782 BT_ERR("%d", len);
3783 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3784 goto drop;
3785 }
3786
3787 l2cap_data_channel_sframe(chan, control, skb);
3788 }
3789
3790 return 0;
3791
3792drop:
3793 kfree_skb(skb);
3794 return 0;
3795}
3796
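/* Deliver an incoming data frame to the channel identified by @cid,
 * handling it according to the channel mode (basic, ERTM or streaming).
 */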
3797static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3798{
3799 struct l2cap_chan *chan;
3800 struct sock *sk = NULL;
3801 u16 control;
3802 u8 tx_seq;
3803 int len;
3804
3805 chan = l2cap_get_chan_by_scid(conn, cid);
3806 if (!chan) {
3807 BT_DBG("unknown cid 0x%4.4x", cid);
3808 goto drop;
3809 }
3810
3811 sk = chan->sk;
3812
3813 BT_DBG("chan %p, len %d", chan, skb->len);
3814
3815 if (chan->state != BT_CONNECTED)
3816 goto drop;
3817
3818 switch (chan->mode) {
3819 case L2CAP_MODE_BASIC:
3820 /* If socket recv buffers overflows we drop data here
3821 * which is *bad* because L2CAP has to be reliable.
3822 * But we don't have any other choice. L2CAP doesn't
3823 * provide flow control mechanism. */
3824
3825 if (chan->imtu < skb->len)
3826 goto drop;
3827
3828 if (!chan->ops->recv(chan->data, skb))
3829 goto done;
3830 break;
3831
3832 case L2CAP_MODE_ERTM:
3833 if (!sock_owned_by_user(sk)) {
3834 l2cap_ertm_data_rcv(sk, skb);
3835 } else {
3836 if (sk_add_backlog(sk, skb))
3837 goto drop;
3838 }
3839
3840 goto done;
3841
3842 case L2CAP_MODE_STREAMING:
3843 control = get_unaligned_le16(skb->data);
3844 skb_pull(skb, 2);
3845 len = skb->len;
3846
3847 if (l2cap_check_fcs(chan, skb))
3848 goto drop;
3849
3850 if (__is_sar_start(control))
3851 len -= 2;
3852
3853 if (chan->fcs == L2CAP_FCS_CRC16)
3854 len -= 2;
3855
3856 if (len > chan->mps || len < 0 || __is_sframe(control))
3857 goto drop;
3858
3859 tx_seq = __get_txseq(control);
3860
3861 if (chan->expected_tx_seq == tx_seq)
3862 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3863 else
3864 chan->expected_tx_seq = (tx_seq + 1) % 64;
3865
3866 l2cap_streaming_reassembly_sdu(chan, skb, control);
3867
3868 goto done;
3869
3870 default:
3871 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
3872 break;
3873 }
3874
3875drop:
3876 kfree_skb(skb);
3877
3878done:
3879 if (sk)
3880 bh_unlock_sock(sk);
3881
3882 return 0;
3883}
3884
3885static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3886{
3887 struct sock *sk = NULL;
3888 struct l2cap_chan *chan;
3889
3890 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3891 if (!chan)
3892 goto drop;
3893
3894 sk = chan->sk;
3895
3896 bh_lock_sock(sk);
3897
3898 BT_DBG("sk %p, len %d", sk, skb->len);
3899
3900 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3901 goto drop;
3902
3903 if (chan->imtu < skb->len)
3904 goto drop;
3905
3906 if (!chan->ops->recv(chan->data, skb))
3907 goto done;
3908
3909drop:
3910 kfree_skb(skb);
3911
3912done:
3913 if (sk)
3914 bh_unlock_sock(sk);
3915 return 0;
3916}
3917
3918static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3919{
3920 struct sock *sk = NULL;
3921 struct l2cap_chan *chan;
3922
3923 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3924 if (!chan)
3925 goto drop;
3926
3927 sk = chan->sk;
3928
3929 bh_lock_sock(sk);
3930
3931 BT_DBG("sk %p, len %d", sk, skb->len);
3932
3933 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3934 goto drop;
3935
3936 if (chan->imtu < skb->len)
3937 goto drop;
3938
3939 if (!chan->ops->recv(chan->data, skb))
3940 goto done;
3941
3942drop:
3943 kfree_skb(skb);
3944
3945done:
3946 if (sk)
3947 bh_unlock_sock(sk);
3948 return 0;
3949}
3950
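/* Demultiplex a complete L2CAP frame by CID to the signalling,
 * connectionless, ATT, SMP or connection-oriented data handlers.
 */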
3951static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3952{
3953 struct l2cap_hdr *lh = (void *) skb->data;
3954 u16 cid, len;
3955 __le16 psm;
3956
3957 skb_pull(skb, L2CAP_HDR_SIZE);
3958 cid = __le16_to_cpu(lh->cid);
3959 len = __le16_to_cpu(lh->len);
3960
3961 if (len != skb->len) {
3962 kfree_skb(skb);
3963 return;
3964 }
3965
3966 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3967
3968 switch (cid) {
3969 case L2CAP_CID_LE_SIGNALING:
3970 case L2CAP_CID_SIGNALING:
3971 l2cap_sig_channel(conn, skb);
3972 break;
3973
3974 case L2CAP_CID_CONN_LESS:
3975 psm = get_unaligned_le16(skb->data);
3976 skb_pull(skb, 2);
3977 l2cap_conless_channel(conn, psm, skb);
3978 break;
3979
3980 case L2CAP_CID_LE_DATA:
3981 l2cap_att_channel(conn, cid, skb);
3982 break;
3983
3984 case L2CAP_CID_SMP:
3985 if (smp_sig_channel(conn, skb))
3986 l2cap_conn_del(conn->hcon, EACCES);
3987 break;
3988
3989 default:
3990 l2cap_data_channel(conn, cid, skb);
3991 break;
3992 }
3993}
3994
3995/* ---- L2CAP interface with lower layer (HCI) ---- */
3996
3997static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3998{
3999 int exact = 0, lm1 = 0, lm2 = 0;
4000 struct l2cap_chan *c;
4001
4002 if (type != ACL_LINK)
4003 return -EINVAL;
4004
4005 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4006
4007 /* Find listening sockets and check their link_mode */
4008 read_lock(&chan_list_lock);
4009 list_for_each_entry(c, &chan_list, global_l) {
4010 struct sock *sk = c->sk;
4011
4012 if (c->state != BT_LISTEN)
4013 continue;
4014
4015 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4016 lm1 |= HCI_LM_ACCEPT;
4017 if (c->role_switch)
4018 lm1 |= HCI_LM_MASTER;
4019 exact++;
4020 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4021 lm2 |= HCI_LM_ACCEPT;
4022 if (c->role_switch)
4023 lm2 |= HCI_LM_MASTER;
4024 }
4025 }
4026 read_unlock(&chan_list_lock);
4027
4028 return exact ? lm1 : lm2;
4029}
4030
4031static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4032{
4033 struct l2cap_conn *conn;
4034
4035 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4036
4037 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4038 return -EINVAL;
4039
4040 if (!status) {
4041 conn = l2cap_conn_add(hcon, status);
4042 if (conn)
4043 l2cap_conn_ready(conn);
4044 } else
4045 l2cap_conn_del(hcon, bt_to_errno(status));
4046
4047 return 0;
4048}
4049
4050static int l2cap_disconn_ind(struct hci_conn *hcon)
4051{
4052 struct l2cap_conn *conn = hcon->l2cap_data;
4053
4054 BT_DBG("hcon %p", hcon);
4055
4056 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4057 return 0x13;
4058
4059 return conn->disc_reason;
4060}
4061
4062static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4063{
4064 BT_DBG("hcon %p reason %d", hcon, reason);
4065
4066 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4067 return -EINVAL;
4068
4069 l2cap_conn_del(hcon, bt_to_errno(reason));
4070
4071 return 0;
4072}
4073
4074static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4075{
4076 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4077 return;
4078
4079 if (encrypt == 0x00) {
4080 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4081 __clear_chan_timer(chan);
4082 __set_chan_timer(chan, HZ * 5);
4083 } else if (chan->sec_level == BT_SECURITY_HIGH)
4084 l2cap_chan_close(chan, ECONNREFUSED);
4085 } else {
4086 if (chan->sec_level == BT_SECURITY_MEDIUM)
4087 __clear_chan_timer(chan);
4088 }
4089}
4090
4091static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4092{
4093 struct l2cap_conn *conn = hcon->l2cap_data;
4094 struct l2cap_chan *chan;
4095
4096 if (!conn)
4097 return 0;
4098
4099 BT_DBG("conn %p", conn);
4100
4101 if (hcon->type == LE_LINK) {
4102 smp_distribute_keys(conn, 0);
4103 del_timer(&conn->security_timer);
4104 }
4105
4106 read_lock(&conn->chan_lock);
4107
4108 list_for_each_entry(chan, &conn->chan_l, list) {
4109 struct sock *sk = chan->sk;
4110
4111 bh_lock_sock(sk);
4112
4113 BT_DBG("chan->scid %d", chan->scid);
4114
4115 if (chan->scid == L2CAP_CID_LE_DATA) {
4116 if (!status && encrypt) {
4117 chan->sec_level = hcon->sec_level;
4118 l2cap_chan_ready(sk);
4119 }
4120
4121 bh_unlock_sock(sk);
4122 continue;
4123 }
4124
4125 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4126 bh_unlock_sock(sk);
4127 continue;
4128 }
4129
4130 if (!status && (chan->state == BT_CONNECTED ||
4131 chan->state == BT_CONFIG)) {
4132 l2cap_check_encryption(chan, encrypt);
4133 bh_unlock_sock(sk);
4134 continue;
4135 }
4136
4137 if (chan->state == BT_CONNECT) {
4138 if (!status) {
4139 struct l2cap_conn_req req;
4140 req.scid = cpu_to_le16(chan->scid);
4141 req.psm = chan->psm;
4142
4143 chan->ident = l2cap_get_ident(conn);
4144 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4145
4146 l2cap_send_cmd(conn, chan->ident,
4147 L2CAP_CONN_REQ, sizeof(req), &req);
4148 } else {
4149 __clear_chan_timer(chan);
4150 __set_chan_timer(chan, HZ / 10);
4151 }
4152 } else if (chan->state == BT_CONNECT2) {
4153 struct l2cap_conn_rsp rsp;
4154 __u16 res, stat;
4155
4156 if (!status) {
4157 if (bt_sk(sk)->defer_setup) {
4158 struct sock *parent = bt_sk(sk)->parent;
4159 res = L2CAP_CR_PEND;
4160 stat = L2CAP_CS_AUTHOR_PEND;
4161 if (parent)
4162 parent->sk_data_ready(parent, 0);
4163 } else {
4164 l2cap_state_change(chan, BT_CONFIG);
4165 res = L2CAP_CR_SUCCESS;
4166 stat = L2CAP_CS_NO_INFO;
4167 }
4168 } else {
4169 l2cap_state_change(chan, BT_DISCONN);
4170 __set_chan_timer(chan, HZ / 10);
4171 res = L2CAP_CR_SEC_BLOCK;
4172 stat = L2CAP_CS_NO_INFO;
4173 }
4174
4175 rsp.scid = cpu_to_le16(chan->dcid);
4176 rsp.dcid = cpu_to_le16(chan->scid);
4177 rsp.result = cpu_to_le16(res);
4178 rsp.status = cpu_to_le16(stat);
4179 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4180 sizeof(rsp), &rsp);
4181 }
4182
4183 bh_unlock_sock(sk);
4184 }
4185
4186 read_unlock(&conn->chan_lock);
4187
4188 return 0;
4189}
4190
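/* Reassemble L2CAP frames from ACL data fragments. A start fragment
 * allocates conn->rx_skb sized from the basic L2CAP header;
 * continuation fragments are appended until the frame is complete and
 * can be passed to l2cap_recv_frame().
 */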
4191static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4192{
4193 struct l2cap_conn *conn = hcon->l2cap_data;
4194
4195 if (!conn)
4196 conn = l2cap_conn_add(hcon, 0);
4197
4198 if (!conn)
4199 goto drop;
4200
4201 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4202
4203 if (!(flags & ACL_CONT)) {
4204 struct l2cap_hdr *hdr;
4205 struct l2cap_chan *chan;
4206 u16 cid;
4207 int len;
4208
4209 if (conn->rx_len) {
4210 BT_ERR("Unexpected start frame (len %d)", skb->len);
4211 kfree_skb(conn->rx_skb);
4212 conn->rx_skb = NULL;
4213 conn->rx_len = 0;
4214 l2cap_conn_unreliable(conn, ECOMM);
4215 }
4216
4217 /* Start fragment always begin with Basic L2CAP header */
4218 if (skb->len < L2CAP_HDR_SIZE) {
4219 BT_ERR("Frame is too short (len %d)", skb->len);
4220 l2cap_conn_unreliable(conn, ECOMM);
4221 goto drop;
4222 }
4223
4224 hdr = (struct l2cap_hdr *) skb->data;
4225 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4226 cid = __le16_to_cpu(hdr->cid);
4227
4228 if (len == skb->len) {
4229 /* Complete frame received */
4230 l2cap_recv_frame(conn, skb);
4231 return 0;
4232 }
4233
4234 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4235
4236 if (skb->len > len) {
4237 BT_ERR("Frame is too long (len %d, expected len %d)",
4238 skb->len, len);
4239 l2cap_conn_unreliable(conn, ECOMM);
4240 goto drop;
4241 }
4242
4243 chan = l2cap_get_chan_by_scid(conn, cid);
4244
4245 if (chan && chan->sk) {
4246 struct sock *sk = chan->sk;
4247
4248 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4249 BT_ERR("Frame exceeding recv MTU (len %d, "
4250 "MTU %d)", len,
4251 chan->imtu);
4252 bh_unlock_sock(sk);
4253 l2cap_conn_unreliable(conn, ECOMM);
4254 goto drop;
4255 }
4256 bh_unlock_sock(sk);
4257 }
4258
4259 /* Allocate skb for the complete frame (with header) */
4260 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4261 if (!conn->rx_skb)
4262 goto drop;
4263
4264 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4265 skb->len);
4266 conn->rx_len = len - skb->len;
4267 } else {
4268 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4269
4270 if (!conn->rx_len) {
4271 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4272 l2cap_conn_unreliable(conn, ECOMM);
4273 goto drop;
4274 }
4275
4276 if (skb->len > conn->rx_len) {
4277 BT_ERR("Fragment is too long (len %d, expected %d)",
4278 skb->len, conn->rx_len);
4279 kfree_skb(conn->rx_skb);
4280 conn->rx_skb = NULL;
4281 conn->rx_len = 0;
4282 l2cap_conn_unreliable(conn, ECOMM);
4283 goto drop;
4284 }
4285
4286 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4287 skb->len);
4288 conn->rx_len -= skb->len;
4289
4290 if (!conn->rx_len) {
4291 /* Complete frame received */
4292 l2cap_recv_frame(conn, conn->rx_skb);
4293 conn->rx_skb = NULL;
4294 }
4295 }
4296
4297drop:
4298 kfree_skb(skb);
4299 return 0;
4300}
4301
4302static int l2cap_debugfs_show(struct seq_file *f, void *p)
4303{
4304 struct l2cap_chan *c;
4305
4306 read_lock_bh(&chan_list_lock);
4307
4308 list_for_each_entry(c, &chan_list, global_l) {
4309 struct sock *sk = c->sk;
4310
4311 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4312 batostr(&bt_sk(sk)->src),
4313 batostr(&bt_sk(sk)->dst),
4314 c->state, __le16_to_cpu(c->psm),
4315 c->scid, c->dcid, c->imtu, c->omtu,
4316 c->sec_level, c->mode);
 4317 }
4318
4319 read_unlock_bh(&chan_list_lock);
4320
4321 return 0;
4322}
4323
4324static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4325{
4326 return single_open(file, l2cap_debugfs_show, inode->i_private);
4327}
4328
4329static const struct file_operations l2cap_debugfs_fops = {
4330 .open = l2cap_debugfs_open,
4331 .read = seq_read,
4332 .llseek = seq_lseek,
4333 .release = single_release,
4334};
4335
4336static struct dentry *l2cap_debugfs;
4337
4338static struct hci_proto l2cap_hci_proto = {
4339 .name = "L2CAP",
4340 .id = HCI_PROTO_L2CAP,
4341 .connect_ind = l2cap_connect_ind,
4342 .connect_cfm = l2cap_connect_cfm,
4343 .disconn_ind = l2cap_disconn_ind,
4344 .disconn_cfm = l2cap_disconn_cfm,
4345 .security_cfm = l2cap_security_cfm,
4346 .recv_acldata = l2cap_recv_acldata
4347};
4348
4349int __init l2cap_init(void)
4350{
4351 int err;
4352
4353 err = l2cap_init_sockets();
4354 if (err < 0)
4355 return err;
4356
4357 err = hci_register_proto(&l2cap_hci_proto);
4358 if (err < 0) {
4359 BT_ERR("L2CAP protocol registration failed");
4360 bt_sock_unregister(BTPROTO_L2CAP);
4361 goto error;
4362 }
4363
4364 if (bt_debugfs) {
4365 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4366 bt_debugfs, NULL, &l2cap_debugfs_fops);
4367 if (!l2cap_debugfs)
4368 BT_ERR("Failed to create L2CAP debug file");
4369 }
4370
4371 return 0;
4372
4373error:
4374 l2cap_cleanup_sockets();
4375 return err;
4376}
4377
4378void l2cap_exit(void)
4379{
4380 debugfs_remove(l2cap_debugfs);
4381
4382 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4383 BT_ERR("L2CAP protocol unregistration failed");
4384
4385 l2cap_cleanup_sockets();
4386}
4387
4388module_param(disable_ertm, bool, 0644);
4389MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");