Bluetooth: Fix early return from l2cap_chan_del
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
51 #include <net/sock.h>
52
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 /* ---- L2CAP channels ---- */
77
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 if (c->dcid == cid)
84 return c;
85 }
86 return NULL;
87 }
88
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 {
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98 }
99
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 {
104 struct l2cap_chan *c;
105
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 if (c)
109 l2cap_chan_lock(c);
110 mutex_unlock(&conn->chan_lock);
111
112 return c;
113 }
114
115 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
116 {
117 struct l2cap_chan *c;
118
119 list_for_each_entry(c, &conn->chan_l, list) {
120 if (c->ident == ident)
121 return c;
122 }
123 return NULL;
124 }
125
126 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
127 {
128 struct l2cap_chan *c;
129
130 list_for_each_entry(c, &chan_list, global_l) {
131 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
132 return c;
133 }
134 return NULL;
135 }
136
137 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
138 {
139 int err;
140
141 write_lock(&chan_list_lock);
142
143 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
144 err = -EADDRINUSE;
145 goto done;
146 }
147
148 if (psm) {
149 chan->psm = psm;
150 chan->sport = psm;
151 err = 0;
152 } else {
153 u16 p;
154
155 err = -EINVAL;
156 for (p = 0x1001; p < 0x1100; p += 2)
157 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
158 chan->psm = cpu_to_le16(p);
159 chan->sport = cpu_to_le16(p);
160 err = 0;
161 break;
162 }
163 }
164
165 done:
166 write_unlock(&chan_list_lock);
167 return err;
168 }
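/* Usage sketch (illustrative, not part of the file): passing psm == 0
 * auto-assigns a free odd PSM from the dynamic range 0x1001-0x10ff,
 * while a non-zero psm binds that exact value if it is still free:
 *
 *	err = l2cap_add_psm(chan, src, 0);              // e.g. chan->psm = 0x1001
 *	err = l2cap_add_psm(chan, src, cpu_to_le16(p)); // caller-chosen PSM p
 *
 * -EADDRINUSE is returned when another channel already owns the PSM
 * on the same source address.
 */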
169
170 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
171 {
172 write_lock(&chan_list_lock);
173
174 chan->scid = scid;
175
176 write_unlock(&chan_list_lock);
177
178 return 0;
179 }
180
181 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
182 {
183 u16 cid = L2CAP_CID_DYN_START;
184
185 for (; cid < L2CAP_CID_DYN_END; cid++) {
186 if (!__l2cap_get_chan_by_scid(conn, cid))
187 return cid;
188 }
189
190 return 0;
191 }
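/* Illustrative: L2CAP_CID_DYN_START is 0x0040, so the first dynamic
 * channel on a fresh connection is assigned scid 0x0040, the next free
 * one 0x0041, and so on; 0 is returned only if the whole dynamic range
 * up to L2CAP_CID_DYN_END is exhausted.
 */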
192
193 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
194 {
195 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
196 state_to_string(state));
197
198 chan->state = state;
199 chan->ops->state_change(chan->data, state);
200 }
201
202 static void l2cap_state_change(struct l2cap_chan *chan, int state)
203 {
204 struct sock *sk = chan->sk;
205
206 lock_sock(sk);
207 __l2cap_state_change(chan, state);
208 release_sock(sk);
209 }
210
211 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
212 {
213 struct sock *sk = chan->sk;
214
215 sk->sk_err = err;
216 }
217
218 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
219 {
220 struct sock *sk = chan->sk;
221
222 lock_sock(sk);
223 __l2cap_chan_set_err(chan, err);
224 release_sock(sk);
225 }
226
227 /* ---- L2CAP sequence number lists ---- */
228
229 /* For ERTM, ordered lists of sequence numbers must be tracked for
230 * SREJ requests that are received and for frames that are to be
231 * retransmitted. These seq_list functions implement a singly-linked
232 * list in an array, where membership in the list can also be checked
233 * in constant time. Items can also be added to the tail of the list
234 * and removed from the head in constant time, without further memory
235 * allocs or frees.
236 */
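/* Usage sketch (illustrative; the sequence numbers and window size are
 * made up): how the seq_list primitives below are typically combined
 * by the ERTM code.
 *
 *	struct l2cap_seq_list list;
 *
 *	if (l2cap_seq_list_init(&list, 64))          // tx window of 64 frames
 *		return -ENOMEM;
 *
 *	l2cap_seq_list_append(&list, 5);             // queue txseq 5
 *	l2cap_seq_list_append(&list, 9);             // then txseq 9
 *
 *	if (l2cap_seq_list_contains(&list, 5))       // O(1) membership test
 *		seq = l2cap_seq_list_pop(&list);     // removes 5, head is now 9
 *
 *	l2cap_seq_list_free(&list);
 */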
237
238 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
239 {
240 size_t alloc_size, i;
241
242 /* Allocated size is a power of 2 to map sequence numbers
243 * (which may be up to 14 bits) into a smaller array that is
244 * sized for the negotiated ERTM transmit windows.
245 */
246 alloc_size = roundup_pow_of_two(size);
247
248 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
249 if (!seq_list->list)
250 return -ENOMEM;
251
252 seq_list->mask = alloc_size - 1;
253 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
254 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
255 for (i = 0; i < alloc_size; i++)
256 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
257
258 return 0;
259 }
260
261 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
262 {
263 kfree(seq_list->list);
264 }
265
266 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
267 u16 seq)
268 {
269 /* Constant-time check for list membership */
270 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
271 }
272
273 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
274 {
275 u16 mask = seq_list->mask;
276
277 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
278 /* In case someone tries to pop the head of an empty list */
279 return L2CAP_SEQ_LIST_CLEAR;
280 } else if (seq_list->head == seq) {
281 /* Head can be removed in constant time */
282 seq_list->head = seq_list->list[seq & mask];
283 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
284
285 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
286 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
287 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
288 }
289 } else {
290 /* Walk the list to find the sequence number */
291 u16 prev = seq_list->head;
292 while (seq_list->list[prev & mask] != seq) {
293 prev = seq_list->list[prev & mask];
294 if (prev == L2CAP_SEQ_LIST_TAIL)
295 return L2CAP_SEQ_LIST_CLEAR;
296 }
297
298 /* Unlink the number from the list and clear it */
299 seq_list->list[prev & mask] = seq_list->list[seq & mask];
300 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->tail == seq)
302 seq_list->tail = prev;
303 }
304 return seq;
305 }
306
307 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
308 {
309 /* Remove the head in constant time */
310 return l2cap_seq_list_remove(seq_list, seq_list->head);
311 }
312
313 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
314 {
315 u16 i;
316
317 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
318 return;
319
320 for (i = 0; i <= seq_list->mask; i++)
321 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
322
323 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
324 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
325 }
326
327 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
328 {
329 u16 mask = seq_list->mask;
330
331 /* All appends happen in constant time */
332
333 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
334 return;
335
336 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
337 seq_list->head = seq;
338 else
339 seq_list->list[seq_list->tail & mask] = seq;
340
341 seq_list->tail = seq;
342 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
343 }
344
345 static void l2cap_chan_timeout(struct work_struct *work)
346 {
347 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
348 chan_timer.work);
349 struct l2cap_conn *conn = chan->conn;
350 int reason;
351
352 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
353
354 mutex_lock(&conn->chan_lock);
355 l2cap_chan_lock(chan);
356
357 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
358 reason = ECONNREFUSED;
359 else if (chan->state == BT_CONNECT &&
360 chan->sec_level != BT_SECURITY_SDP)
361 reason = ECONNREFUSED;
362 else
363 reason = ETIMEDOUT;
364
365 l2cap_chan_close(chan, reason);
366
367 l2cap_chan_unlock(chan);
368
369 chan->ops->close(chan->data);
370 mutex_unlock(&conn->chan_lock);
371
372 l2cap_chan_put(chan);
373 }
374
375 struct l2cap_chan *l2cap_chan_create(void)
376 {
377 struct l2cap_chan *chan;
378
379 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
380 if (!chan)
381 return NULL;
382
383 mutex_init(&chan->lock);
384
385 write_lock(&chan_list_lock);
386 list_add(&chan->global_l, &chan_list);
387 write_unlock(&chan_list_lock);
388
389 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
390
391 chan->state = BT_OPEN;
392
393 atomic_set(&chan->refcnt, 1);
394
395 /* This flag is cleared in l2cap_chan_ready() */
396 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
397
398 BT_DBG("chan %p", chan);
399
400 return chan;
401 }
402
403 void l2cap_chan_destroy(struct l2cap_chan *chan)
404 {
405 write_lock(&chan_list_lock);
406 list_del(&chan->global_l);
407 write_unlock(&chan_list_lock);
408
409 l2cap_chan_put(chan);
410 }
411
412 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
413 {
414 chan->fcs = L2CAP_FCS_CRC16;
415 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
416 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
417 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
418 chan->sec_level = BT_SECURITY_LOW;
419
420 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
421 }
422
423 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
424 {
425 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
426 __le16_to_cpu(chan->psm), chan->dcid);
427
428 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
429
430 chan->conn = conn;
431
432 switch (chan->chan_type) {
433 case L2CAP_CHAN_CONN_ORIENTED:
434 if (conn->hcon->type == LE_LINK) {
435 /* LE connection */
436 chan->omtu = L2CAP_LE_DEFAULT_MTU;
437 chan->scid = L2CAP_CID_LE_DATA;
438 chan->dcid = L2CAP_CID_LE_DATA;
439 } else {
440 /* Alloc CID for connection-oriented socket */
441 chan->scid = l2cap_alloc_cid(conn);
442 chan->omtu = L2CAP_DEFAULT_MTU;
443 }
444 break;
445
446 case L2CAP_CHAN_CONN_LESS:
447 /* Connectionless socket */
448 chan->scid = L2CAP_CID_CONN_LESS;
449 chan->dcid = L2CAP_CID_CONN_LESS;
450 chan->omtu = L2CAP_DEFAULT_MTU;
451 break;
452
453 default:
454 /* Raw socket can send/recv signalling messages only */
455 chan->scid = L2CAP_CID_SIGNALING;
456 chan->dcid = L2CAP_CID_SIGNALING;
457 chan->omtu = L2CAP_DEFAULT_MTU;
458 }
459
460 chan->local_id = L2CAP_BESTEFFORT_ID;
461 chan->local_stype = L2CAP_SERV_BESTEFFORT;
462 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
463 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
464 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
465 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
466
467 l2cap_chan_hold(chan);
468
469 list_add(&chan->list, &conn->chan_l);
470 }
471
472 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
473 {
474 mutex_lock(&conn->chan_lock);
475 __l2cap_chan_add(conn, chan);
476 mutex_unlock(&conn->chan_lock);
477 }
478
479 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
480 {
481 struct sock *sk = chan->sk;
482 struct l2cap_conn *conn = chan->conn;
483 struct sock *parent = bt_sk(sk)->parent;
484
485 __clear_chan_timer(chan);
486
487 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
488
489 if (conn) {
490 /* Delete from channel list */
491 list_del(&chan->list);
492
493 l2cap_chan_put(chan);
494
495 chan->conn = NULL;
496 hci_conn_put(conn->hcon);
497 }
498
499 lock_sock(sk);
500
501 __l2cap_state_change(chan, BT_CLOSED);
502 sock_set_flag(sk, SOCK_ZAPPED);
503
504 if (err)
505 __l2cap_chan_set_err(chan, err);
506
507 if (parent) {
508 bt_accept_unlink(sk);
509 parent->sk_data_ready(parent, 0);
510 } else
511 sk->sk_state_change(sk);
512
513 release_sock(sk);
514
515 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
516 return;
517
518 skb_queue_purge(&chan->tx_q);
519
520 if (chan->mode == L2CAP_MODE_ERTM) {
521 struct srej_list *l, *tmp;
522
523 __clear_retrans_timer(chan);
524 __clear_monitor_timer(chan);
525 __clear_ack_timer(chan);
526
527 skb_queue_purge(&chan->srej_q);
528
529 l2cap_seq_list_free(&chan->srej_list);
530 l2cap_seq_list_free(&chan->retrans_list);
531 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
532 list_del(&l->list);
533 kfree(l);
534 }
535 }
536 }
537
538 static void l2cap_chan_cleanup_listen(struct sock *parent)
539 {
540 struct sock *sk;
541
542 BT_DBG("parent %p", parent);
543
544 /* Close not yet accepted channels */
545 while ((sk = bt_accept_dequeue(parent, NULL))) {
546 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
547
548 l2cap_chan_lock(chan);
549 __clear_chan_timer(chan);
550 l2cap_chan_close(chan, ECONNRESET);
551 l2cap_chan_unlock(chan);
552
553 chan->ops->close(chan->data);
554 }
555 }
556
557 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
558 {
559 struct l2cap_conn *conn = chan->conn;
560 struct sock *sk = chan->sk;
561
562 BT_DBG("chan %p state %s sk %p", chan,
563 state_to_string(chan->state), sk);
564
565 switch (chan->state) {
566 case BT_LISTEN:
567 lock_sock(sk);
568 l2cap_chan_cleanup_listen(sk);
569
570 __l2cap_state_change(chan, BT_CLOSED);
571 sock_set_flag(sk, SOCK_ZAPPED);
572 release_sock(sk);
573 break;
574
575 case BT_CONNECTED:
576 case BT_CONFIG:
577 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
578 conn->hcon->type == ACL_LINK) {
579 __set_chan_timer(chan, sk->sk_sndtimeo);
580 l2cap_send_disconn_req(conn, chan, reason);
581 } else
582 l2cap_chan_del(chan, reason);
583 break;
584
585 case BT_CONNECT2:
586 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
587 conn->hcon->type == ACL_LINK) {
588 struct l2cap_conn_rsp rsp;
589 __u16 result;
590
591 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
592 result = L2CAP_CR_SEC_BLOCK;
593 else
594 result = L2CAP_CR_BAD_PSM;
595 l2cap_state_change(chan, BT_DISCONN);
596
597 rsp.scid = cpu_to_le16(chan->dcid);
598 rsp.dcid = cpu_to_le16(chan->scid);
599 rsp.result = cpu_to_le16(result);
600 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
601 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
602 sizeof(rsp), &rsp);
603 }
604
605 l2cap_chan_del(chan, reason);
606 break;
607
608 case BT_CONNECT:
609 case BT_DISCONN:
610 l2cap_chan_del(chan, reason);
611 break;
612
613 default:
614 lock_sock(sk);
615 sock_set_flag(sk, SOCK_ZAPPED);
616 release_sock(sk);
617 break;
618 }
619 }
620
621 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
622 {
623 if (chan->chan_type == L2CAP_CHAN_RAW) {
624 switch (chan->sec_level) {
625 case BT_SECURITY_HIGH:
626 return HCI_AT_DEDICATED_BONDING_MITM;
627 case BT_SECURITY_MEDIUM:
628 return HCI_AT_DEDICATED_BONDING;
629 default:
630 return HCI_AT_NO_BONDING;
631 }
632 } else if (chan->psm == cpu_to_le16(0x0001)) {
633 if (chan->sec_level == BT_SECURITY_LOW)
634 chan->sec_level = BT_SECURITY_SDP;
635
636 if (chan->sec_level == BT_SECURITY_HIGH)
637 return HCI_AT_NO_BONDING_MITM;
638 else
639 return HCI_AT_NO_BONDING;
640 } else {
641 switch (chan->sec_level) {
642 case BT_SECURITY_HIGH:
643 return HCI_AT_GENERAL_BONDING_MITM;
644 case BT_SECURITY_MEDIUM:
645 return HCI_AT_GENERAL_BONDING;
646 default:
647 return HCI_AT_NO_BONDING;
648 }
649 }
650 }
651
652 /* Service level security */
653 int l2cap_chan_check_security(struct l2cap_chan *chan)
654 {
655 struct l2cap_conn *conn = chan->conn;
656 __u8 auth_type;
657
658 auth_type = l2cap_get_auth_type(chan);
659
660 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
661 }
662
663 static u8 l2cap_get_ident(struct l2cap_conn *conn)
664 {
665 u8 id;
666
667 /* Get next available identificator.
668 * 1 - 128 are used by kernel.
669 * 129 - 199 are reserved.
670 * 200 - 254 are used by utilities like l2ping, etc.
671 */
672
673 spin_lock(&conn->lock);
674
675 if (++conn->tx_ident > 128)
676 conn->tx_ident = 1;
677
678 id = conn->tx_ident;
679
680 spin_unlock(&conn->lock);
681
682 return id;
683 }
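/* Illustrative: tx_ident cycles through 1..128, so successive calls
 * return 1, 2, ..., 128, 1, ... and never hand out 0, which is not a
 * valid signalling identifier.
 */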
684
685 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
686 {
687 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
688 u8 flags;
689
690 BT_DBG("code 0x%2.2x", code);
691
692 if (!skb)
693 return;
694
695 if (lmp_no_flush_capable(conn->hcon->hdev))
696 flags = ACL_START_NO_FLUSH;
697 else
698 flags = ACL_START;
699
700 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
701 skb->priority = HCI_PRIO_MAX;
702
703 hci_send_acl(conn->hchan, skb, flags);
704 }
705
706 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
707 {
708 struct hci_conn *hcon = chan->conn->hcon;
709 u16 flags;
710
711 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
712 skb->priority);
713
714 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
715 lmp_no_flush_capable(hcon->hdev))
716 flags = ACL_START_NO_FLUSH;
717 else
718 flags = ACL_START;
719
720 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
721 hci_send_acl(chan->conn->hchan, skb, flags);
722 }
723
724 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
725 {
726 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
727 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
728
729 if (enh & L2CAP_CTRL_FRAME_TYPE) {
730 /* S-Frame */
731 control->sframe = 1;
732 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
733 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
734
735 control->sar = 0;
736 control->txseq = 0;
737 } else {
738 /* I-Frame */
739 control->sframe = 0;
740 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
741 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
742
743 control->poll = 0;
744 control->super = 0;
745 }
746 }
747
748 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
749 {
750 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
751 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
752
753 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
754 /* S-Frame */
755 control->sframe = 1;
756 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
757 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
758
759 control->sar = 0;
760 control->txseq = 0;
761 } else {
762 /* I-Frame */
763 control->sframe = 0;
764 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
765 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
766
767 control->poll = 0;
768 control->super = 0;
769 }
770 }
771
772 static inline void __unpack_control(struct l2cap_chan *chan,
773 struct sk_buff *skb)
774 {
775 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
776 __unpack_extended_control(get_unaligned_le32(skb->data),
777 &bt_cb(skb)->control);
778 } else {
779 __unpack_enhanced_control(get_unaligned_le16(skb->data),
780 &bt_cb(skb)->control);
781 }
782 }
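/* Worked example (illustrative value): with the standard enhanced
 * control field layout (bit 0 = frame type, bits 1-6 = TxSeq,
 * bit 7 = F, bits 8-13 = ReqSeq, bits 14-15 = SAR), unpacking 0x0304
 * yields an I-frame with txseq = 2 and reqseq = 3:
 *
 *	struct l2cap_ctrl ctrl;
 *
 *	__unpack_enhanced_control(0x0304, &ctrl);
 *	// ctrl.sframe == 0, ctrl.txseq == 2, ctrl.reqseq == 3,
 *	// ctrl.final == 0, ctrl.sar == 0
 *
 * __pack_enhanced_control() below performs the inverse mapping.
 */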
783
784 static u32 __pack_extended_control(struct l2cap_ctrl *control)
785 {
786 u32 packed;
787
788 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
789 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
790
791 if (control->sframe) {
792 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
793 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
794 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
795 } else {
796 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
797 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
798 }
799
800 return packed;
801 }
802
803 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
804 {
805 u16 packed;
806
807 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
808 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
809
810 if (control->sframe) {
811 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
812 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
813 packed |= L2CAP_CTRL_FRAME_TYPE;
814 } else {
815 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
816 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
817 }
818
819 return packed;
820 }
821
822 static inline void __pack_control(struct l2cap_chan *chan,
823 struct l2cap_ctrl *control,
824 struct sk_buff *skb)
825 {
826 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
827 put_unaligned_le32(__pack_extended_control(control),
828 skb->data + L2CAP_HDR_SIZE);
829 } else {
830 put_unaligned_le16(__pack_enhanced_control(control),
831 skb->data + L2CAP_HDR_SIZE);
832 }
833 }
834
835 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
836 {
837 struct sk_buff *skb;
838 struct l2cap_hdr *lh;
839 struct l2cap_conn *conn = chan->conn;
840 int count, hlen;
841
842 if (chan->state != BT_CONNECTED)
843 return;
844
845 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
846 hlen = L2CAP_EXT_HDR_SIZE;
847 else
848 hlen = L2CAP_ENH_HDR_SIZE;
849
850 if (chan->fcs == L2CAP_FCS_CRC16)
851 hlen += L2CAP_FCS_SIZE;
852
853 BT_DBG("chan %p, control 0x%8.8x", chan, control);
854
855 count = min_t(unsigned int, conn->mtu, hlen);
856
857 control |= __set_sframe(chan);
858
859 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
860 control |= __set_ctrl_final(chan);
861
862 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
863 control |= __set_ctrl_poll(chan);
864
865 skb = bt_skb_alloc(count, GFP_ATOMIC);
866 if (!skb)
867 return;
868
869 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
870 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
871 lh->cid = cpu_to_le16(chan->dcid);
872
873 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
874
875 if (chan->fcs == L2CAP_FCS_CRC16) {
876 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
877 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
878 }
879
880 skb->priority = HCI_PRIO_MAX;
881 l2cap_do_send(chan, skb);
882 }
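/* Sizing note (illustrative): an S-frame is header-only, so hlen above
 * is the whole PDU. Assuming the usual constants (4-byte basic header,
 * 2-byte enhanced control field, 2-byte FCS), a CRC16-protected RR
 * frame is L2CAP_ENH_HDR_SIZE + L2CAP_FCS_SIZE = 6 + 2 = 8 bytes on
 * the wire, and lh->len carries the 4 bytes that follow the basic
 * header. With extended control the control field grows to 4 bytes.
 */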
883
884 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
885 {
886 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
887 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
888 set_bit(CONN_RNR_SENT, &chan->conn_state);
889 } else
890 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
891
892 control |= __set_reqseq(chan, chan->buffer_seq);
893
894 l2cap_send_sframe(chan, control);
895 }
896
897 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
898 {
899 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
900 }
901
902 static void l2cap_send_conn_req(struct l2cap_chan *chan)
903 {
904 struct l2cap_conn *conn = chan->conn;
905 struct l2cap_conn_req req;
906
907 req.scid = cpu_to_le16(chan->scid);
908 req.psm = chan->psm;
909
910 chan->ident = l2cap_get_ident(conn);
911
912 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
913
914 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
915 }
916
917 static void l2cap_chan_ready(struct l2cap_chan *chan)
918 {
919 struct sock *sk = chan->sk;
920 struct sock *parent;
921
922 lock_sock(sk);
923
924 parent = bt_sk(sk)->parent;
925
926 BT_DBG("sk %p, parent %p", sk, parent);
927
928 /* This clears all conf flags, including CONF_NOT_COMPLETE */
929 chan->conf_state = 0;
930 __clear_chan_timer(chan);
931
932 __l2cap_state_change(chan, BT_CONNECTED);
933 sk->sk_state_change(sk);
934
935 if (parent)
936 parent->sk_data_ready(parent, 0);
937
938 release_sock(sk);
939 }
940
941 static void l2cap_do_start(struct l2cap_chan *chan)
942 {
943 struct l2cap_conn *conn = chan->conn;
944
945 if (conn->hcon->type == LE_LINK) {
946 l2cap_chan_ready(chan);
947 return;
948 }
949
950 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
951 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
952 return;
953
954 if (l2cap_chan_check_security(chan) &&
955 __l2cap_no_conn_pending(chan))
956 l2cap_send_conn_req(chan);
957 } else {
958 struct l2cap_info_req req;
959 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
960
961 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
962 conn->info_ident = l2cap_get_ident(conn);
963
964 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
965
966 l2cap_send_cmd(conn, conn->info_ident,
967 L2CAP_INFO_REQ, sizeof(req), &req);
968 }
969 }
970
971 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
972 {
973 u32 local_feat_mask = l2cap_feat_mask;
974 if (!disable_ertm)
975 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
976
977 switch (mode) {
978 case L2CAP_MODE_ERTM:
979 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
980 case L2CAP_MODE_STREAMING:
981 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
982 default:
983 return 0x00;
984 }
985 }
986
987 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
988 {
989 struct sock *sk = chan->sk;
990 struct l2cap_disconn_req req;
991
992 if (!conn)
993 return;
994
995 if (chan->mode == L2CAP_MODE_ERTM) {
996 __clear_retrans_timer(chan);
997 __clear_monitor_timer(chan);
998 __clear_ack_timer(chan);
999 }
1000
1001 req.dcid = cpu_to_le16(chan->dcid);
1002 req.scid = cpu_to_le16(chan->scid);
1003 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1004 L2CAP_DISCONN_REQ, sizeof(req), &req);
1005
1006 lock_sock(sk);
1007 __l2cap_state_change(chan, BT_DISCONN);
1008 __l2cap_chan_set_err(chan, err);
1009 release_sock(sk);
1010 }
1011
1012 /* ---- L2CAP connections ---- */
1013 static void l2cap_conn_start(struct l2cap_conn *conn)
1014 {
1015 struct l2cap_chan *chan, *tmp;
1016
1017 BT_DBG("conn %p", conn);
1018
1019 mutex_lock(&conn->chan_lock);
1020
1021 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1022 struct sock *sk = chan->sk;
1023
1024 l2cap_chan_lock(chan);
1025
1026 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1027 l2cap_chan_unlock(chan);
1028 continue;
1029 }
1030
1031 if (chan->state == BT_CONNECT) {
1032 if (!l2cap_chan_check_security(chan) ||
1033 !__l2cap_no_conn_pending(chan)) {
1034 l2cap_chan_unlock(chan);
1035 continue;
1036 }
1037
1038 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1039 && test_bit(CONF_STATE2_DEVICE,
1040 &chan->conf_state)) {
1041 l2cap_chan_close(chan, ECONNRESET);
1042 l2cap_chan_unlock(chan);
1043 continue;
1044 }
1045
1046 l2cap_send_conn_req(chan);
1047
1048 } else if (chan->state == BT_CONNECT2) {
1049 struct l2cap_conn_rsp rsp;
1050 char buf[128];
1051 rsp.scid = cpu_to_le16(chan->dcid);
1052 rsp.dcid = cpu_to_le16(chan->scid);
1053
1054 if (l2cap_chan_check_security(chan)) {
1055 lock_sock(sk);
1056 if (test_bit(BT_SK_DEFER_SETUP,
1057 &bt_sk(sk)->flags)) {
1058 struct sock *parent = bt_sk(sk)->parent;
1059 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1060 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1061 if (parent)
1062 parent->sk_data_ready(parent, 0);
1063
1064 } else {
1065 __l2cap_state_change(chan, BT_CONFIG);
1066 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1067 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1068 }
1069 release_sock(sk);
1070 } else {
1071 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1072 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1073 }
1074
1075 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1076 sizeof(rsp), &rsp);
1077
1078 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1079 rsp.result != L2CAP_CR_SUCCESS) {
1080 l2cap_chan_unlock(chan);
1081 continue;
1082 }
1083
1084 set_bit(CONF_REQ_SENT, &chan->conf_state);
1085 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1086 l2cap_build_conf_req(chan, buf), buf);
1087 chan->num_conf_req++;
1088 }
1089
1090 l2cap_chan_unlock(chan);
1091 }
1092
1093 mutex_unlock(&conn->chan_lock);
1094 }
1095
1096 /* Find socket with cid and source/destination bdaddr.
1097 * Returns closest match.
1098 */
1099 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1100 bdaddr_t *src,
1101 bdaddr_t *dst)
1102 {
1103 struct l2cap_chan *c, *c1 = NULL;
1104
1105 read_lock(&chan_list_lock);
1106
1107 list_for_each_entry(c, &chan_list, global_l) {
1108 struct sock *sk = c->sk;
1109
1110 if (state && c->state != state)
1111 continue;
1112
1113 if (c->scid == cid) {
1114 int src_match, dst_match;
1115 int src_any, dst_any;
1116
1117 /* Exact match. */
1118 src_match = !bacmp(&bt_sk(sk)->src, src);
1119 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1120 if (src_match && dst_match) {
1121 read_unlock(&chan_list_lock);
1122 return c;
1123 }
1124
1125 /* Closest match */
1126 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1127 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1128 if ((src_match && dst_any) || (src_any && dst_match) ||
1129 (src_any && dst_any))
1130 c1 = c;
1131 }
1132 }
1133
1134 read_unlock(&chan_list_lock);
1135
1136 return c1;
1137 }
1138
1139 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1140 {
1141 struct sock *parent, *sk;
1142 struct l2cap_chan *chan, *pchan;
1143
1144 BT_DBG("");
1145
1146 /* Check if we have socket listening on cid */
1147 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1148 conn->src, conn->dst);
1149 if (!pchan)
1150 return;
1151
1152 parent = pchan->sk;
1153
1154 lock_sock(parent);
1155
1156 /* Check for backlog size */
1157 if (sk_acceptq_is_full(parent)) {
1158 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1159 goto clean;
1160 }
1161
1162 chan = pchan->ops->new_connection(pchan->data);
1163 if (!chan)
1164 goto clean;
1165
1166 sk = chan->sk;
1167
1168 hci_conn_hold(conn->hcon);
1169
1170 bacpy(&bt_sk(sk)->src, conn->src);
1171 bacpy(&bt_sk(sk)->dst, conn->dst);
1172
1173 bt_accept_enqueue(parent, sk);
1174
1175 l2cap_chan_add(conn, chan);
1176
1177 __set_chan_timer(chan, sk->sk_sndtimeo);
1178
1179 __l2cap_state_change(chan, BT_CONNECTED);
1180 parent->sk_data_ready(parent, 0);
1181
1182 clean:
1183 release_sock(parent);
1184 }
1185
1186 static void l2cap_conn_ready(struct l2cap_conn *conn)
1187 {
1188 struct l2cap_chan *chan;
1189
1190 BT_DBG("conn %p", conn);
1191
1192 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1193 l2cap_le_conn_ready(conn);
1194
1195 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1196 smp_conn_security(conn, conn->hcon->pending_sec_level);
1197
1198 mutex_lock(&conn->chan_lock);
1199
1200 list_for_each_entry(chan, &conn->chan_l, list) {
1201
1202 l2cap_chan_lock(chan);
1203
1204 if (conn->hcon->type == LE_LINK) {
1205 if (smp_conn_security(conn, chan->sec_level))
1206 l2cap_chan_ready(chan);
1207
1208 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1209 struct sock *sk = chan->sk;
1210 __clear_chan_timer(chan);
1211 lock_sock(sk);
1212 __l2cap_state_change(chan, BT_CONNECTED);
1213 sk->sk_state_change(sk);
1214 release_sock(sk);
1215
1216 } else if (chan->state == BT_CONNECT)
1217 l2cap_do_start(chan);
1218
1219 l2cap_chan_unlock(chan);
1220 }
1221
1222 mutex_unlock(&conn->chan_lock);
1223 }
1224
1225 /* Notify sockets that we cannot guarantee reliability anymore */
1226 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1227 {
1228 struct l2cap_chan *chan;
1229
1230 BT_DBG("conn %p", conn);
1231
1232 mutex_lock(&conn->chan_lock);
1233
1234 list_for_each_entry(chan, &conn->chan_l, list) {
1235 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1236 __l2cap_chan_set_err(chan, err);
1237 }
1238
1239 mutex_unlock(&conn->chan_lock);
1240 }
1241
1242 static void l2cap_info_timeout(struct work_struct *work)
1243 {
1244 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1245 info_timer.work);
1246
1247 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1248 conn->info_ident = 0;
1249
1250 l2cap_conn_start(conn);
1251 }
1252
1253 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1254 {
1255 struct l2cap_conn *conn = hcon->l2cap_data;
1256 struct l2cap_chan *chan, *l;
1257
1258 if (!conn)
1259 return;
1260
1261 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1262
1263 kfree_skb(conn->rx_skb);
1264
1265 mutex_lock(&conn->chan_lock);
1266
1267 /* Kill channels */
1268 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1269 l2cap_chan_hold(chan);
1270 l2cap_chan_lock(chan);
1271
1272 l2cap_chan_del(chan, err);
1273
1274 l2cap_chan_unlock(chan);
1275
1276 chan->ops->close(chan->data);
1277 l2cap_chan_put(chan);
1278 }
1279
1280 mutex_unlock(&conn->chan_lock);
1281
1282 hci_chan_del(conn->hchan);
1283
1284 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1285 cancel_delayed_work_sync(&conn->info_timer);
1286
1287 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1288 cancel_delayed_work_sync(&conn->security_timer);
1289 smp_chan_destroy(conn);
1290 }
1291
1292 hcon->l2cap_data = NULL;
1293 kfree(conn);
1294 }
1295
1296 static void security_timeout(struct work_struct *work)
1297 {
1298 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1299 security_timer.work);
1300
1301 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1302 }
1303
1304 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1305 {
1306 struct l2cap_conn *conn = hcon->l2cap_data;
1307 struct hci_chan *hchan;
1308
1309 if (conn || status)
1310 return conn;
1311
1312 hchan = hci_chan_create(hcon);
1313 if (!hchan)
1314 return NULL;
1315
1316 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1317 if (!conn) {
1318 hci_chan_del(hchan);
1319 return NULL;
1320 }
1321
1322 hcon->l2cap_data = conn;
1323 conn->hcon = hcon;
1324 conn->hchan = hchan;
1325
1326 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1327
1328 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1329 conn->mtu = hcon->hdev->le_mtu;
1330 else
1331 conn->mtu = hcon->hdev->acl_mtu;
1332
1333 conn->src = &hcon->hdev->bdaddr;
1334 conn->dst = &hcon->dst;
1335
1336 conn->feat_mask = 0;
1337
1338 spin_lock_init(&conn->lock);
1339 mutex_init(&conn->chan_lock);
1340
1341 INIT_LIST_HEAD(&conn->chan_l);
1342
1343 if (hcon->type == LE_LINK)
1344 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1345 else
1346 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1347
1348 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1349
1350 return conn;
1351 }
1352
1353 /* ---- Socket interface ---- */
1354
1355 /* Find socket with psm and source / destination bdaddr.
1356 * Returns closest match.
1357 */
1358 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1359 bdaddr_t *src,
1360 bdaddr_t *dst)
1361 {
1362 struct l2cap_chan *c, *c1 = NULL;
1363
1364 read_lock(&chan_list_lock);
1365
1366 list_for_each_entry(c, &chan_list, global_l) {
1367 struct sock *sk = c->sk;
1368
1369 if (state && c->state != state)
1370 continue;
1371
1372 if (c->psm == psm) {
1373 int src_match, dst_match;
1374 int src_any, dst_any;
1375
1376 /* Exact match. */
1377 src_match = !bacmp(&bt_sk(sk)->src, src);
1378 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1379 if (src_match && dst_match) {
1380 read_unlock(&chan_list_lock);
1381 return c;
1382 }
1383
1384 /* Closest match */
1385 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1386 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1387 if ((src_match && dst_any) || (src_any && dst_match) ||
1388 (src_any && dst_any))
1389 c1 = c;
1390 }
1391 }
1392
1393 read_unlock(&chan_list_lock);
1394
1395 return c1;
1396 }
1397
1398 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1399 bdaddr_t *dst, u8 dst_type)
1400 {
1401 struct sock *sk = chan->sk;
1402 bdaddr_t *src = &bt_sk(sk)->src;
1403 struct l2cap_conn *conn;
1404 struct hci_conn *hcon;
1405 struct hci_dev *hdev;
1406 __u8 auth_type;
1407 int err;
1408
1409 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1410 dst_type, __le16_to_cpu(chan->psm));
1411
1412 hdev = hci_get_route(dst, src);
1413 if (!hdev)
1414 return -EHOSTUNREACH;
1415
1416 hci_dev_lock(hdev);
1417
1418 l2cap_chan_lock(chan);
1419
1420 /* PSM must be odd and lsb of upper byte must be 0 */
1421 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1422 chan->chan_type != L2CAP_CHAN_RAW) {
1423 err = -EINVAL;
1424 goto done;
1425 }
1426
1427 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1428 err = -EINVAL;
1429 goto done;
1430 }
1431
1432 switch (chan->mode) {
1433 case L2CAP_MODE_BASIC:
1434 break;
1435 case L2CAP_MODE_ERTM:
1436 case L2CAP_MODE_STREAMING:
1437 if (!disable_ertm)
1438 break;
1439 /* fall through */
1440 default:
1441 err = -ENOTSUPP;
1442 goto done;
1443 }
1444
1445 lock_sock(sk);
1446
1447 switch (sk->sk_state) {
1448 case BT_CONNECT:
1449 case BT_CONNECT2:
1450 case BT_CONFIG:
1451 /* Already connecting */
1452 err = 0;
1453 release_sock(sk);
1454 goto done;
1455
1456 case BT_CONNECTED:
1457 /* Already connected */
1458 err = -EISCONN;
1459 release_sock(sk);
1460 goto done;
1461
1462 case BT_OPEN:
1463 case BT_BOUND:
1464 /* Can connect */
1465 break;
1466
1467 default:
1468 err = -EBADFD;
1469 release_sock(sk);
1470 goto done;
1471 }
1472
1473 /* Set destination address and psm */
1474 bacpy(&bt_sk(sk)->dst, dst);
1475
1476 release_sock(sk);
1477
1478 chan->psm = psm;
1479 chan->dcid = cid;
1480
1481 auth_type = l2cap_get_auth_type(chan);
1482
1483 if (chan->dcid == L2CAP_CID_LE_DATA)
1484 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1485 chan->sec_level, auth_type);
1486 else
1487 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1488 chan->sec_level, auth_type);
1489
1490 if (IS_ERR(hcon)) {
1491 err = PTR_ERR(hcon);
1492 goto done;
1493 }
1494
1495 conn = l2cap_conn_add(hcon, 0);
1496 if (!conn) {
1497 hci_conn_put(hcon);
1498 err = -ENOMEM;
1499 goto done;
1500 }
1501
1502 if (hcon->type == LE_LINK) {
1503 err = 0;
1504
1505 if (!list_empty(&conn->chan_l)) {
1506 err = -EBUSY;
1507 hci_conn_put(hcon);
1508 }
1509
1510 if (err)
1511 goto done;
1512 }
1513
1514 /* Update source addr of the socket */
1515 bacpy(src, conn->src);
1516
1517 l2cap_chan_unlock(chan);
1518 l2cap_chan_add(conn, chan);
1519 l2cap_chan_lock(chan);
1520
1521 l2cap_state_change(chan, BT_CONNECT);
1522 __set_chan_timer(chan, sk->sk_sndtimeo);
1523
1524 if (hcon->state == BT_CONNECTED) {
1525 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1526 __clear_chan_timer(chan);
1527 if (l2cap_chan_check_security(chan))
1528 l2cap_state_change(chan, BT_CONNECTED);
1529 } else
1530 l2cap_do_start(chan);
1531 }
1532
1533 err = 0;
1534
1535 done:
1536 l2cap_chan_unlock(chan);
1537 hci_dev_unlock(hdev);
1538 hci_dev_put(hdev);
1539 return err;
1540 }
1541
1542 int __l2cap_wait_ack(struct sock *sk)
1543 {
1544 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1545 DECLARE_WAITQUEUE(wait, current);
1546 int err = 0;
1547 int timeo = HZ/5;
1548
1549 add_wait_queue(sk_sleep(sk), &wait);
1550 set_current_state(TASK_INTERRUPTIBLE);
1551 while (chan->unacked_frames > 0 && chan->conn) {
1552 if (!timeo)
1553 timeo = HZ/5;
1554
1555 if (signal_pending(current)) {
1556 err = sock_intr_errno(timeo);
1557 break;
1558 }
1559
1560 release_sock(sk);
1561 timeo = schedule_timeout(timeo);
1562 lock_sock(sk);
1563 set_current_state(TASK_INTERRUPTIBLE);
1564
1565 err = sock_error(sk);
1566 if (err)
1567 break;
1568 }
1569 set_current_state(TASK_RUNNING);
1570 remove_wait_queue(sk_sleep(sk), &wait);
1571 return err;
1572 }
1573
1574 static void l2cap_monitor_timeout(struct work_struct *work)
1575 {
1576 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1577 monitor_timer.work);
1578
1579 BT_DBG("chan %p", chan);
1580
1581 l2cap_chan_lock(chan);
1582
1583 if (chan->retry_count >= chan->remote_max_tx) {
1584 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1585 l2cap_chan_unlock(chan);
1586 l2cap_chan_put(chan);
1587 return;
1588 }
1589
1590 chan->retry_count++;
1591 __set_monitor_timer(chan);
1592
1593 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1594 l2cap_chan_unlock(chan);
1595 l2cap_chan_put(chan);
1596 }
1597
1598 static void l2cap_retrans_timeout(struct work_struct *work)
1599 {
1600 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1601 retrans_timer.work);
1602
1603 BT_DBG("chan %p", chan);
1604
1605 l2cap_chan_lock(chan);
1606
1607 chan->retry_count = 1;
1608 __set_monitor_timer(chan);
1609
1610 set_bit(CONN_WAIT_F, &chan->conn_state);
1611
1612 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1613
1614 l2cap_chan_unlock(chan);
1615 l2cap_chan_put(chan);
1616 }
1617
1618 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1619 {
1620 struct sk_buff *skb;
1621
1622 while ((skb = skb_peek(&chan->tx_q)) &&
1623 chan->unacked_frames) {
1624 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1625 break;
1626
1627 skb = skb_dequeue(&chan->tx_q);
1628 kfree_skb(skb);
1629
1630 chan->unacked_frames--;
1631 }
1632
1633 if (!chan->unacked_frames)
1634 __clear_retrans_timer(chan);
1635 }
1636
1637 static void l2cap_streaming_send(struct l2cap_chan *chan)
1638 {
1639 struct sk_buff *skb;
1640 u32 control;
1641 u16 fcs;
1642
1643 while ((skb = skb_dequeue(&chan->tx_q))) {
1644 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1645 control |= __set_txseq(chan, chan->next_tx_seq);
1646 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1647 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1648
1649 if (chan->fcs == L2CAP_FCS_CRC16) {
1650 fcs = crc16(0, (u8 *)skb->data,
1651 skb->len - L2CAP_FCS_SIZE);
1652 put_unaligned_le16(fcs,
1653 skb->data + skb->len - L2CAP_FCS_SIZE);
1654 }
1655
1656 l2cap_do_send(chan, skb);
1657
1658 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1659 }
1660 }
1661
1662 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1663 {
1664 struct sk_buff *skb, *tx_skb;
1665 u16 fcs;
1666 u32 control;
1667
1668 skb = skb_peek(&chan->tx_q);
1669 if (!skb)
1670 return;
1671
1672 while (bt_cb(skb)->control.txseq != tx_seq) {
1673 if (skb_queue_is_last(&chan->tx_q, skb))
1674 return;
1675
1676 skb = skb_queue_next(&chan->tx_q, skb);
1677 }
1678
1679 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1680 chan->remote_max_tx) {
1681 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1682 return;
1683 }
1684
1685 tx_skb = skb_clone(skb, GFP_ATOMIC);
1686 bt_cb(skb)->control.retries++;
1687
1688 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1689 control &= __get_sar_mask(chan);
1690
1691 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1692 control |= __set_ctrl_final(chan);
1693
1694 control |= __set_reqseq(chan, chan->buffer_seq);
1695 control |= __set_txseq(chan, tx_seq);
1696
1697 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1698
1699 if (chan->fcs == L2CAP_FCS_CRC16) {
1700 fcs = crc16(0, (u8 *)tx_skb->data,
1701 tx_skb->len - L2CAP_FCS_SIZE);
1702 put_unaligned_le16(fcs,
1703 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1704 }
1705
1706 l2cap_do_send(chan, tx_skb);
1707 }
1708
1709 static int l2cap_ertm_send(struct l2cap_chan *chan)
1710 {
1711 struct sk_buff *skb, *tx_skb;
1712 u16 fcs;
1713 u32 control;
1714 int nsent = 0;
1715
1716 if (chan->state != BT_CONNECTED)
1717 return -ENOTCONN;
1718
1719 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1720 return 0;
1721
1722 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1723
1724 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1725 chan->remote_max_tx) {
1726 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1727 break;
1728 }
1729
1730 tx_skb = skb_clone(skb, GFP_ATOMIC);
1731
1732 bt_cb(skb)->control.retries++;
1733
1734 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1735 control &= __get_sar_mask(chan);
1736
1737 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1738 control |= __set_ctrl_final(chan);
1739
1740 control |= __set_reqseq(chan, chan->buffer_seq);
1741 control |= __set_txseq(chan, chan->next_tx_seq);
1742 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1743
1744 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1745
1746 if (chan->fcs == L2CAP_FCS_CRC16) {
1747 fcs = crc16(0, (u8 *)skb->data,
1748 tx_skb->len - L2CAP_FCS_SIZE);
1749 put_unaligned_le16(fcs, skb->data +
1750 tx_skb->len - L2CAP_FCS_SIZE);
1751 }
1752
1753 l2cap_do_send(chan, tx_skb);
1754
1755 __set_retrans_timer(chan);
1756
1757 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1758
1759 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1760
1761 if (bt_cb(skb)->control.retries == 1) {
1762 chan->unacked_frames++;
1763
1764 if (!nsent++)
1765 __clear_ack_timer(chan);
1766 }
1767
1768 chan->frames_sent++;
1769
1770 if (skb_queue_is_last(&chan->tx_q, skb))
1771 chan->tx_send_head = NULL;
1772 else
1773 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1774 }
1775
1776 return nsent;
1777 }
1778
1779 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1780 {
1781 int ret;
1782
1783 if (!skb_queue_empty(&chan->tx_q))
1784 chan->tx_send_head = chan->tx_q.next;
1785
1786 chan->next_tx_seq = chan->expected_ack_seq;
1787 ret = l2cap_ertm_send(chan);
1788 return ret;
1789 }
1790
1791 static void __l2cap_send_ack(struct l2cap_chan *chan)
1792 {
1793 u32 control = 0;
1794
1795 control |= __set_reqseq(chan, chan->buffer_seq);
1796
1797 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1798 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1799 set_bit(CONN_RNR_SENT, &chan->conn_state);
1800 l2cap_send_sframe(chan, control);
1801 return;
1802 }
1803
1804 if (l2cap_ertm_send(chan) > 0)
1805 return;
1806
1807 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1808 l2cap_send_sframe(chan, control);
1809 }
1810
1811 static void l2cap_send_ack(struct l2cap_chan *chan)
1812 {
1813 __clear_ack_timer(chan);
1814 __l2cap_send_ack(chan);
1815 }
1816
1817 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1818 {
1819 struct srej_list *tail;
1820 u32 control;
1821
1822 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1823 control |= __set_ctrl_final(chan);
1824
1825 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1826 control |= __set_reqseq(chan, tail->tx_seq);
1827
1828 l2cap_send_sframe(chan, control);
1829 }
1830
1831 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1832 struct msghdr *msg, int len,
1833 int count, struct sk_buff *skb)
1834 {
1835 struct l2cap_conn *conn = chan->conn;
1836 struct sk_buff **frag;
1837 int sent = 0;
1838
1839 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1840 return -EFAULT;
1841
1842 sent += count;
1843 len -= count;
1844
1845 /* Continuation fragments (no L2CAP header) */
1846 frag = &skb_shinfo(skb)->frag_list;
1847 while (len) {
1848 struct sk_buff *tmp;
1849
1850 count = min_t(unsigned int, conn->mtu, len);
1851
1852 tmp = chan->ops->alloc_skb(chan, count,
1853 msg->msg_flags & MSG_DONTWAIT);
1854 if (IS_ERR(tmp))
1855 return PTR_ERR(tmp);
1856
1857 *frag = tmp;
1858
1859 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1860 return -EFAULT;
1861
1862 (*frag)->priority = skb->priority;
1863
1864 sent += count;
1865 len -= count;
1866
1867 skb->len += (*frag)->len;
1868 skb->data_len += (*frag)->len;
1869
1870 frag = &(*frag)->next;
1871 }
1872
1873 return sent;
1874 }
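/* Illustrative example (made-up sizes): called from
 * l2cap_create_basic_pdu() with conn->mtu == 672 and a 1500 byte
 * payload, the first 668 bytes share the skb that already holds the
 * 4-byte L2CAP header, and the remaining 832 bytes are copied into
 * continuation skbs of 672 and 160 bytes chained on
 * skb_shinfo(skb)->frag_list, ready to be sent as separate ACL
 * fragments.
 */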
1875
1876 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1877 struct msghdr *msg, size_t len,
1878 u32 priority)
1879 {
1880 struct l2cap_conn *conn = chan->conn;
1881 struct sk_buff *skb;
1882 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1883 struct l2cap_hdr *lh;
1884
1885 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1886
1887 count = min_t(unsigned int, (conn->mtu - hlen), len);
1888
1889 skb = chan->ops->alloc_skb(chan, count + hlen,
1890 msg->msg_flags & MSG_DONTWAIT);
1891 if (IS_ERR(skb))
1892 return skb;
1893
1894 skb->priority = priority;
1895
1896 /* Create L2CAP header */
1897 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1898 lh->cid = cpu_to_le16(chan->dcid);
1899 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1900 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1901
1902 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1903 if (unlikely(err < 0)) {
1904 kfree_skb(skb);
1905 return ERR_PTR(err);
1906 }
1907 return skb;
1908 }
1909
1910 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1911 struct msghdr *msg, size_t len,
1912 u32 priority)
1913 {
1914 struct l2cap_conn *conn = chan->conn;
1915 struct sk_buff *skb;
1916 int err, count;
1917 struct l2cap_hdr *lh;
1918
1919 BT_DBG("chan %p len %d", chan, (int)len);
1920
1921 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1922
1923 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1924 msg->msg_flags & MSG_DONTWAIT);
1925 if (IS_ERR(skb))
1926 return skb;
1927
1928 skb->priority = priority;
1929
1930 /* Create L2CAP header */
1931 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1932 lh->cid = cpu_to_le16(chan->dcid);
1933 lh->len = cpu_to_le16(len);
1934
1935 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1936 if (unlikely(err < 0)) {
1937 kfree_skb(skb);
1938 return ERR_PTR(err);
1939 }
1940 return skb;
1941 }
1942
1943 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1944 struct msghdr *msg, size_t len,
1945 u16 sdulen)
1946 {
1947 struct l2cap_conn *conn = chan->conn;
1948 struct sk_buff *skb;
1949 int err, count, hlen;
1950 struct l2cap_hdr *lh;
1951
1952 BT_DBG("chan %p len %d", chan, (int)len);
1953
1954 if (!conn)
1955 return ERR_PTR(-ENOTCONN);
1956
1957 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1958 hlen = L2CAP_EXT_HDR_SIZE;
1959 else
1960 hlen = L2CAP_ENH_HDR_SIZE;
1961
1962 if (sdulen)
1963 hlen += L2CAP_SDULEN_SIZE;
1964
1965 if (chan->fcs == L2CAP_FCS_CRC16)
1966 hlen += L2CAP_FCS_SIZE;
1967
1968 count = min_t(unsigned int, (conn->mtu - hlen), len);
1969
1970 skb = chan->ops->alloc_skb(chan, count + hlen,
1971 msg->msg_flags & MSG_DONTWAIT);
1972 if (IS_ERR(skb))
1973 return skb;
1974
1975 /* Create L2CAP header */
1976 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1977 lh->cid = cpu_to_le16(chan->dcid);
1978 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1979
1980 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
1981
1982 if (sdulen)
1983 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1984
1985 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1986 if (unlikely(err < 0)) {
1987 kfree_skb(skb);
1988 return ERR_PTR(err);
1989 }
1990
1991 if (chan->fcs == L2CAP_FCS_CRC16)
1992 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1993
1994 bt_cb(skb)->control.retries = 0;
1995 return skb;
1996 }
1997
1998 static int l2cap_segment_sdu(struct l2cap_chan *chan,
1999 struct sk_buff_head *seg_queue,
2000 struct msghdr *msg, size_t len)
2001 {
2002 struct sk_buff *skb;
2003 u16 sdu_len;
2004 size_t pdu_len;
2005 int err = 0;
2006 u8 sar;
2007
2008 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2009
2010 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2011 * so fragmented skbs are not used. The HCI layer's handling
2012 * of fragmented skbs is not compatible with ERTM's queueing.
2013 */
2014
2015 /* PDU size is derived from the HCI MTU */
2016 pdu_len = chan->conn->mtu;
2017
2018 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2019
2020 /* Adjust for largest possible L2CAP overhead. */
2021 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2022
2023 /* Remote device may have requested smaller PDUs */
2024 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2025
2026 if (len <= pdu_len) {
2027 sar = L2CAP_SAR_UNSEGMENTED;
2028 sdu_len = 0;
2029 pdu_len = len;
2030 } else {
2031 sar = L2CAP_SAR_START;
2032 sdu_len = len;
2033 pdu_len -= L2CAP_SDULEN_SIZE;
2034 }
2035
2036 while (len > 0) {
2037 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2038
2039 if (IS_ERR(skb)) {
2040 __skb_queue_purge(seg_queue);
2041 return PTR_ERR(skb);
2042 }
2043
2044 bt_cb(skb)->control.sar = sar;
2045 __skb_queue_tail(seg_queue, skb);
2046
2047 len -= pdu_len;
2048 if (sdu_len) {
2049 sdu_len = 0;
2050 pdu_len += L2CAP_SDULEN_SIZE;
2051 }
2052
2053 if (len <= pdu_len) {
2054 sar = L2CAP_SAR_END;
2055 pdu_len = len;
2056 } else {
2057 sar = L2CAP_SAR_CONTINUE;
2058 }
2059 }
2060
2061 return err;
2062 }
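/* Worked example (illustrative numbers): with conn->mtu == 672,
 * chan->remote_mps == 512 and extended control plus FCS assumed for
 * the worst-case overhead calculation, pdu_len is clamped to 512.
 * A 1000 byte SDU is then emitted as two I-frames:
 *
 *	SAR_START    510 bytes of data (2 bytes go to the SDU length field)
 *	SAR_END      490 bytes of data
 *
 * An SDU no larger than pdu_len produces a single SAR_UNSEGMENTED
 * frame instead.
 */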
2063
2064 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2065 u32 priority)
2066 {
2067 struct sk_buff *skb;
2068 int err;
2069 struct sk_buff_head seg_queue;
2070
2071 /* Connectionless channel */
2072 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2073 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2074 if (IS_ERR(skb))
2075 return PTR_ERR(skb);
2076
2077 l2cap_do_send(chan, skb);
2078 return len;
2079 }
2080
2081 switch (chan->mode) {
2082 case L2CAP_MODE_BASIC:
2083 /* Check outgoing MTU */
2084 if (len > chan->omtu)
2085 return -EMSGSIZE;
2086
2087 /* Create a basic PDU */
2088 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2089 if (IS_ERR(skb))
2090 return PTR_ERR(skb);
2091
2092 l2cap_do_send(chan, skb);
2093 err = len;
2094 break;
2095
2096 case L2CAP_MODE_ERTM:
2097 case L2CAP_MODE_STREAMING:
2098 /* Check outgoing MTU */
2099 if (len > chan->omtu) {
2100 err = -EMSGSIZE;
2101 break;
2102 }
2103
2104 __skb_queue_head_init(&seg_queue);
2105
2106 /* Do segmentation before calling in to the state machine,
2107 * since it's possible to block while waiting for memory
2108 * allocation.
2109 */
2110 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2111
2112 /* The channel could have been closed while segmenting,
2113 * check that it is still connected.
2114 */
2115 if (chan->state != BT_CONNECTED) {
2116 __skb_queue_purge(&seg_queue);
2117 err = -ENOTCONN;
2118 }
2119
2120 if (err)
2121 break;
2122
2123 if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
2124 chan->tx_send_head = seg_queue.next;
2125 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2126
2127 if (chan->mode == L2CAP_MODE_ERTM)
2128 err = l2cap_ertm_send(chan);
2129 else
2130 l2cap_streaming_send(chan);
2131
2132 if (err >= 0)
2133 err = len;
2134
2135 /* If the skbs were not queued for sending, they'll still be in
2136 * seg_queue and need to be purged.
2137 */
2138 __skb_queue_purge(&seg_queue);
2139 break;
2140
2141 default:
2142 BT_DBG("bad state %1.1x", chan->mode);
2143 err = -EBADFD;
2144 }
2145
2146 return err;
2147 }
2148
2149 /* Copy frame to all raw sockets on that connection */
2150 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2151 {
2152 struct sk_buff *nskb;
2153 struct l2cap_chan *chan;
2154
2155 BT_DBG("conn %p", conn);
2156
2157 mutex_lock(&conn->chan_lock);
2158
2159 list_for_each_entry(chan, &conn->chan_l, list) {
2160 struct sock *sk = chan->sk;
2161 if (chan->chan_type != L2CAP_CHAN_RAW)
2162 continue;
2163
2164 /* Don't send frame to the socket it came from */
2165 if (skb->sk == sk)
2166 continue;
2167 nskb = skb_clone(skb, GFP_ATOMIC);
2168 if (!nskb)
2169 continue;
2170
2171 if (chan->ops->recv(chan->data, nskb))
2172 kfree_skb(nskb);
2173 }
2174
2175 mutex_unlock(&conn->chan_lock);
2176 }
2177
2178 /* ---- L2CAP signalling commands ---- */
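/* Build a signalling command PDU. The L2CAP and command headers plus as
 * much of the payload as fits in conn->mtu go into the first skb; any
 * remaining payload is carried in continuation fragments chained on the
 * skb's frag_list.
 */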
2179 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2180 u8 code, u8 ident, u16 dlen, void *data)
2181 {
2182 struct sk_buff *skb, **frag;
2183 struct l2cap_cmd_hdr *cmd;
2184 struct l2cap_hdr *lh;
2185 int len, count;
2186
2187 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2188 conn, code, ident, dlen);
2189
2190 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2191 count = min_t(unsigned int, conn->mtu, len);
2192
2193 skb = bt_skb_alloc(count, GFP_ATOMIC);
2194 if (!skb)
2195 return NULL;
2196
2197 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2198 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2199
2200 if (conn->hcon->type == LE_LINK)
2201 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2202 else
2203 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2204
2205 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2206 cmd->code = code;
2207 cmd->ident = ident;
2208 cmd->len = cpu_to_le16(dlen);
2209
2210 if (dlen) {
2211 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2212 memcpy(skb_put(skb, count), data, count);
2213 data += count;
2214 }
2215
2216 len -= skb->len;
2217
2218 /* Continuation fragments (no L2CAP header) */
2219 frag = &skb_shinfo(skb)->frag_list;
2220 while (len) {
2221 count = min_t(unsigned int, conn->mtu, len);
2222
2223 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2224 if (!*frag)
2225 goto fail;
2226
2227 memcpy(skb_put(*frag, count), data, count);
2228
2229 len -= count;
2230 data += count;
2231
2232 frag = &(*frag)->next;
2233 }
2234
2235 return skb;
2236
2237 fail:
2238 kfree_skb(skb);
2239 return NULL;
2240 }
2241
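/* Parse a single configuration option at *ptr. Advances *ptr past the
 * option and returns its total length. 1-, 2- and 4-byte values are
 * decoded into *val; for larger options *val holds a pointer to the
 * option data instead.
 */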
2242 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2243 {
2244 struct l2cap_conf_opt *opt = *ptr;
2245 int len;
2246
2247 len = L2CAP_CONF_OPT_SIZE + opt->len;
2248 *ptr += len;
2249
2250 *type = opt->type;
2251 *olen = opt->len;
2252
2253 switch (opt->len) {
2254 case 1:
2255 *val = *((u8 *) opt->val);
2256 break;
2257
2258 case 2:
2259 *val = get_unaligned_le16(opt->val);
2260 break;
2261
2262 case 4:
2263 *val = get_unaligned_le32(opt->val);
2264 break;
2265
2266 default:
2267 *val = (unsigned long) opt->val;
2268 break;
2269 }
2270
2271 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2272 return len;
2273 }
2274
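/* Append a configuration option (type, length, value) at *ptr and advance
 * *ptr past it. Values longer than 4 bytes are copied from the buffer that
 * val points to, as l2cap_add_opt_efs() does for the EFS structure.
 */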
2275 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2276 {
2277 struct l2cap_conf_opt *opt = *ptr;
2278
2279 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2280
2281 opt->type = type;
2282 opt->len = len;
2283
2284 switch (len) {
2285 case 1:
2286 *((u8 *) opt->val) = val;
2287 break;
2288
2289 case 2:
2290 put_unaligned_le16(val, opt->val);
2291 break;
2292
2293 case 4:
2294 put_unaligned_le32(val, opt->val);
2295 break;
2296
2297 default:
2298 memcpy(opt->val, (void *) val, len);
2299 break;
2300 }
2301
2302 *ptr += L2CAP_CONF_OPT_SIZE + len;
2303 }
2304
2305 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2306 {
2307 struct l2cap_conf_efs efs;
2308
2309 switch (chan->mode) {
2310 case L2CAP_MODE_ERTM:
2311 efs.id = chan->local_id;
2312 efs.stype = chan->local_stype;
2313 efs.msdu = cpu_to_le16(chan->local_msdu);
2314 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2315 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2316 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2317 break;
2318
2319 case L2CAP_MODE_STREAMING:
2320 efs.id = 1;
2321 efs.stype = L2CAP_SERV_BESTEFFORT;
2322 efs.msdu = cpu_to_le16(chan->local_msdu);
2323 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2324 efs.acc_lat = 0;
2325 efs.flush_to = 0;
2326 break;
2327
2328 default:
2329 return;
2330 }
2331
2332 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2333 (unsigned long) &efs);
2334 }
2335
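/* Delayed work run when the ack timer fires: send any pending
 * acknowledgement and drop the channel reference held for the timer.
 */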
2336 static void l2cap_ack_timeout(struct work_struct *work)
2337 {
2338 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2339 ack_timer.work);
2340
2341 BT_DBG("chan %p", chan);
2342
2343 l2cap_chan_lock(chan);
2344
2345 __l2cap_send_ack(chan);
2346
2347 l2cap_chan_unlock(chan);
2348
2349 l2cap_chan_put(chan);
2350 }
2351
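/* Reset the channel's sequence numbers, counters and SDU reassembly state.
 * For ERTM mode also set the initial rx/tx states, initialize the
 * retransmission, monitor and ack delayed works, and allocate the SREJ and
 * retransmit sequence lists.
 */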
2352 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2353 {
2354 int err;
2355
2356 chan->next_tx_seq = 0;
2357 chan->expected_tx_seq = 0;
2358 chan->expected_ack_seq = 0;
2359 chan->unacked_frames = 0;
2360 chan->buffer_seq = 0;
2361 chan->num_acked = 0;
2362 chan->frames_sent = 0;
2363 chan->last_acked_seq = 0;
2364 chan->sdu = NULL;
2365 chan->sdu_last_frag = NULL;
2366 chan->sdu_len = 0;
2367
2368 skb_queue_head_init(&chan->tx_q);
2369
2370 if (chan->mode != L2CAP_MODE_ERTM)
2371 return 0;
2372
2373 chan->rx_state = L2CAP_RX_STATE_RECV;
2374 chan->tx_state = L2CAP_TX_STATE_XMIT;
2375
2376 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2377 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2378 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2379
2380 skb_queue_head_init(&chan->srej_q);
2381
2382 INIT_LIST_HEAD(&chan->srej_l);
2383 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2384 if (err < 0)
2385 return err;
2386
2387 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2388 if (err < 0)
2389 l2cap_seq_list_free(&chan->srej_list);
2390
2391 return err;
2392 }
2393
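/* Keep ERTM or streaming mode only if the remote's feature mask advertises
 * support for it; otherwise fall back to basic mode.
 */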
2394 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2395 {
2396 switch (mode) {
2397 case L2CAP_MODE_STREAMING:
2398 case L2CAP_MODE_ERTM:
2399 if (l2cap_mode_supported(mode, remote_feat_mask))
2400 return mode;
2401 /* fall through */
2402 default:
2403 return L2CAP_MODE_BASIC;
2404 }
2405 }
2406
2407 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2408 {
2409 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2410 }
2411
2412 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2413 {
2414 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2415 }
2416
2417 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2418 {
2419 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2420 __l2cap_ews_supported(chan)) {
2421 /* use extended control field */
2422 set_bit(FLAG_EXT_CTRL, &chan->flags);
2423 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2424 } else {
2425 chan->tx_win = min_t(u16, chan->tx_win,
2426 L2CAP_DEFAULT_TX_WINDOW);
2427 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2428 }
2429 }
2430
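/* Build the options of an outgoing Configure Request into 'data': the MTU,
 * an RFC option describing the chosen mode and, where applicable, FCS, EFS
 * and extended window size options. Returns the length of the request.
 */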
2431 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2432 {
2433 struct l2cap_conf_req *req = data;
2434 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2435 void *ptr = req->data;
2436 u16 size;
2437
2438 BT_DBG("chan %p", chan);
2439
2440 if (chan->num_conf_req || chan->num_conf_rsp)
2441 goto done;
2442
2443 switch (chan->mode) {
2444 case L2CAP_MODE_STREAMING:
2445 case L2CAP_MODE_ERTM:
2446 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2447 break;
2448
2449 if (__l2cap_efs_supported(chan))
2450 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2451
2452 /* fall through */
2453 default:
2454 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2455 break;
2456 }
2457
2458 done:
2459 if (chan->imtu != L2CAP_DEFAULT_MTU)
2460 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2461
2462 switch (chan->mode) {
2463 case L2CAP_MODE_BASIC:
2464 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2465 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2466 break;
2467
2468 rfc.mode = L2CAP_MODE_BASIC;
2469 rfc.txwin_size = 0;
2470 rfc.max_transmit = 0;
2471 rfc.retrans_timeout = 0;
2472 rfc.monitor_timeout = 0;
2473 rfc.max_pdu_size = 0;
2474
2475 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2476 (unsigned long) &rfc);
2477 break;
2478
2479 case L2CAP_MODE_ERTM:
2480 rfc.mode = L2CAP_MODE_ERTM;
2481 rfc.max_transmit = chan->max_tx;
2482 rfc.retrans_timeout = 0;
2483 rfc.monitor_timeout = 0;
2484
2485 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2486 L2CAP_EXT_HDR_SIZE -
2487 L2CAP_SDULEN_SIZE -
2488 L2CAP_FCS_SIZE);
2489 rfc.max_pdu_size = cpu_to_le16(size);
2490
2491 l2cap_txwin_setup(chan);
2492
2493 rfc.txwin_size = min_t(u16, chan->tx_win,
2494 L2CAP_DEFAULT_TX_WINDOW);
2495
2496 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2497 (unsigned long) &rfc);
2498
2499 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2500 l2cap_add_opt_efs(&ptr, chan);
2501
2502 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2503 break;
2504
2505 if (chan->fcs == L2CAP_FCS_NONE ||
2506 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2507 chan->fcs = L2CAP_FCS_NONE;
2508 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2509 }
2510
2511 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2512 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2513 chan->tx_win);
2514 break;
2515
2516 case L2CAP_MODE_STREAMING:
2517 rfc.mode = L2CAP_MODE_STREAMING;
2518 rfc.txwin_size = 0;
2519 rfc.max_transmit = 0;
2520 rfc.retrans_timeout = 0;
2521 rfc.monitor_timeout = 0;
2522
2523 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2524 L2CAP_EXT_HDR_SIZE -
2525 L2CAP_SDULEN_SIZE -
2526 L2CAP_FCS_SIZE);
2527 rfc.max_pdu_size = cpu_to_le16(size);
2528
2529 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2530 (unsigned long) &rfc);
2531
2532 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2533 l2cap_add_opt_efs(&ptr, chan);
2534
2535 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2536 break;
2537
2538 if (chan->fcs == L2CAP_FCS_NONE ||
2539 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2540 chan->fcs = L2CAP_FCS_NONE;
2541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2542 }
2543 break;
2544 }
2545
2546 req->dcid = cpu_to_le16(chan->dcid);
2547 req->flags = cpu_to_le16(0);
2548
2549 return ptr - data;
2550 }
2551
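/* Walk the options stored from the peer's Configure Request (chan->conf_req)
 * and build our Configure Response into 'data', adjusting the channel's
 * mode, MTU and ERTM/streaming parameters along the way. Returns the
 * response length, or a negative error if the request must be refused.
 */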
2552 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2553 {
2554 struct l2cap_conf_rsp *rsp = data;
2555 void *ptr = rsp->data;
2556 void *req = chan->conf_req;
2557 int len = chan->conf_len;
2558 int type, hint, olen;
2559 unsigned long val;
2560 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2561 struct l2cap_conf_efs efs;
2562 u8 remote_efs = 0;
2563 u16 mtu = L2CAP_DEFAULT_MTU;
2564 u16 result = L2CAP_CONF_SUCCESS;
2565 u16 size;
2566
2567 BT_DBG("chan %p", chan);
2568
2569 while (len >= L2CAP_CONF_OPT_SIZE) {
2570 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2571
2572 hint = type & L2CAP_CONF_HINT;
2573 type &= L2CAP_CONF_MASK;
2574
2575 switch (type) {
2576 case L2CAP_CONF_MTU:
2577 mtu = val;
2578 break;
2579
2580 case L2CAP_CONF_FLUSH_TO:
2581 chan->flush_to = val;
2582 break;
2583
2584 case L2CAP_CONF_QOS:
2585 break;
2586
2587 case L2CAP_CONF_RFC:
2588 if (olen == sizeof(rfc))
2589 memcpy(&rfc, (void *) val, olen);
2590 break;
2591
2592 case L2CAP_CONF_FCS:
2593 if (val == L2CAP_FCS_NONE)
2594 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2595 break;
2596
2597 case L2CAP_CONF_EFS:
2598 remote_efs = 1;
2599 if (olen == sizeof(efs))
2600 memcpy(&efs, (void *) val, olen);
2601 break;
2602
2603 case L2CAP_CONF_EWS:
2604 if (!enable_hs)
2605 return -ECONNREFUSED;
2606
2607 set_bit(FLAG_EXT_CTRL, &chan->flags);
2608 set_bit(CONF_EWS_RECV, &chan->conf_state);
2609 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2610 chan->remote_tx_win = val;
2611 break;
2612
2613 default:
2614 if (hint)
2615 break;
2616
2617 result = L2CAP_CONF_UNKNOWN;
2618 *((u8 *) ptr++) = type;
2619 break;
2620 }
2621 }
2622
2623 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2624 goto done;
2625
2626 switch (chan->mode) {
2627 case L2CAP_MODE_STREAMING:
2628 case L2CAP_MODE_ERTM:
2629 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2630 chan->mode = l2cap_select_mode(rfc.mode,
2631 chan->conn->feat_mask);
2632 break;
2633 }
2634
2635 if (remote_efs) {
2636 if (__l2cap_efs_supported(chan))
2637 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2638 else
2639 return -ECONNREFUSED;
2640 }
2641
2642 if (chan->mode != rfc.mode)
2643 return -ECONNREFUSED;
2644
2645 break;
2646 }
2647
2648 done:
2649 if (chan->mode != rfc.mode) {
2650 result = L2CAP_CONF_UNACCEPT;
2651 rfc.mode = chan->mode;
2652
2653 if (chan->num_conf_rsp == 1)
2654 return -ECONNREFUSED;
2655
2656 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2657 sizeof(rfc), (unsigned long) &rfc);
2658 }
2659
2660 if (result == L2CAP_CONF_SUCCESS) {
2661 /* Configure output options and let the other side know
2662 * which ones we don't like. */
2663
2664 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2665 result = L2CAP_CONF_UNACCEPT;
2666 else {
2667 chan->omtu = mtu;
2668 set_bit(CONF_MTU_DONE, &chan->conf_state);
2669 }
2670 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2671
2672 if (remote_efs) {
2673 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2674 efs.stype != L2CAP_SERV_NOTRAFIC &&
2675 efs.stype != chan->local_stype) {
2676
2677 result = L2CAP_CONF_UNACCEPT;
2678
2679 if (chan->num_conf_req >= 1)
2680 return -ECONNREFUSED;
2681
2682 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2683 sizeof(efs),
2684 (unsigned long) &efs);
2685 } else {
2686 /* Send PENDING Conf Rsp */
2687 result = L2CAP_CONF_PENDING;
2688 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2689 }
2690 }
2691
2692 switch (rfc.mode) {
2693 case L2CAP_MODE_BASIC:
2694 chan->fcs = L2CAP_FCS_NONE;
2695 set_bit(CONF_MODE_DONE, &chan->conf_state);
2696 break;
2697
2698 case L2CAP_MODE_ERTM:
2699 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2700 chan->remote_tx_win = rfc.txwin_size;
2701 else
2702 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2703
2704 chan->remote_max_tx = rfc.max_transmit;
2705
2706 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2707 chan->conn->mtu -
2708 L2CAP_EXT_HDR_SIZE -
2709 L2CAP_SDULEN_SIZE -
2710 L2CAP_FCS_SIZE);
2711 rfc.max_pdu_size = cpu_to_le16(size);
2712 chan->remote_mps = size;
2713
2714 rfc.retrans_timeout =
2715 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2716 rfc.monitor_timeout =
2717 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2718
2719 set_bit(CONF_MODE_DONE, &chan->conf_state);
2720
2721 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2722 sizeof(rfc), (unsigned long) &rfc);
2723
2724 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2725 chan->remote_id = efs.id;
2726 chan->remote_stype = efs.stype;
2727 chan->remote_msdu = le16_to_cpu(efs.msdu);
2728 chan->remote_flush_to =
2729 le32_to_cpu(efs.flush_to);
2730 chan->remote_acc_lat =
2731 le32_to_cpu(efs.acc_lat);
2732 chan->remote_sdu_itime =
2733 le32_to_cpu(efs.sdu_itime);
2734 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2735 sizeof(efs), (unsigned long) &efs);
2736 }
2737 break;
2738
2739 case L2CAP_MODE_STREAMING:
2740 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2741 chan->conn->mtu -
2742 L2CAP_EXT_HDR_SIZE -
2743 L2CAP_SDULEN_SIZE -
2744 L2CAP_FCS_SIZE);
2745 rfc.max_pdu_size = cpu_to_le16(size);
2746 chan->remote_mps = size;
2747
2748 set_bit(CONF_MODE_DONE, &chan->conf_state);
2749
2750 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2751 sizeof(rfc), (unsigned long) &rfc);
2752
2753 break;
2754
2755 default:
2756 result = L2CAP_CONF_UNACCEPT;
2757
2758 memset(&rfc, 0, sizeof(rfc));
2759 rfc.mode = chan->mode;
2760 }
2761
2762 if (result == L2CAP_CONF_SUCCESS)
2763 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2764 }
2765 rsp->scid = cpu_to_le16(chan->dcid);
2766 rsp->result = cpu_to_le16(result);
2767 rsp->flags = cpu_to_le16(0x0000);
2768
2769 return ptr - data;
2770 }
2771
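/* Parse the peer's Configure Response and build the follow-up Configure
 * Request into 'data', adopting the MTU, flush timeout, RFC, EWS and EFS
 * values the peer proposed. Returns the new request length or a negative
 * error.
 */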
2772 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2773 {
2774 struct l2cap_conf_req *req = data;
2775 void *ptr = req->data;
2776 int type, olen;
2777 unsigned long val;
2778 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2779 struct l2cap_conf_efs efs;
2780
2781 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2782
2783 while (len >= L2CAP_CONF_OPT_SIZE) {
2784 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2785
2786 switch (type) {
2787 case L2CAP_CONF_MTU:
2788 if (val < L2CAP_DEFAULT_MIN_MTU) {
2789 *result = L2CAP_CONF_UNACCEPT;
2790 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2791 } else
2792 chan->imtu = val;
2793 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2794 break;
2795
2796 case L2CAP_CONF_FLUSH_TO:
2797 chan->flush_to = val;
2798 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2799 2, chan->flush_to);
2800 break;
2801
2802 case L2CAP_CONF_RFC:
2803 if (olen == sizeof(rfc))
2804 memcpy(&rfc, (void *)val, olen);
2805
2806 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2807 rfc.mode != chan->mode)
2808 return -ECONNREFUSED;
2809
2810 chan->fcs = 0;
2811
2812 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2813 sizeof(rfc), (unsigned long) &rfc);
2814 break;
2815
2816 case L2CAP_CONF_EWS:
2817 chan->tx_win = min_t(u16, val,
2818 L2CAP_DEFAULT_EXT_WINDOW);
2819 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2820 chan->tx_win);
2821 break;
2822
2823 case L2CAP_CONF_EFS:
2824 if (olen == sizeof(efs))
2825 memcpy(&efs, (void *)val, olen);
2826
2827 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2828 efs.stype != L2CAP_SERV_NOTRAFIC &&
2829 efs.stype != chan->local_stype)
2830 return -ECONNREFUSED;
2831
2832 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2833 sizeof(efs), (unsigned long) &efs);
2834 break;
2835 }
2836 }
2837
2838 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2839 return -ECONNREFUSED;
2840
2841 chan->mode = rfc.mode;
2842
2843 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2844 switch (rfc.mode) {
2845 case L2CAP_MODE_ERTM:
2846 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2847 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2848 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2849
2850 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2851 chan->local_msdu = le16_to_cpu(efs.msdu);
2852 chan->local_sdu_itime =
2853 le32_to_cpu(efs.sdu_itime);
2854 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2855 chan->local_flush_to =
2856 le32_to_cpu(efs.flush_to);
2857 }
2858 break;
2859
2860 case L2CAP_MODE_STREAMING:
2861 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2862 }
2863 }
2864
2865 req->dcid = cpu_to_le16(chan->dcid);
2866 req->flags = cpu_to_le16(0x0000);
2867
2868 return ptr - data;
2869 }
2870
2871 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2872 {
2873 struct l2cap_conf_rsp *rsp = data;
2874 void *ptr = rsp->data;
2875
2876 BT_DBG("chan %p", chan);
2877
2878 rsp->scid = cpu_to_le16(chan->dcid);
2879 rsp->result = cpu_to_le16(result);
2880 rsp->flags = cpu_to_le16(flags);
2881
2882 return ptr - data;
2883 }
2884
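/* Complete a deferred connect: send the success Connect Response using the
 * stored ident and, if one has not been sent already, the first Configure
 * Request.
 */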
2885 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2886 {
2887 struct l2cap_conn_rsp rsp;
2888 struct l2cap_conn *conn = chan->conn;
2889 u8 buf[128];
2890
2891 rsp.scid = cpu_to_le16(chan->dcid);
2892 rsp.dcid = cpu_to_le16(chan->scid);
2893 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2894 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2895 l2cap_send_cmd(conn, chan->ident,
2896 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2897
2898 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2899 return;
2900
2901 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2902 l2cap_build_conf_req(chan, buf), buf);
2903 chan->num_conf_req++;
2904 }
2905
2906 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2907 {
2908 int type, olen;
2909 unsigned long val;
2910 struct l2cap_conf_rfc rfc;
2911
2912 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2913
2914 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2915 return;
2916
2917 while (len >= L2CAP_CONF_OPT_SIZE) {
2918 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2919
2920 switch (type) {
2921 case L2CAP_CONF_RFC:
2922 if (olen == sizeof(rfc))
2923 memcpy(&rfc, (void *)val, olen);
2924 goto done;
2925 }
2926 }
2927
2928 /* Use sane default values in case a misbehaving remote device
2929 * did not send an RFC option.
2930 */
2931 rfc.mode = chan->mode;
2932 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2933 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2934 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2935
2936 BT_ERR("Expected RFC option was not found, using defaults");
2937
2938 done:
2939 switch (rfc.mode) {
2940 case L2CAP_MODE_ERTM:
2941 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2942 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2943 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2944 break;
2945 case L2CAP_MODE_STREAMING:
2946 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2947 }
2948 }
2949
2950 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2951 {
2952 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2953
2954 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2955 return 0;
2956
2957 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2958 cmd->ident == conn->info_ident) {
2959 cancel_delayed_work(&conn->info_timer);
2960
2961 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2962 conn->info_ident = 0;
2963
2964 l2cap_conn_start(conn);
2965 }
2966
2967 return 0;
2968 }
2969
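/* Handle an incoming Connect Request: find a listening channel for the PSM,
 * enforce link security and backlog limits, create the new child channel and
 * reply with a success, pending or error result. When the reply is pending
 * because the feature mask exchange has not completed, an Information
 * Request is sent as well.
 */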
2970 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2971 {
2972 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2973 struct l2cap_conn_rsp rsp;
2974 struct l2cap_chan *chan = NULL, *pchan;
2975 struct sock *parent, *sk = NULL;
2976 int result, status = L2CAP_CS_NO_INFO;
2977
2978 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2979 __le16 psm = req->psm;
2980
2981 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2982
2983 /* Check if we have a socket listening on this psm */
2984 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
2985 if (!pchan) {
2986 result = L2CAP_CR_BAD_PSM;
2987 goto sendresp;
2988 }
2989
2990 parent = pchan->sk;
2991
2992 mutex_lock(&conn->chan_lock);
2993 lock_sock(parent);
2994
2995 /* Check if the ACL is secure enough (if not SDP) */
2996 if (psm != cpu_to_le16(0x0001) &&
2997 !hci_conn_check_link_mode(conn->hcon)) {
2998 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2999 result = L2CAP_CR_SEC_BLOCK;
3000 goto response;
3001 }
3002
3003 result = L2CAP_CR_NO_MEM;
3004
3005 /* Check for backlog size */
3006 if (sk_acceptq_is_full(parent)) {
3007 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3008 goto response;
3009 }
3010
3011 chan = pchan->ops->new_connection(pchan->data);
3012 if (!chan)
3013 goto response;
3014
3015 sk = chan->sk;
3016
3017 /* Check if we already have a channel with that dcid */
3018 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3019 sock_set_flag(sk, SOCK_ZAPPED);
3020 chan->ops->close(chan->data);
3021 goto response;
3022 }
3023
3024 hci_conn_hold(conn->hcon);
3025
3026 bacpy(&bt_sk(sk)->src, conn->src);
3027 bacpy(&bt_sk(sk)->dst, conn->dst);
3028 chan->psm = psm;
3029 chan->dcid = scid;
3030
3031 bt_accept_enqueue(parent, sk);
3032
3033 __l2cap_chan_add(conn, chan);
3034
3035 dcid = chan->scid;
3036
3037 __set_chan_timer(chan, sk->sk_sndtimeo);
3038
3039 chan->ident = cmd->ident;
3040
3041 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3042 if (l2cap_chan_check_security(chan)) {
3043 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3044 __l2cap_state_change(chan, BT_CONNECT2);
3045 result = L2CAP_CR_PEND;
3046 status = L2CAP_CS_AUTHOR_PEND;
3047 parent->sk_data_ready(parent, 0);
3048 } else {
3049 __l2cap_state_change(chan, BT_CONFIG);
3050 result = L2CAP_CR_SUCCESS;
3051 status = L2CAP_CS_NO_INFO;
3052 }
3053 } else {
3054 __l2cap_state_change(chan, BT_CONNECT2);
3055 result = L2CAP_CR_PEND;
3056 status = L2CAP_CS_AUTHEN_PEND;
3057 }
3058 } else {
3059 __l2cap_state_change(chan, BT_CONNECT2);
3060 result = L2CAP_CR_PEND;
3061 status = L2CAP_CS_NO_INFO;
3062 }
3063
3064 response:
3065 release_sock(parent);
3066 mutex_unlock(&conn->chan_lock);
3067
3068 sendresp:
3069 rsp.scid = cpu_to_le16(scid);
3070 rsp.dcid = cpu_to_le16(dcid);
3071 rsp.result = cpu_to_le16(result);
3072 rsp.status = cpu_to_le16(status);
3073 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3074
3075 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3076 struct l2cap_info_req info;
3077 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3078
3079 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3080 conn->info_ident = l2cap_get_ident(conn);
3081
3082 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3083
3084 l2cap_send_cmd(conn, conn->info_ident,
3085 L2CAP_INFO_REQ, sizeof(info), &info);
3086 }
3087
3088 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3089 result == L2CAP_CR_SUCCESS) {
3090 u8 buf[128];
3091 set_bit(CONF_REQ_SENT, &chan->conf_state);
3092 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3093 l2cap_build_conf_req(chan, buf), buf);
3094 chan->num_conf_req++;
3095 }
3096
3097 return 0;
3098 }
3099
3100 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3101 {
3102 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3103 u16 scid, dcid, result, status;
3104 struct l2cap_chan *chan;
3105 u8 req[128];
3106 int err;
3107
3108 scid = __le16_to_cpu(rsp->scid);
3109 dcid = __le16_to_cpu(rsp->dcid);
3110 result = __le16_to_cpu(rsp->result);
3111 status = __le16_to_cpu(rsp->status);
3112
3113 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3114 dcid, scid, result, status);
3115
3116 mutex_lock(&conn->chan_lock);
3117
3118 if (scid) {
3119 chan = __l2cap_get_chan_by_scid(conn, scid);
3120 if (!chan) {
3121 err = -EFAULT;
3122 goto unlock;
3123 }
3124 } else {
3125 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3126 if (!chan) {
3127 err = -EFAULT;
3128 goto unlock;
3129 }
3130 }
3131
3132 err = 0;
3133
3134 l2cap_chan_lock(chan);
3135
3136 switch (result) {
3137 case L2CAP_CR_SUCCESS:
3138 l2cap_state_change(chan, BT_CONFIG);
3139 chan->ident = 0;
3140 chan->dcid = dcid;
3141 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3142
3143 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3144 break;
3145
3146 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3147 l2cap_build_conf_req(chan, req), req);
3148 chan->num_conf_req++;
3149 break;
3150
3151 case L2CAP_CR_PEND:
3152 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3153 break;
3154
3155 default:
3156 l2cap_chan_del(chan, ECONNREFUSED);
3157 break;
3158 }
3159
3160 l2cap_chan_unlock(chan);
3161
3162 unlock:
3163 mutex_unlock(&conn->chan_lock);
3164
3165 return err;
3166 }
3167
3168 static inline void set_default_fcs(struct l2cap_chan *chan)
3169 {
3170 /* FCS is enabled only in ERTM or streaming mode, if one or both
3171 * sides request it.
3172 */
3173 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3174 chan->fcs = L2CAP_FCS_NONE;
3175 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3176 chan->fcs = L2CAP_FCS_CRC16;
3177 }
3178
3179 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3180 {
3181 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3182 u16 dcid, flags;
3183 u8 rsp[64];
3184 struct l2cap_chan *chan;
3185 int len, err = 0;
3186
3187 dcid = __le16_to_cpu(req->dcid);
3188 flags = __le16_to_cpu(req->flags);
3189
3190 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3191
3192 chan = l2cap_get_chan_by_scid(conn, dcid);
3193 if (!chan)
3194 return -ENOENT;
3195
3196 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3197 struct l2cap_cmd_rej_cid rej;
3198
3199 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3200 rej.scid = cpu_to_le16(chan->scid);
3201 rej.dcid = cpu_to_le16(chan->dcid);
3202
3203 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3204 sizeof(rej), &rej);
3205 goto unlock;
3206 }
3207
3208 /* Reject if config buffer is too small. */
3209 len = cmd_len - sizeof(*req);
3210 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3211 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3212 l2cap_build_conf_rsp(chan, rsp,
3213 L2CAP_CONF_REJECT, flags), rsp);
3214 goto unlock;
3215 }
3216
3217 /* Store config. */
3218 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3219 chan->conf_len += len;
3220
3221 if (flags & 0x0001) {
3222 /* Incomplete config. Send empty response. */
3223 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3224 l2cap_build_conf_rsp(chan, rsp,
3225 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3226 goto unlock;
3227 }
3228
3229 /* Complete config. */
3230 len = l2cap_parse_conf_req(chan, rsp);
3231 if (len < 0) {
3232 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3233 goto unlock;
3234 }
3235
3236 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3237 chan->num_conf_rsp++;
3238
3239 /* Reset config buffer. */
3240 chan->conf_len = 0;
3241
3242 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3243 goto unlock;
3244
3245 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3246 set_default_fcs(chan);
3247
3248 l2cap_state_change(chan, BT_CONNECTED);
3249
3250 if (chan->mode == L2CAP_MODE_ERTM ||
3251 chan->mode == L2CAP_MODE_STREAMING)
3252 err = l2cap_ertm_init(chan);
3253
3254 if (err < 0)
3255 l2cap_send_disconn_req(chan->conn, chan, -err);
3256 else
3257 l2cap_chan_ready(chan);
3258
3259 goto unlock;
3260 }
3261
3262 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3263 u8 buf[64];
3264 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3265 l2cap_build_conf_req(chan, buf), buf);
3266 chan->num_conf_req++;
3267 }
3268
3269 /* Got Conf Rsp PENDING from remote side and assume we sent
3270    Conf Rsp PENDING in the code above */
3271 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3272 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3273
3274 /* check compatibility */
3275
3276 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3277 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3278
3279 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3280 l2cap_build_conf_rsp(chan, rsp,
3281 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3282 }
3283
3284 unlock:
3285 l2cap_chan_unlock(chan);
3286 return err;
3287 }
3288
3289 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3290 {
3291 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3292 u16 scid, flags, result;
3293 struct l2cap_chan *chan;
3294 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3295 int err = 0;
3296
3297 scid = __le16_to_cpu(rsp->scid);
3298 flags = __le16_to_cpu(rsp->flags);
3299 result = __le16_to_cpu(rsp->result);
3300
3301 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3302 result, len);
3303
3304 chan = l2cap_get_chan_by_scid(conn, scid);
3305 if (!chan)
3306 return 0;
3307
3308 switch (result) {
3309 case L2CAP_CONF_SUCCESS:
3310 l2cap_conf_rfc_get(chan, rsp->data, len);
3311 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3312 break;
3313
3314 case L2CAP_CONF_PENDING:
3315 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3316
3317 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3318 char buf[64];
3319
3320 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3321 buf, &result);
3322 if (len < 0) {
3323 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3324 goto done;
3325 }
3326
3327 /* check compatibility */
3328
3329 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3330 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3331
3332 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3333 l2cap_build_conf_rsp(chan, buf,
3334 L2CAP_CONF_SUCCESS, 0x0000), buf);
3335 }
3336 goto done;
3337
3338 case L2CAP_CONF_UNACCEPT:
3339 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3340 char req[64];
3341
3342 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3343 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3344 goto done;
3345 }
3346
3347 /* throw out any old stored conf requests */
3348 result = L2CAP_CONF_SUCCESS;
3349 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3350 req, &result);
3351 if (len < 0) {
3352 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3353 goto done;
3354 }
3355
3356 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3357 L2CAP_CONF_REQ, len, req);
3358 chan->num_conf_req++;
3359 if (result != L2CAP_CONF_SUCCESS)
3360 goto done;
3361 break;
3362 }
3363
3364 default:
3365 l2cap_chan_set_err(chan, ECONNRESET);
3366
3367 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3368 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3369 goto done;
3370 }
3371
3372 if (flags & 0x01)
3373 goto done;
3374
3375 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3376
3377 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3378 set_default_fcs(chan);
3379
3380 l2cap_state_change(chan, BT_CONNECTED);
3381 if (chan->mode == L2CAP_MODE_ERTM ||
3382 chan->mode == L2CAP_MODE_STREAMING)
3383 err = l2cap_ertm_init(chan);
3384
3385 if (err < 0)
3386 l2cap_send_disconn_req(chan->conn, chan, -err);
3387 else
3388 l2cap_chan_ready(chan);
3389 }
3390
3391 done:
3392 l2cap_chan_unlock(chan);
3393 return err;
3394 }
3395
3396 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3397 {
3398 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3399 struct l2cap_disconn_rsp rsp;
3400 u16 dcid, scid;
3401 struct l2cap_chan *chan;
3402 struct sock *sk;
3403
3404 scid = __le16_to_cpu(req->scid);
3405 dcid = __le16_to_cpu(req->dcid);
3406
3407 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3408
3409 mutex_lock(&conn->chan_lock);
3410
3411 chan = __l2cap_get_chan_by_scid(conn, dcid);
3412 if (!chan) {
3413 mutex_unlock(&conn->chan_lock);
3414 return 0;
3415 }
3416
3417 l2cap_chan_lock(chan);
3418
3419 sk = chan->sk;
3420
3421 rsp.dcid = cpu_to_le16(chan->scid);
3422 rsp.scid = cpu_to_le16(chan->dcid);
3423 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3424
3425 lock_sock(sk);
3426 sk->sk_shutdown = SHUTDOWN_MASK;
3427 release_sock(sk);
3428
3429 l2cap_chan_hold(chan);
3430 l2cap_chan_del(chan, ECONNRESET);
3431
3432 l2cap_chan_unlock(chan);
3433
3434 chan->ops->close(chan->data);
3435 l2cap_chan_put(chan);
3436
3437 mutex_unlock(&conn->chan_lock);
3438
3439 return 0;
3440 }
3441
3442 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3443 {
3444 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3445 u16 dcid, scid;
3446 struct l2cap_chan *chan;
3447
3448 scid = __le16_to_cpu(rsp->scid);
3449 dcid = __le16_to_cpu(rsp->dcid);
3450
3451 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3452
3453 mutex_lock(&conn->chan_lock);
3454
3455 chan = __l2cap_get_chan_by_scid(conn, scid);
3456 if (!chan) {
3457 mutex_unlock(&conn->chan_lock);
3458 return 0;
3459 }
3460
3461 l2cap_chan_lock(chan);
3462
3463 l2cap_chan_hold(chan);
3464 l2cap_chan_del(chan, 0);
3465
3466 l2cap_chan_unlock(chan);
3467
3468 chan->ops->close(chan->data);
3469 l2cap_chan_put(chan);
3470
3471 mutex_unlock(&conn->chan_lock);
3472
3473 return 0;
3474 }
3475
3476 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3477 {
3478 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3479 u16 type;
3480
3481 type = __le16_to_cpu(req->type);
3482
3483 BT_DBG("type 0x%4.4x", type);
3484
3485 if (type == L2CAP_IT_FEAT_MASK) {
3486 u8 buf[8];
3487 u32 feat_mask = l2cap_feat_mask;
3488 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3489 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3490 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3491 if (!disable_ertm)
3492 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3493 | L2CAP_FEAT_FCS;
3494 if (enable_hs)
3495 feat_mask |= L2CAP_FEAT_EXT_FLOW
3496 | L2CAP_FEAT_EXT_WINDOW;
3497
3498 put_unaligned_le32(feat_mask, rsp->data);
3499 l2cap_send_cmd(conn, cmd->ident,
3500 L2CAP_INFO_RSP, sizeof(buf), buf);
3501 } else if (type == L2CAP_IT_FIXED_CHAN) {
3502 u8 buf[12];
3503 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3504
3505 if (enable_hs)
3506 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3507 else
3508 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3509
3510 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3511 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3512 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3513 l2cap_send_cmd(conn, cmd->ident,
3514 L2CAP_INFO_RSP, sizeof(buf), buf);
3515 } else {
3516 struct l2cap_info_rsp rsp;
3517 rsp.type = cpu_to_le16(type);
3518 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3519 l2cap_send_cmd(conn, cmd->ident,
3520 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3521 }
3522
3523 return 0;
3524 }
3525
3526 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3527 {
3528 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3529 u16 type, result;
3530
3531 type = __le16_to_cpu(rsp->type);
3532 result = __le16_to_cpu(rsp->result);
3533
3534 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3535
3536 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
3537 if (cmd->ident != conn->info_ident ||
3538 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3539 return 0;
3540
3541 cancel_delayed_work(&conn->info_timer);
3542
3543 if (result != L2CAP_IR_SUCCESS) {
3544 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3545 conn->info_ident = 0;
3546
3547 l2cap_conn_start(conn);
3548
3549 return 0;
3550 }
3551
3552 switch (type) {
3553 case L2CAP_IT_FEAT_MASK:
3554 conn->feat_mask = get_unaligned_le32(rsp->data);
3555
3556 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3557 struct l2cap_info_req req;
3558 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3559
3560 conn->info_ident = l2cap_get_ident(conn);
3561
3562 l2cap_send_cmd(conn, conn->info_ident,
3563 L2CAP_INFO_REQ, sizeof(req), &req);
3564 } else {
3565 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3566 conn->info_ident = 0;
3567
3568 l2cap_conn_start(conn);
3569 }
3570 break;
3571
3572 case L2CAP_IT_FIXED_CHAN:
3573 conn->fixed_chan_mask = rsp->data[0];
3574 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3575 conn->info_ident = 0;
3576
3577 l2cap_conn_start(conn);
3578 break;
3579 }
3580
3581 return 0;
3582 }
3583
3584 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3585 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3586 void *data)
3587 {
3588 struct l2cap_create_chan_req *req = data;
3589 struct l2cap_create_chan_rsp rsp;
3590 u16 psm, scid;
3591
3592 if (cmd_len != sizeof(*req))
3593 return -EPROTO;
3594
3595 if (!enable_hs)
3596 return -EINVAL;
3597
3598 psm = le16_to_cpu(req->psm);
3599 scid = le16_to_cpu(req->scid);
3600
3601 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3602
3603 /* Placeholder: Always reject */
3604 rsp.dcid = 0;
3605 rsp.scid = cpu_to_le16(scid);
3606 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3607 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3608
3609 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3610 sizeof(rsp), &rsp);
3611
3612 return 0;
3613 }
3614
3615 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3616 struct l2cap_cmd_hdr *cmd, void *data)
3617 {
3618 BT_DBG("conn %p", conn);
3619
3620 return l2cap_connect_rsp(conn, cmd, data);
3621 }
3622
3623 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3624 u16 icid, u16 result)
3625 {
3626 struct l2cap_move_chan_rsp rsp;
3627
3628 BT_DBG("icid %d, result %d", icid, result);
3629
3630 rsp.icid = cpu_to_le16(icid);
3631 rsp.result = cpu_to_le16(result);
3632
3633 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3634 }
3635
3636 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3637 struct l2cap_chan *chan, u16 icid, u16 result)
3638 {
3639 struct l2cap_move_chan_cfm cfm;
3640 u8 ident;
3641
3642 BT_DBG("icid %d, result %d", icid, result);
3643
3644 ident = l2cap_get_ident(conn);
3645 if (chan)
3646 chan->ident = ident;
3647
3648 cfm.icid = cpu_to_le16(icid);
3649 cfm.result = cpu_to_le16(result);
3650
3651 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3652 }
3653
3654 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3655 u16 icid)
3656 {
3657 struct l2cap_move_chan_cfm_rsp rsp;
3658
3659 BT_DBG("icid %d", icid);
3660
3661 rsp.icid = cpu_to_le16(icid);
3662 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3663 }
3664
3665 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3666 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3667 {
3668 struct l2cap_move_chan_req *req = data;
3669 u16 icid = 0;
3670 u16 result = L2CAP_MR_NOT_ALLOWED;
3671
3672 if (cmd_len != sizeof(*req))
3673 return -EPROTO;
3674
3675 icid = le16_to_cpu(req->icid);
3676
3677 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3678
3679 if (!enable_hs)
3680 return -EINVAL;
3681
3682 /* Placeholder: Always refuse */
3683 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3684
3685 return 0;
3686 }
3687
3688 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3689 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3690 {
3691 struct l2cap_move_chan_rsp *rsp = data;
3692 u16 icid, result;
3693
3694 if (cmd_len != sizeof(*rsp))
3695 return -EPROTO;
3696
3697 icid = le16_to_cpu(rsp->icid);
3698 result = le16_to_cpu(rsp->result);
3699
3700 BT_DBG("icid %d, result %d", icid, result);
3701
3702 /* Placeholder: Always unconfirmed */
3703 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3704
3705 return 0;
3706 }
3707
3708 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3709 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3710 {
3711 struct l2cap_move_chan_cfm *cfm = data;
3712 u16 icid, result;
3713
3714 if (cmd_len != sizeof(*cfm))
3715 return -EPROTO;
3716
3717 icid = le16_to_cpu(cfm->icid);
3718 result = le16_to_cpu(cfm->result);
3719
3720 BT_DBG("icid %d, result %d", icid, result);
3721
3722 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3723
3724 return 0;
3725 }
3726
3727 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3728 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3729 {
3730 struct l2cap_move_chan_cfm_rsp *rsp = data;
3731 u16 icid;
3732
3733 if (cmd_len != sizeof(*rsp))
3734 return -EPROTO;
3735
3736 icid = le16_to_cpu(rsp->icid);
3737
3738 BT_DBG("icid %d", icid);
3739
3740 return 0;
3741 }
3742
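/* Validate LE connection parameters: interval min/max are in 1.25 ms units
 * (6..3200), the supervision timeout multiplier is in 10 ms units
 * (10..3200), and the slave latency must not exceed 499 and must leave room
 * for at least one connection event within the supervision timeout.
 */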
3743 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3744 u16 to_multiplier)
3745 {
3746 u16 max_latency;
3747
3748 if (min > max || min < 6 || max > 3200)
3749 return -EINVAL;
3750
3751 if (to_multiplier < 10 || to_multiplier > 3200)
3752 return -EINVAL;
3753
3754 if (max >= to_multiplier * 8)
3755 return -EINVAL;
3756
3757 max_latency = (to_multiplier * 8 / max) - 1;
3758 if (latency > 499 || latency > max_latency)
3759 return -EINVAL;
3760
3761 return 0;
3762 }
3763
3764 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3765 struct l2cap_cmd_hdr *cmd, u8 *data)
3766 {
3767 struct hci_conn *hcon = conn->hcon;
3768 struct l2cap_conn_param_update_req *req;
3769 struct l2cap_conn_param_update_rsp rsp;
3770 u16 min, max, latency, to_multiplier, cmd_len;
3771 int err;
3772
3773 if (!(hcon->link_mode & HCI_LM_MASTER))
3774 return -EINVAL;
3775
3776 cmd_len = __le16_to_cpu(cmd->len);
3777 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3778 return -EPROTO;
3779
3780 req = (struct l2cap_conn_param_update_req *) data;
3781 min = __le16_to_cpu(req->min);
3782 max = __le16_to_cpu(req->max);
3783 latency = __le16_to_cpu(req->latency);
3784 to_multiplier = __le16_to_cpu(req->to_multiplier);
3785
3786 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3787 min, max, latency, to_multiplier);
3788
3789 memset(&rsp, 0, sizeof(rsp));
3790
3791 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3792 if (err)
3793 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3794 else
3795 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3796
3797 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3798 sizeof(rsp), &rsp);
3799
3800 if (!err)
3801 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3802
3803 return 0;
3804 }
3805
3806 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3807 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3808 {
3809 int err = 0;
3810
3811 switch (cmd->code) {
3812 case L2CAP_COMMAND_REJ:
3813 l2cap_command_rej(conn, cmd, data);
3814 break;
3815
3816 case L2CAP_CONN_REQ:
3817 err = l2cap_connect_req(conn, cmd, data);
3818 break;
3819
3820 case L2CAP_CONN_RSP:
3821 err = l2cap_connect_rsp(conn, cmd, data);
3822 break;
3823
3824 case L2CAP_CONF_REQ:
3825 err = l2cap_config_req(conn, cmd, cmd_len, data);
3826 break;
3827
3828 case L2CAP_CONF_RSP:
3829 err = l2cap_config_rsp(conn, cmd, data);
3830 break;
3831
3832 case L2CAP_DISCONN_REQ:
3833 err = l2cap_disconnect_req(conn, cmd, data);
3834 break;
3835
3836 case L2CAP_DISCONN_RSP:
3837 err = l2cap_disconnect_rsp(conn, cmd, data);
3838 break;
3839
3840 case L2CAP_ECHO_REQ:
3841 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3842 break;
3843
3844 case L2CAP_ECHO_RSP:
3845 break;
3846
3847 case L2CAP_INFO_REQ:
3848 err = l2cap_information_req(conn, cmd, data);
3849 break;
3850
3851 case L2CAP_INFO_RSP:
3852 err = l2cap_information_rsp(conn, cmd, data);
3853 break;
3854
3855 case L2CAP_CREATE_CHAN_REQ:
3856 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3857 break;
3858
3859 case L2CAP_CREATE_CHAN_RSP:
3860 err = l2cap_create_channel_rsp(conn, cmd, data);
3861 break;
3862
3863 case L2CAP_MOVE_CHAN_REQ:
3864 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3865 break;
3866
3867 case L2CAP_MOVE_CHAN_RSP:
3868 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3869 break;
3870
3871 case L2CAP_MOVE_CHAN_CFM:
3872 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3873 break;
3874
3875 case L2CAP_MOVE_CHAN_CFM_RSP:
3876 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3877 break;
3878
3879 default:
3880 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3881 err = -EINVAL;
3882 break;
3883 }
3884
3885 return err;
3886 }
3887
3888 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3889 struct l2cap_cmd_hdr *cmd, u8 *data)
3890 {
3891 switch (cmd->code) {
3892 case L2CAP_COMMAND_REJ:
3893 return 0;
3894
3895 case L2CAP_CONN_PARAM_UPDATE_REQ:
3896 return l2cap_conn_param_update_req(conn, cmd, data);
3897
3898 case L2CAP_CONN_PARAM_UPDATE_RSP:
3899 return 0;
3900
3901 default:
3902 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3903 return -EINVAL;
3904 }
3905 }
3906
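/* Dispatch the signalling channel payload: mirror it to raw sockets, then
 * walk each embedded command and hand it to the BR/EDR or LE command
 * handler, sending a Command Reject for anything the handler refuses.
 */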
3907 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3908 struct sk_buff *skb)
3909 {
3910 u8 *data = skb->data;
3911 int len = skb->len;
3912 struct l2cap_cmd_hdr cmd;
3913 int err;
3914
3915 l2cap_raw_recv(conn, skb);
3916
3917 while (len >= L2CAP_CMD_HDR_SIZE) {
3918 u16 cmd_len;
3919 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3920 data += L2CAP_CMD_HDR_SIZE;
3921 len -= L2CAP_CMD_HDR_SIZE;
3922
3923 cmd_len = le16_to_cpu(cmd.len);
3924
3925 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3926
3927 if (cmd_len > len || !cmd.ident) {
3928 BT_DBG("corrupted command");
3929 break;
3930 }
3931
3932 if (conn->hcon->type == LE_LINK)
3933 err = l2cap_le_sig_cmd(conn, &cmd, data);
3934 else
3935 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3936
3937 if (err) {
3938 struct l2cap_cmd_rej_unk rej;
3939
3940 BT_ERR("Wrong link type (%d)", err);
3941
3942 /* FIXME: Map err to a valid reason */
3943 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3944 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3945 }
3946
3947 data += cmd_len;
3948 len -= cmd_len;
3949 }
3950
3951 kfree_skb(skb);
3952 }
3953
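/* When CRC16 FCS is in use, strip the trailing FCS field from the skb and
 * verify it against a CRC computed over the L2CAP header and payload.
 */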
3954 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3955 {
3956 u16 our_fcs, rcv_fcs;
3957 int hdr_size;
3958
3959 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3960 hdr_size = L2CAP_EXT_HDR_SIZE;
3961 else
3962 hdr_size = L2CAP_ENH_HDR_SIZE;
3963
3964 if (chan->fcs == L2CAP_FCS_CRC16) {
3965 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3966 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3967 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3968
3969 if (our_fcs != rcv_fcs)
3970 return -EBADMSG;
3971 }
3972 return 0;
3973 }
3974
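/* Acknowledge received frames: send an RNR if we are locally busy, push out
 * pending (re)transmissions, and fall back to a plain RR when no I-frame
 * ended up carrying the acknowledgement.
 */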
3975 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3976 {
3977 u32 control = 0;
3978
3979 chan->frames_sent = 0;
3980
3981 control |= __set_reqseq(chan, chan->buffer_seq);
3982
3983 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3984 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3985 l2cap_send_sframe(chan, control);
3986 set_bit(CONN_RNR_SENT, &chan->conn_state);
3987 }
3988
3989 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3990 l2cap_retransmit_frames(chan);
3991
3992 l2cap_ertm_send(chan);
3993
3994 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3995 chan->frames_sent == 0) {
3996 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3997 l2cap_send_sframe(chan, control);
3998 }
3999 }
4000
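/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq offset from buffer_seq and rejecting duplicates.
 */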
4001 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
4002 {
4003 struct sk_buff *next_skb;
4004 int tx_seq_offset, next_tx_seq_offset;
4005
4006 bt_cb(skb)->control.txseq = tx_seq;
4007 bt_cb(skb)->control.sar = sar;
4008
4009 next_skb = skb_peek(&chan->srej_q);
4010
4011 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4012
4013 while (next_skb) {
4014 if (bt_cb(next_skb)->control.txseq == tx_seq)
4015 return -EINVAL;
4016
4017 next_tx_seq_offset = __seq_offset(chan,
4018 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
4019
4020 if (next_tx_seq_offset > tx_seq_offset) {
4021 __skb_queue_before(&chan->srej_q, next_skb, skb);
4022 return 0;
4023 }
4024
4025 if (skb_queue_is_last(&chan->srej_q, next_skb))
4026 next_skb = NULL;
4027 else
4028 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4029 }
4030
4031 __skb_queue_tail(&chan->srej_q, skb);
4032
4033 return 0;
4034 }
4035
4036 static void append_skb_frag(struct sk_buff *skb,
4037 struct sk_buff *new_frag, struct sk_buff **last_frag)
4038 {
4039 /* skb->len reflects data in skb as well as all fragments
4040 * skb->data_len reflects only data in fragments
4041 */
4042 if (!skb_has_frag_list(skb))
4043 skb_shinfo(skb)->frag_list = new_frag;
4044
4045 new_frag->next = NULL;
4046
4047 (*last_frag)->next = new_frag;
4048 *last_frag = new_frag;
4049
4050 skb->len += new_frag->len;
4051 skb->data_len += new_frag->len;
4052 skb->truesize += new_frag->truesize;
4053 }
4054
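/* Reassemble a segmented SDU according to the SAR bits in 'control'.
 * Unsegmented frames are delivered directly; a START frame opens a new SDU,
 * CONTINUE and END frames are appended as fragments, and the complete SDU is
 * handed to the channel's recv callback. On error any partial SDU is freed.
 */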
4055 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
4056 {
4057 int err = -EINVAL;
4058
4059 switch (__get_ctrl_sar(chan, control)) {
4060 case L2CAP_SAR_UNSEGMENTED:
4061 if (chan->sdu)
4062 break;
4063
4064 err = chan->ops->recv(chan->data, skb);
4065 break;
4066
4067 case L2CAP_SAR_START:
4068 if (chan->sdu)
4069 break;
4070
4071 chan->sdu_len = get_unaligned_le16(skb->data);
4072 skb_pull(skb, L2CAP_SDULEN_SIZE);
4073
4074 if (chan->sdu_len > chan->imtu) {
4075 err = -EMSGSIZE;
4076 break;
4077 }
4078
4079 if (skb->len >= chan->sdu_len)
4080 break;
4081
4082 chan->sdu = skb;
4083 chan->sdu_last_frag = skb;
4084
4085 skb = NULL;
4086 err = 0;
4087 break;
4088
4089 case L2CAP_SAR_CONTINUE:
4090 if (!chan->sdu)
4091 break;
4092
4093 append_skb_frag(chan->sdu, skb,
4094 &chan->sdu_last_frag);
4095 skb = NULL;
4096
4097 if (chan->sdu->len >= chan->sdu_len)
4098 break;
4099
4100 err = 0;
4101 break;
4102
4103 case L2CAP_SAR_END:
4104 if (!chan->sdu)
4105 break;
4106
4107 append_skb_frag(chan->sdu, skb,
4108 &chan->sdu_last_frag);
4109 skb = NULL;
4110
4111 if (chan->sdu->len != chan->sdu_len)
4112 break;
4113
4114 err = chan->ops->recv(chan->data, chan->sdu);
4115
4116 if (!err) {
4117 /* Reassembly complete */
4118 chan->sdu = NULL;
4119 chan->sdu_last_frag = NULL;
4120 chan->sdu_len = 0;
4121 }
4122 break;
4123 }
4124
4125 if (err) {
4126 kfree_skb(skb);
4127 kfree_skb(chan->sdu);
4128 chan->sdu = NULL;
4129 chan->sdu_last_frag = NULL;
4130 chan->sdu_len = 0;
4131 }
4132
4133 return err;
4134 }
4135
4136 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
4137 {
4138 BT_DBG("chan %p, Enter local busy", chan);
4139
4140 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4141 l2cap_seq_list_clear(&chan->srej_list);
4142
4143 __set_ack_timer(chan);
4144 }
4145
4146 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4147 {
4148 u32 control;
4149
4150 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4151 goto done;
4152
4153 control = __set_reqseq(chan, chan->buffer_seq);
4154 control |= __set_ctrl_poll(chan);
4155 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4156 l2cap_send_sframe(chan, control);
4157 chan->retry_count = 1;
4158
4159 __clear_retrans_timer(chan);
4160 __set_monitor_timer(chan);
4161
4162 set_bit(CONN_WAIT_F, &chan->conn_state);
4163
4164 done:
4165 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4166 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4167
4168 BT_DBG("chan %p, Exit local busy", chan);
4169 }
4170
4171 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4172 {
4173 if (chan->mode == L2CAP_MODE_ERTM) {
4174 if (busy)
4175 l2cap_ertm_enter_local_busy(chan);
4176 else
4177 l2cap_ertm_exit_local_busy(chan);
4178 }
4179 }
4180
4181 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
4182 {
4183 struct sk_buff *skb;
4184 u32 control;
4185
4186 while ((skb = skb_peek(&chan->srej_q)) &&
4187 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4188 int err;
4189
4190 if (bt_cb(skb)->control.txseq != tx_seq)
4191 break;
4192
4193 skb = skb_dequeue(&chan->srej_q);
4194 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4195 err = l2cap_reassemble_sdu(chan, skb, control);
4196
4197 if (err < 0) {
4198 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4199 break;
4200 }
4201
4202 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4203 tx_seq = __next_seq(chan, tx_seq);
4204 }
4205 }
4206
4207 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4208 {
4209 struct srej_list *l, *tmp;
4210 u32 control;
4211
4212 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
4213 if (l->tx_seq == tx_seq) {
4214 list_del(&l->list);
4215 kfree(l);
4216 return;
4217 }
4218 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4219 control |= __set_reqseq(chan, l->tx_seq);
4220 l2cap_send_sframe(chan, control);
4221 list_del(&l->list);
4222 list_add_tail(&l->list, &chan->srej_l);
4223 }
4224 }
4225
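/* Send an SREJ S-frame for every missing sequence number between the
 * expected tx_seq and the one just received, remembering each request so
 * the retransmitted frames can be matched up later.
 */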
4226 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4227 {
4228 struct srej_list *new;
4229 u32 control;
4230
4231 while (tx_seq != chan->expected_tx_seq) {
4232 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4233 control |= __set_reqseq(chan, chan->expected_tx_seq);
4234 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4235 l2cap_send_sframe(chan, control);
4236
4237 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4238 if (!new)
4239 return -ENOMEM;
4240
4241 new->tx_seq = chan->expected_tx_seq;
4242
4243 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4244
4245 list_add_tail(&new->list, &chan->srej_l);
4246 }
4247
4248 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4249
4250 return 0;
4251 }
4252
4253 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4254 {
4255 u16 tx_seq = __get_txseq(chan, rx_control);
4256 u16 req_seq = __get_reqseq(chan, rx_control);
4257 u8 sar = __get_ctrl_sar(chan, rx_control);
4258 int tx_seq_offset, expected_tx_seq_offset;
4259 int num_to_ack = (chan->tx_win/6) + 1;
4260 int err = 0;
4261
4262 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4263 tx_seq, rx_control);
4264
4265 if (__is_ctrl_final(chan, rx_control) &&
4266 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4267 __clear_monitor_timer(chan);
4268 if (chan->unacked_frames > 0)
4269 __set_retrans_timer(chan);
4270 clear_bit(CONN_WAIT_F, &chan->conn_state);
4271 }
4272
4273 chan->expected_ack_seq = req_seq;
4274 l2cap_drop_acked_frames(chan);
4275
4276 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4277
4278 /* invalid tx_seq */
4279 if (tx_seq_offset >= chan->tx_win) {
4280 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4281 goto drop;
4282 }
4283
4284 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4285 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4286 l2cap_send_ack(chan);
4287 goto drop;
4288 }
4289
4290 if (tx_seq == chan->expected_tx_seq)
4291 goto expected;
4292
4293 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4294 struct srej_list *first;
4295
4296 first = list_first_entry(&chan->srej_l,
4297 struct srej_list, list);
4298 if (tx_seq == first->tx_seq) {
4299 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4300 l2cap_check_srej_gap(chan, tx_seq);
4301
4302 list_del(&first->list);
4303 kfree(first);
4304
4305 if (list_empty(&chan->srej_l)) {
4306 chan->buffer_seq = chan->buffer_seq_srej;
4307 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4308 l2cap_send_ack(chan);
4309 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4310 }
4311 } else {
4312 struct srej_list *l;
4313
4314 /* duplicated tx_seq */
4315 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
4316 goto drop;
4317
4318 list_for_each_entry(l, &chan->srej_l, list) {
4319 if (l->tx_seq == tx_seq) {
4320 l2cap_resend_srejframe(chan, tx_seq);
4321 return 0;
4322 }
4323 }
4324
4325 err = l2cap_send_srejframe(chan, tx_seq);
4326 if (err < 0) {
4327 l2cap_send_disconn_req(chan->conn, chan, -err);
4328 return err;
4329 }
4330 }
4331 } else {
4332 expected_tx_seq_offset = __seq_offset(chan,
4333 chan->expected_tx_seq, chan->buffer_seq);
4334
4335 /* duplicated tx_seq */
4336 if (tx_seq_offset < expected_tx_seq_offset)
4337 goto drop;
4338
4339 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4340
4341 BT_DBG("chan %p, Enter SREJ", chan);
4342
4343 INIT_LIST_HEAD(&chan->srej_l);
4344 chan->buffer_seq_srej = chan->buffer_seq;
4345
4346 __skb_queue_head_init(&chan->srej_q);
4347 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4348
4349 /* Set P-bit only if there are some I-frames to ack. */
4350 if (__clear_ack_timer(chan))
4351 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4352
4353 err = l2cap_send_srejframe(chan, tx_seq);
4354 if (err < 0) {
4355 l2cap_send_disconn_req(chan->conn, chan, -err);
4356 return err;
4357 }
4358 }
4359 return 0;
4360
4361 expected:
4362 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4363
4364 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4365 bt_cb(skb)->control.txseq = tx_seq;
4366 bt_cb(skb)->control.sar = sar;
4367 __skb_queue_tail(&chan->srej_q, skb);
4368 return 0;
4369 }
4370
4371 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4372 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4373
4374 if (err < 0) {
4375 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4376 return err;
4377 }
4378
4379 if (__is_ctrl_final(chan, rx_control)) {
4380 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4381 l2cap_retransmit_frames(chan);
4382 }
4383
4385 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4386 if (chan->num_acked == num_to_ack - 1)
4387 l2cap_send_ack(chan);
4388 else
4389 __set_ack_timer(chan);
4390
4391 return 0;
4392
4393 drop:
4394 kfree_skb(skb);
4395 return 0;
4396 }
4397
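/* Handle a Receiver Ready (RR) S-frame: acknowledge outstanding
 * I-frames and, depending on the poll/final bits, answer the poll,
 * retransmit or resume sending.
 */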
4398 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4399 {
4400 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4401 __get_reqseq(chan, rx_control), rx_control);
4402
4403 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4404 l2cap_drop_acked_frames(chan);
4405
4406 if (__is_ctrl_poll(chan, rx_control)) {
4407 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4408 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4409 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4410 (chan->unacked_frames > 0))
4411 __set_retrans_timer(chan);
4412
4413 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4414 l2cap_send_srejtail(chan);
4415 } else {
4416 l2cap_send_i_or_rr_or_rnr(chan);
4417 }
4418
4419 } else if (__is_ctrl_final(chan, rx_control)) {
4420 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4421
4422 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4423 l2cap_retransmit_frames(chan);
4424
4425 } else {
4426 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4427 (chan->unacked_frames > 0))
4428 __set_retrans_timer(chan);
4429
4430 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4431 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4432 l2cap_send_ack(chan);
4433 else
4434 l2cap_ertm_send(chan);
4435 }
4436 }
4437
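/* Handle a Reject (REJ) S-frame: acknowledge up to req_seq and
 * retransmit the rejected I-frames.
 */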
4438 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4439 {
4440 u16 tx_seq = __get_reqseq(chan, rx_control);
4441
4442 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4443
4444 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4445
4446 chan->expected_ack_seq = tx_seq;
4447 l2cap_drop_acked_frames(chan);
4448
4449 if (__is_ctrl_final(chan, rx_control)) {
4450 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4451 l2cap_retransmit_frames(chan);
4452 } else {
4453 l2cap_retransmit_frames(chan);
4454
4455 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4456 set_bit(CONN_REJ_ACT, &chan->conn_state);
4457 }
4458 }
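
/* Handle a Selective Reject (SREJ) S-frame: retransmit the single
 * requested I-frame and track the poll/final handshake.
 */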
4459 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4460 {
4461 u16 tx_seq = __get_reqseq(chan, rx_control);
4462
4463 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4464
4465 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4466
4467 if (__is_ctrl_poll(chan, rx_control)) {
4468 chan->expected_ack_seq = tx_seq;
4469 l2cap_drop_acked_frames(chan);
4470
4471 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4472 l2cap_retransmit_one_frame(chan, tx_seq);
4473
4474 l2cap_ertm_send(chan);
4475
4476 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4477 chan->srej_save_reqseq = tx_seq;
4478 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4479 }
4480 } else if (__is_ctrl_final(chan, rx_control)) {
4481 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4482 chan->srej_save_reqseq == tx_seq)
4483 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4484 else
4485 l2cap_retransmit_one_frame(chan, tx_seq);
4486 } else {
4487 l2cap_retransmit_one_frame(chan, tx_seq);
4488 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4489 chan->srej_save_reqseq = tx_seq;
4490 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4491 }
4492 }
4493 }
4494
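/* Handle a Receiver Not Ready (RNR) S-frame: mark the remote side as
 * busy, acknowledge outstanding I-frames and answer a poll if one was
 * requested.
 */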
4495 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4496 {
4497 u16 tx_seq = __get_reqseq(chan, rx_control);
4498
4499 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4500
4501 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4502 chan->expected_ack_seq = tx_seq;
4503 l2cap_drop_acked_frames(chan);
4504
4505 if (__is_ctrl_poll(chan, rx_control))
4506 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4507
4508 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4509 __clear_retrans_timer(chan);
4510 if (__is_ctrl_poll(chan, rx_control))
4511 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4512 return;
4513 }
4514
4515 if (__is_ctrl_poll(chan, rx_control)) {
4516 l2cap_send_srejtail(chan);
4517 } else {
4518 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4519 l2cap_send_sframe(chan, rx_control);
4520 }
4521 }
4522
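/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler above. */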
4523 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4524 {
4525 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4526
4527 if (__is_ctrl_final(chan, rx_control) &&
4528 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4529 __clear_monitor_timer(chan);
4530 if (chan->unacked_frames > 0)
4531 __set_retrans_timer(chan);
4532 clear_bit(CONN_WAIT_F, &chan->conn_state);
4533 }
4534
4535 switch (__get_ctrl_super(chan, rx_control)) {
4536 case L2CAP_SUPER_RR:
4537 l2cap_data_channel_rrframe(chan, rx_control);
4538 break;
4539
4540 case L2CAP_SUPER_REJ:
4541 l2cap_data_channel_rejframe(chan, rx_control);
4542 break;
4543
4544 case L2CAP_SUPER_SREJ:
4545 l2cap_data_channel_srejframe(chan, rx_control);
4546 break;
4547
4548 case L2CAP_SUPER_RNR:
4549 l2cap_data_channel_rnrframe(chan, rx_control);
4550 break;
4551 }
4552
4553 kfree_skb(skb);
4554 return 0;
4555 }
4556
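/* Validate a frame received on an ERTM channel (FCS, payload length,
 * req_seq) and pass it on to the I-frame or S-frame handler.
 */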
4557 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4558 {
4559 u32 control;
4560 u16 req_seq;
4561 int len, next_tx_seq_offset, req_seq_offset;
4562
4563 __unpack_control(chan, skb);
4564
4565 control = __get_control(chan, skb->data);
4566 skb_pull(skb, __ctrl_size(chan));
4567 len = skb->len;
4568
4569 /*
4570 * We can just drop the corrupted I-frame here.
4571 * The receiver will notice the gap, start the normal recovery
4572 * procedure and ask for retransmission.
4573 */
4574 if (l2cap_check_fcs(chan, skb))
4575 goto drop;
4576
4577 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4578 len -= L2CAP_SDULEN_SIZE;
4579
4580 if (chan->fcs == L2CAP_FCS_CRC16)
4581 len -= L2CAP_FCS_SIZE;
4582
4583 if (len > chan->mps) {
4584 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4585 goto drop;
4586 }
4587
4588 req_seq = __get_reqseq(chan, control);
4589
4590 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4591
4592 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4593 chan->expected_ack_seq);
4594
4595 /* check for invalid req-seq */
4596 if (req_seq_offset > next_tx_seq_offset) {
4597 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4598 goto drop;
4599 }
4600
4601 if (!__is_sframe(chan, control)) {
4602 if (len < 0) {
4603 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4604 goto drop;
4605 }
4606
4607 l2cap_data_channel_iframe(chan, control, skb);
4608 } else {
4609 if (len != 0) {
4610 BT_ERR("S-frame with nonzero payload length %d", len);
4611 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4612 goto drop;
4613 }
4614
4615 l2cap_data_channel_sframe(chan, control, skb);
4616 }
4617
4618 return 0;
4619
4620 drop:
4621 kfree_skb(skb);
4622 return 0;
4623 }
4624
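/* Deliver an incoming frame to the channel identified by its CID,
 * according to the channel mode (Basic, ERTM or Streaming).
 */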
4625 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4626 {
4627 struct l2cap_chan *chan;
4628 u32 control;
4629 u16 tx_seq;
4630 int len;
4631
4632 chan = l2cap_get_chan_by_scid(conn, cid);
4633 if (!chan) {
4634 BT_DBG("unknown cid 0x%4.4x", cid);
4635 /* Drop packet and return */
4636 kfree_skb(skb);
4637 return 0;
4638 }
4639
4640 BT_DBG("chan %p, len %d", chan, skb->len);
4641
4642 if (chan->state != BT_CONNECTED)
4643 goto drop;
4644
4645 switch (chan->mode) {
4646 case L2CAP_MODE_BASIC:
4647 /* If the socket recv buffer overflows we drop data here,
4648 * which is *bad* because L2CAP has to be reliable.
4649 * But we don't have any other choice: L2CAP doesn't
4650 * provide a flow control mechanism. */
4651
4652 if (chan->imtu < skb->len)
4653 goto drop;
4654
4655 if (!chan->ops->recv(chan->data, skb))
4656 goto done;
4657 break;
4658
4659 case L2CAP_MODE_ERTM:
4660 l2cap_ertm_data_rcv(chan, skb);
4661
4662 goto done;
4663
4664 case L2CAP_MODE_STREAMING:
4665 control = __get_control(chan, skb->data);
4666 skb_pull(skb, __ctrl_size(chan));
4667 len = skb->len;
4668
4669 if (l2cap_check_fcs(chan, skb))
4670 goto drop;
4671
4672 if (__is_sar_start(chan, control))
4673 len -= L2CAP_SDULEN_SIZE;
4674
4675 if (chan->fcs == L2CAP_FCS_CRC16)
4676 len -= L2CAP_FCS_SIZE;
4677
4678 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4679 goto drop;
4680
4681 tx_seq = __get_txseq(chan, control);
4682
4683 if (chan->expected_tx_seq != tx_seq) {
4684 /* Frame(s) missing - must discard partial SDU */
4685 kfree_skb(chan->sdu);
4686 chan->sdu = NULL;
4687 chan->sdu_last_frag = NULL;
4688 chan->sdu_len = 0;
4689
4690 /* TODO: Notify userland of missing data */
4691 }
4692
4693 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4694
4695 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4696 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4697
4698 goto done;
4699
4700 default:
4701 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4702 break;
4703 }
4704
4705 drop:
4706 kfree_skb(skb);
4707
4708 done:
4709 l2cap_chan_unlock(chan);
4710
4711 return 0;
4712 }
4713
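/* Deliver a connectionless data packet to the channel bound to the
 * given PSM.
 */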
4714 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4715 {
4716 struct l2cap_chan *chan;
4717
4718 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4719 if (!chan)
4720 goto drop;
4721
4722 BT_DBG("chan %p, len %d", chan, skb->len);
4723
4724 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4725 goto drop;
4726
4727 if (chan->imtu < skb->len)
4728 goto drop;
4729
4730 if (!chan->ops->recv(chan->data, skb))
4731 return 0;
4732
4733 drop:
4734 kfree_skb(skb);
4735
4736 return 0;
4737 }
4738
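/* Deliver an ATT packet received on the LE data channel. */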
4739 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4740 struct sk_buff *skb)
4741 {
4742 struct l2cap_chan *chan;
4743
4744 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
4745 if (!chan)
4746 goto drop;
4747
4748 BT_DBG("chan %p, len %d", chan, skb->len);
4749
4750 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4751 goto drop;
4752
4753 if (chan->imtu < skb->len)
4754 goto drop;
4755
4756 if (!chan->ops->recv(chan->data, skb))
4757 return 0;
4758
4759 drop:
4760 kfree_skb(skb);
4761
4762 return 0;
4763 }
4764
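/* Parse the Basic L2CAP header and dispatch the frame to the
 * signalling, connectionless, ATT, SMP or data channel handler.
 */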
4765 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4766 {
4767 struct l2cap_hdr *lh = (void *) skb->data;
4768 u16 cid, len;
4769 __le16 psm;
4770
4771 skb_pull(skb, L2CAP_HDR_SIZE);
4772 cid = __le16_to_cpu(lh->cid);
4773 len = __le16_to_cpu(lh->len);
4774
4775 if (len != skb->len) {
4776 kfree_skb(skb);
4777 return;
4778 }
4779
4780 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4781
4782 switch (cid) {
4783 case L2CAP_CID_LE_SIGNALING:
4784 case L2CAP_CID_SIGNALING:
4785 l2cap_sig_channel(conn, skb);
4786 break;
4787
4788 case L2CAP_CID_CONN_LESS:
4789 psm = get_unaligned((__le16 *) skb->data);
4790 skb_pull(skb, 2);
4791 l2cap_conless_channel(conn, psm, skb);
4792 break;
4793
4794 case L2CAP_CID_LE_DATA:
4795 l2cap_att_channel(conn, cid, skb);
4796 break;
4797
4798 case L2CAP_CID_SMP:
4799 if (smp_sig_channel(conn, skb))
4800 l2cap_conn_del(conn->hcon, EACCES);
4801 break;
4802
4803 default:
4804 l2cap_data_channel(conn, cid, skb);
4805 break;
4806 }
4807 }
4808
4809 /* ---- L2CAP interface with lower layer (HCI) ---- */
4810
4811 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4812 {
4813 int exact = 0, lm1 = 0, lm2 = 0;
4814 struct l2cap_chan *c;
4815
4816 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4817
4818 /* Find listening sockets and check their link_mode */
4819 read_lock(&chan_list_lock);
4820 list_for_each_entry(c, &chan_list, global_l) {
4821 struct sock *sk = c->sk;
4822
4823 if (c->state != BT_LISTEN)
4824 continue;
4825
4826 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4827 lm1 |= HCI_LM_ACCEPT;
4828 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4829 lm1 |= HCI_LM_MASTER;
4830 exact++;
4831 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4832 lm2 |= HCI_LM_ACCEPT;
4833 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4834 lm2 |= HCI_LM_MASTER;
4835 }
4836 }
4837 read_unlock(&chan_list_lock);
4838
4839 return exact ? lm1 : lm2;
4840 }
4841
4842 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4843 {
4844 struct l2cap_conn *conn;
4845
4846 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4847
4848 if (!status) {
4849 conn = l2cap_conn_add(hcon, status);
4850 if (conn)
4851 l2cap_conn_ready(conn);
4852 } else
4853 l2cap_conn_del(hcon, bt_to_errno(status));
4854
4855 return 0;
4856 }
4857
4858 int l2cap_disconn_ind(struct hci_conn *hcon)
4859 {
4860 struct l2cap_conn *conn = hcon->l2cap_data;
4861
4862 BT_DBG("hcon %p", hcon);
4863
4864 if (!conn)
4865 return HCI_ERROR_REMOTE_USER_TERM;
4866 return conn->disc_reason;
4867 }
4868
4869 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4870 {
4871 BT_DBG("hcon %p reason %d", hcon, reason);
4872
4873 l2cap_conn_del(hcon, bt_to_errno(reason));
4874 return 0;
4875 }
4876
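/* React to an encryption change on a connection-oriented channel:
 * depending on the security level, start the channel timer or close
 * the channel when encryption is lost, and clear the timer when it
 * comes back.
 */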
4877 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4878 {
4879 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4880 return;
4881
4882 if (encrypt == 0x00) {
4883 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4884 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4885 } else if (chan->sec_level == BT_SECURITY_HIGH)
4886 l2cap_chan_close(chan, ECONNREFUSED);
4887 } else {
4888 if (chan->sec_level == BT_SECURITY_MEDIUM)
4889 __clear_chan_timer(chan);
4890 }
4891 }
4892
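/* The authentication/encryption status of the link changed: update
 * every channel on the connection and complete any connection setup
 * that was waiting for security.
 */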
4893 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4894 {
4895 struct l2cap_conn *conn = hcon->l2cap_data;
4896 struct l2cap_chan *chan;
4897
4898 if (!conn)
4899 return 0;
4900
4901 BT_DBG("conn %p", conn);
4902
4903 if (hcon->type == LE_LINK) {
4904 if (!status && encrypt)
4905 smp_distribute_keys(conn, 0);
4906 cancel_delayed_work(&conn->security_timer);
4907 }
4908
4909 mutex_lock(&conn->chan_lock);
4910
4911 list_for_each_entry(chan, &conn->chan_l, list) {
4912 l2cap_chan_lock(chan);
4913
4914 BT_DBG("chan->scid %d", chan->scid);
4915
4916 if (chan->scid == L2CAP_CID_LE_DATA) {
4917 if (!status && encrypt) {
4918 chan->sec_level = hcon->sec_level;
4919 l2cap_chan_ready(chan);
4920 }
4921
4922 l2cap_chan_unlock(chan);
4923 continue;
4924 }
4925
4926 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4927 l2cap_chan_unlock(chan);
4928 continue;
4929 }
4930
4931 if (!status && (chan->state == BT_CONNECTED ||
4932 chan->state == BT_CONFIG)) {
4933 struct sock *sk = chan->sk;
4934
4935 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
4936 sk->sk_state_change(sk);
4937
4938 l2cap_check_encryption(chan, encrypt);
4939 l2cap_chan_unlock(chan);
4940 continue;
4941 }
4942
4943 if (chan->state == BT_CONNECT) {
4944 if (!status) {
4945 l2cap_send_conn_req(chan);
4946 } else {
4947 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4948 }
4949 } else if (chan->state == BT_CONNECT2) {
4950 struct sock *sk = chan->sk;
4951 struct l2cap_conn_rsp rsp;
4952 __u16 res, stat;
4953
4954 lock_sock(sk);
4955
4956 if (!status) {
4957 if (test_bit(BT_SK_DEFER_SETUP,
4958 &bt_sk(sk)->flags)) {
4959 struct sock *parent = bt_sk(sk)->parent;
4960 res = L2CAP_CR_PEND;
4961 stat = L2CAP_CS_AUTHOR_PEND;
4962 if (parent)
4963 parent->sk_data_ready(parent, 0);
4964 } else {
4965 __l2cap_state_change(chan, BT_CONFIG);
4966 res = L2CAP_CR_SUCCESS;
4967 stat = L2CAP_CS_NO_INFO;
4968 }
4969 } else {
4970 __l2cap_state_change(chan, BT_DISCONN);
4971 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4972 res = L2CAP_CR_SEC_BLOCK;
4973 stat = L2CAP_CS_NO_INFO;
4974 }
4975
4976 release_sock(sk);
4977
4978 rsp.scid = cpu_to_le16(chan->dcid);
4979 rsp.dcid = cpu_to_le16(chan->scid);
4980 rsp.result = cpu_to_le16(res);
4981 rsp.status = cpu_to_le16(stat);
4982 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4983 sizeof(rsp), &rsp);
4984 }
4985
4986 l2cap_chan_unlock(chan);
4987 }
4988
4989 mutex_unlock(&conn->chan_lock);
4990
4991 return 0;
4992 }
4993
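/* Reassemble incoming ACL fragments into a complete L2CAP frame and
 * hand it to l2cap_recv_frame() once all bytes have arrived.
 */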
4994 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4995 {
4996 struct l2cap_conn *conn = hcon->l2cap_data;
4997
4998 if (!conn)
4999 conn = l2cap_conn_add(hcon, 0);
5000
5001 if (!conn)
5002 goto drop;
5003
5004 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5005
5006 if (!(flags & ACL_CONT)) {
5007 struct l2cap_hdr *hdr;
5008 int len;
5009
5010 if (conn->rx_len) {
5011 BT_ERR("Unexpected start frame (len %d)", skb->len);
5012 kfree_skb(conn->rx_skb);
5013 conn->rx_skb = NULL;
5014 conn->rx_len = 0;
5015 l2cap_conn_unreliable(conn, ECOMM);
5016 }
5017
5018 /* A start fragment always begins with the Basic L2CAP header */
5019 if (skb->len < L2CAP_HDR_SIZE) {
5020 BT_ERR("Frame is too short (len %d)", skb->len);
5021 l2cap_conn_unreliable(conn, ECOMM);
5022 goto drop;
5023 }
5024
5025 hdr = (struct l2cap_hdr *) skb->data;
5026 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5027
5028 if (len == skb->len) {
5029 /* Complete frame received */
5030 l2cap_recv_frame(conn, skb);
5031 return 0;
5032 }
5033
5034 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5035
5036 if (skb->len > len) {
5037 BT_ERR("Frame is too long (len %d, expected len %d)",
5038 skb->len, len);
5039 l2cap_conn_unreliable(conn, ECOMM);
5040 goto drop;
5041 }
5042
5043 /* Allocate skb for the complete frame (with header) */
5044 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5045 if (!conn->rx_skb)
5046 goto drop;
5047
5048 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5049 skb->len);
5050 conn->rx_len = len - skb->len;
5051 } else {
5052 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5053
5054 if (!conn->rx_len) {
5055 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5056 l2cap_conn_unreliable(conn, ECOMM);
5057 goto drop;
5058 }
5059
5060 if (skb->len > conn->rx_len) {
5061 BT_ERR("Fragment is too long (len %d, expected %d)",
5062 skb->len, conn->rx_len);
5063 kfree_skb(conn->rx_skb);
5064 conn->rx_skb = NULL;
5065 conn->rx_len = 0;
5066 l2cap_conn_unreliable(conn, ECOMM);
5067 goto drop;
5068 }
5069
5070 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5071 skb->len);
5072 conn->rx_len -= skb->len;
5073
5074 if (!conn->rx_len) {
5075 /* Complete frame received */
5076 l2cap_recv_frame(conn, conn->rx_skb);
5077 conn->rx_skb = NULL;
5078 }
5079 }
5080
5081 drop:
5082 kfree_skb(skb);
5083 return 0;
5084 }
5085
5086 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5087 {
5088 struct l2cap_chan *c;
5089
5090 read_lock(&chan_list_lock);
5091
5092 list_for_each_entry(c, &chan_list, global_l) {
5093 struct sock *sk = c->sk;
5094
5095 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5096 batostr(&bt_sk(sk)->src),
5097 batostr(&bt_sk(sk)->dst),
5098 c->state, __le16_to_cpu(c->psm),
5099 c->scid, c->dcid, c->imtu, c->omtu,
5100 c->sec_level, c->mode);
5101 }
5102
5103 read_unlock(&chan_list_lock);
5104
5105 return 0;
5106 }
5107
5108 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5109 {
5110 return single_open(file, l2cap_debugfs_show, inode->i_private);
5111 }
5112
5113 static const struct file_operations l2cap_debugfs_fops = {
5114 .open = l2cap_debugfs_open,
5115 .read = seq_read,
5116 .llseek = seq_lseek,
5117 .release = single_release,
5118 };
5119
5120 static struct dentry *l2cap_debugfs;
5121
5122 int __init l2cap_init(void)
5123 {
5124 int err;
5125
5126 err = l2cap_init_sockets();
5127 if (err < 0)
5128 return err;
5129
5130 if (bt_debugfs) {
5131 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5132 bt_debugfs, NULL, &l2cap_debugfs_fops);
5133 if (!l2cap_debugfs)
5134 BT_ERR("Failed to create L2CAP debug file");
5135 }
5136
5137 return 0;
5138 }
5139
5140 void l2cap_exit(void)
5141 {
5142 debugfs_remove(l2cap_debugfs);
5143 l2cap_cleanup_sockets();
5144 }
5145
5146 module_param(disable_ertm, bool, 0644);
5147 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");