Bluetooth: Fix skb length calculation
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
51 #include <net/sock.h>
52
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 /* ---- L2CAP channels ---- */
77
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 if (c->dcid == cid)
84 return c;
85 }
86 return NULL;
87 }
88
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 {
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98 }
99
/* Find the channel with the given SCID.
 * The lookup runs under conn->chan_lock, but the lock is released
 * before returning, so the caller gets an unlocked channel pointer.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	mutex_unlock(&conn->chan_lock);

	return c;
}
112
113 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
114 {
115 struct l2cap_chan *c;
116
117 list_for_each_entry(c, &conn->chan_l, list) {
118 if (c->ident == ident)
119 return c;
120 }
121 return NULL;
122 }
123
124 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
125 {
126 struct l2cap_chan *c;
127
128 list_for_each_entry(c, &chan_list, global_l) {
129 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
130 return c;
131 }
132 return NULL;
133 }
134
135 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
136 {
137 int err;
138
139 write_lock(&chan_list_lock);
140
141 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
142 err = -EADDRINUSE;
143 goto done;
144 }
145
146 if (psm) {
147 chan->psm = psm;
148 chan->sport = psm;
149 err = 0;
150 } else {
151 u16 p;
152
153 err = -EINVAL;
154 for (p = 0x1001; p < 0x1100; p += 2)
155 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
156 chan->psm = cpu_to_le16(p);
157 chan->sport = cpu_to_le16(p);
158 err = 0;
159 break;
160 }
161 }
162
163 done:
164 write_unlock(&chan_list_lock);
165 return err;
166 }
167
168 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
169 {
170 write_lock(&chan_list_lock);
171
172 chan->scid = scid;
173
174 write_unlock(&chan_list_lock);
175
176 return 0;
177 }
178
179 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
180 {
181 u16 cid = L2CAP_CID_DYN_START;
182
183 for (; cid < L2CAP_CID_DYN_END; cid++) {
184 if (!__l2cap_get_chan_by_scid(conn, cid))
185 return cid;
186 }
187
188 return 0;
189 }
190
/* Update the channel state and notify the upper layer through the
 * state_change callback.  Caller handles any required locking.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
199
/* Change the channel state with the owning socket locked. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
208
/* Record an error code on the channel's socket.  Caller handles any
 * required locking.
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
215
/* Record an error code on the channel's socket, taking the socket lock. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
224
225 /* ---- L2CAP sequence number lists ---- */
226
227 /* For ERTM, ordered lists of sequence numbers must be tracked for
228 * SREJ requests that are received and for frames that are to be
229 * retransmitted. These seq_list functions implement a singly-linked
230 * list in an array, where membership in the list can also be checked
231 * in constant time. Items can also be added to the tail of the list
232 * and removed from the head in constant time, without further memory
233 * allocs or frees.
234 */
235
236 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
237 {
238 size_t alloc_size, i;
239
240 /* Allocated size is a power of 2 to map sequence numbers
241 * (which may be up to 14 bits) in to a smaller array that is
242 * sized for the negotiated ERTM transmit windows.
243 */
244 alloc_size = roundup_pow_of_two(size);
245
246 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
247 if (!seq_list->list)
248 return -ENOMEM;
249
250 seq_list->mask = alloc_size - 1;
251 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
252 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
253 for (i = 0; i < alloc_size; i++)
254 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
255
256 return 0;
257 }
258
/* Release the backing array of a sequence-number list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
263
/* Constant-time membership test: a slot holding anything other than
 * L2CAP_SEQ_LIST_CLEAR means the sequence number is on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
270
/* Remove a sequence number from the list and return it.  Removing the
 * head is O(1); removing an interior element walks the singly-linked
 * chain from the head.  Returns L2CAP_SEQ_LIST_CLEAR if the list is
 * empty or seq is not on it.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element leaves the list fully empty */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
304
/* Pop and return the head of the list in constant time; returns
 * L2CAP_SEQ_LIST_CLEAR if the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
310
311 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
312 {
313 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
314 u16 i;
315 for (i = 0; i <= seq_list->mask; i++)
316 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
317
318 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
319 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
320 }
321 }
322
323 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
324 {
325 u16 mask = seq_list->mask;
326
327 /* All appends happen in constant time */
328
329 if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
330 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
331 seq_list->head = seq;
332 else
333 seq_list->list[seq_list->tail & mask] = seq;
334
335 seq_list->tail = seq;
336 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
337 }
338 }
339
/* Delayed-work handler for the channel timer.  Closes the channel with
 * an error that depends on which state it timed out in, then drops a
 * channel reference (presumably the one held for the pending timer —
 * NOTE(review): confirm against __set_chan_timer).
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock is taken before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
369
/* Allocate and initialise a new channel and add it to the global
 * channel list.  The channel starts in BT_OPEN with one reference
 * held by the caller.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	/* Initial reference belongs to the caller */
	atomic_set(&chan->refcnt, 1);

	BT_DBG("chan %p", chan);

	return chan;
}
394
/* Remove the channel from the global list and drop the creation
 * reference taken in l2cap_chan_create().
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
403
/* Install the default ERTM/security parameters on a fresh channel. */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
414
/* Attach a channel to a connection: pick CIDs and output MTU based on
 * the channel type and link type, set QoS defaults, take a channel
 * reference for the connection's list and link the channel in.
 * Caller is expected to hold conn->chan_lock (see l2cap_chan_add).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			__le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection uses the fixed LE data CID */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort QoS parameters */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* Reference owned by conn->chan_l; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
463
/* Attach a channel to a connection under conn->chan_lock. */
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
470
/* Detach a channel from its connection and mark its socket closed.
 * A non-zero err is reported through sk->sk_err.  For channels whose
 * configuration completed in both directions, pending transmit/receive
 * state (queues, SREJ lists, ERTM timers) is also torn down.
 */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	lock_sock(sk);

	__l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		__l2cap_chan_set_err(chan, err);

	if (parent) {
		/* Still on a listener's accept queue: unlink and wake it */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	release_sock(sk);

	/* Skip queue/timer teardown unless configuration completed in
	 * both directions.
	 */
	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
530
/* Close every not-yet-accepted channel queued on a listening socket. */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		l2cap_chan_lock(chan);
		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
	}
}
549
/* Close a channel according to its current state: listeners tear down
 * their accept queue; connected/configuring ACL channels send a
 * disconnect request; half-open incoming channels (BT_CONNECT2) reject
 * the pending connection before being deleted; everything else is
 * simply zapped.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
					state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		lock_sock(sk);
		l2cap_chan_cleanup_listen(sk);

		__l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			/* Graceful disconnect: wait for the peer's response
			 * under the channel timer.
			 */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Pick the rejection code for the pending request */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		lock_sock(sk);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;
	}
}
613
614 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
615 {
616 if (chan->chan_type == L2CAP_CHAN_RAW) {
617 switch (chan->sec_level) {
618 case BT_SECURITY_HIGH:
619 return HCI_AT_DEDICATED_BONDING_MITM;
620 case BT_SECURITY_MEDIUM:
621 return HCI_AT_DEDICATED_BONDING;
622 default:
623 return HCI_AT_NO_BONDING;
624 }
625 } else if (chan->psm == cpu_to_le16(0x0001)) {
626 if (chan->sec_level == BT_SECURITY_LOW)
627 chan->sec_level = BT_SECURITY_SDP;
628
629 if (chan->sec_level == BT_SECURITY_HIGH)
630 return HCI_AT_NO_BONDING_MITM;
631 else
632 return HCI_AT_NO_BONDING;
633 } else {
634 switch (chan->sec_level) {
635 case BT_SECURITY_HIGH:
636 return HCI_AT_GENERAL_BONDING_MITM;
637 case BT_SECURITY_MEDIUM:
638 return HCI_AT_GENERAL_BONDING;
639 default:
640 return HCI_AT_NO_BONDING;
641 }
642 }
643 }
644
/* Service level security: ask the HCI layer to enforce the channel's
 * security level with the appropriate authentication requirement.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
655
656 static u8 l2cap_get_ident(struct l2cap_conn *conn)
657 {
658 u8 id;
659
660 /* Get next available identificator.
661 * 1 - 128 are used by kernel.
662 * 129 - 199 are reserved.
663 * 200 - 254 are used by utilities like l2ping, etc.
664 */
665
666 spin_lock(&conn->lock);
667
668 if (++conn->tx_ident > 128)
669 conn->tx_ident = 1;
670
671 id = conn->tx_ident;
672
673 spin_unlock(&conn->lock);
674
675 return id;
676 }
677
/* Build an L2CAP signalling command and hand it to the HCI layer at
 * maximum priority.  Allocation failures are silently dropped.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use non-flushable packets when the controller supports them */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
698
/* Transmit a data skb on the channel's HCI connection, choosing the
 * flush semantics from the channel flags and controller capability.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
							skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
					lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
716
717 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
718 {
719 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
720 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
721
722 if (enh & L2CAP_CTRL_FRAME_TYPE) {
723 /* S-Frame */
724 control->sframe = 1;
725 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
726 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
727
728 control->sar = 0;
729 control->txseq = 0;
730 } else {
731 /* I-Frame */
732 control->sframe = 0;
733 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
734 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
735
736 control->poll = 0;
737 control->super = 0;
738 }
739 }
740
741 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
742 {
743 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
744 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
745
746 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
747 /* S-Frame */
748 control->sframe = 1;
749 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
750 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
751
752 control->sar = 0;
753 control->txseq = 0;
754 } else {
755 /* I-Frame */
756 control->sframe = 0;
757 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
758 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
759
760 control->poll = 0;
761 control->super = 0;
762 }
763 }
764
/* Decode the control field at the start of skb->data into the skb's
 * control block, using the extended (32-bit) or enhanced (16-bit)
 * layout depending on the channel's FLAG_EXT_CTRL setting.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
	}
}
776
777 static u32 __pack_extended_control(struct l2cap_ctrl *control)
778 {
779 u32 packed;
780
781 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
782 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
783
784 if (control->sframe) {
785 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
786 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
787 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
788 } else {
789 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
790 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
791 }
792
793 return packed;
794 }
795
796 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
797 {
798 u16 packed;
799
800 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
801 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
802
803 if (control->sframe) {
804 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
805 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
806 packed |= L2CAP_CTRL_FRAME_TYPE;
807 } else {
808 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
809 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
810 }
811
812 return packed;
813 }
814
/* Write the packed control field into the skb just after the basic
 * L2CAP header, in extended or enhanced layout per FLAG_EXT_CTRL.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
827
/* Build and transmit a single S-frame carrying the given control bits.
 * Merges in the frame-type, pending F-bit and pending P-bit, and
 * appends a CRC16 FCS when negotiated.  No-op unless the channel is
 * connected.
 * NOTE(review): count is clamped to conn->mtu but the header length
 * field and FCS offset are derived from hlen — looks inconsistent if
 * the ACL MTU could ever be smaller than hlen; confirm against the
 * minimum controller MTU.
 */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen;

	if (chan->state != BT_CONNECTED)
		return;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	BT_DBG("chan %p, control 0x%8.8x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);

	control |= __set_sframe(chan);

	/* Consume any pending final/poll bits for this frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= __set_ctrl_final(chan);

	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
		control |= __set_ctrl_poll(chan);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field */
		u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	l2cap_do_send(chan, skb);
}
876
/* Send a supervisory frame acknowledging buffer_seq: RNR if we are
 * locally busy (also recording that an RNR went out), RR otherwise.
 */
static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
{
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	} else
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);

	control |= __set_reqseq(chan, chan->buffer_seq);

	l2cap_send_sframe(chan, control);
}
889
/* True when no connect request is outstanding on this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
894
/* Send an L2CAP connection request for this channel and mark the
 * request as pending until the response arrives.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
909
/* Move the channel to BT_CONNECTED, reset configuration state, stop
 * the channel timer and wake the socket (and any listening parent).
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sock *parent;

	lock_sock(sk);

	parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	chan->conf_state = 0;
	__clear_chan_timer(chan);

	__l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);

	release_sock(sk);
}
932
/* Kick off channel establishment.  LE links are ready immediately.
 * On BR/EDR, send the connect request once the remote feature mask is
 * known and security allows it; otherwise first issue an information
 * request for the feature mask and let its response drive the connect.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: wait for it */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
962
963 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
964 {
965 u32 local_feat_mask = l2cap_feat_mask;
966 if (!disable_ertm)
967 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
968
969 switch (mode) {
970 case L2CAP_MODE_ERTM:
971 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
972 case L2CAP_MODE_STREAMING:
973 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
974 default:
975 return 0x00;
976 }
977 }
978
/* Send an L2CAP disconnect request for the channel, stop any ERTM
 * timers, and move the channel to BT_DISCONN with the given error set
 * on its socket.  No-op if the connection is already gone.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1003
1004 /* ---- L2CAP connections ---- */
/* Drive all connection-oriented channels on this link forward:
 * outgoing channels in BT_CONNECT send their connect request (or are
 * closed if their mode is unsupported and cannot fall back), and
 * incoming channels in BT_CONNECT2 answer the pending connect request
 * and, on success, start configuration.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait for security and any outstanding request */
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State-2 devices cannot fall back to basic mode */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must accept first */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* Only start config after an actual success reply,
			 * and only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1086
/* Find a channel in the global list by state, source CID and address
 * pair.  An exact source/destination match wins immediately; otherwise
 * the closest wildcard (BDADDR_ANY) match is returned.  The result is
 * returned without any lock held.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
					(src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1129
/* Handle an incoming LE connection: if a listener exists for the LE
 * data CID, spawn a child channel, queue it on the listener's accept
 * queue and mark it connected.  Silently bails if there is no listener,
 * the backlog is full, or the child cannot be created.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	__l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

clean:
	/* Success path falls through here too: only the parent lock
	 * needs releasing.
	 */
	release_sock(parent);
}
1176
/* Called when the underlying link comes up.  Accepts/secures LE
 * connections, then walks every channel: LE channels become ready once
 * security is satisfied, connectionless/raw channels are marked
 * connected directly, and connecting channels are started.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* No connect/config exchange needed */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1215
1216 /* Notify sockets that we cannot guaranty reliability anymore */
1217 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1218 {
1219 struct l2cap_chan *chan;
1220
1221 BT_DBG("conn %p", conn);
1222
1223 mutex_lock(&conn->chan_lock);
1224
1225 list_for_each_entry(chan, &conn->chan_l, list) {
1226 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1227 __l2cap_chan_set_err(chan, err);
1228 }
1229
1230 mutex_unlock(&conn->chan_lock);
1231 }
1232
1233 static void l2cap_info_timeout(struct work_struct *work)
1234 {
1235 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1236 info_timer.work);
1237
1238 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1239 conn->info_ident = 0;
1240
1241 l2cap_conn_start(conn);
1242 }
1243
/* Tear down an L2CAP connection: kill every channel, detach from the
 * HCI link and free the connection object.  Safe to call with a NULL
 * l2cap_data (no-op).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Temporary reference so the channel outlives
		 * l2cap_chan_del() until close() below has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Make sure no timer fires after conn is freed. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1286
1287 static void security_timeout(struct work_struct *work)
1288 {
1289 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1290 security_timer.work);
1291
1292 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1293 }
1294
1295 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1296 {
1297 struct l2cap_conn *conn = hcon->l2cap_data;
1298 struct hci_chan *hchan;
1299
1300 if (conn || status)
1301 return conn;
1302
1303 hchan = hci_chan_create(hcon);
1304 if (!hchan)
1305 return NULL;
1306
1307 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1308 if (!conn) {
1309 hci_chan_del(hchan);
1310 return NULL;
1311 }
1312
1313 hcon->l2cap_data = conn;
1314 conn->hcon = hcon;
1315 conn->hchan = hchan;
1316
1317 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1318
1319 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1320 conn->mtu = hcon->hdev->le_mtu;
1321 else
1322 conn->mtu = hcon->hdev->acl_mtu;
1323
1324 conn->src = &hcon->hdev->bdaddr;
1325 conn->dst = &hcon->dst;
1326
1327 conn->feat_mask = 0;
1328
1329 spin_lock_init(&conn->lock);
1330 mutex_init(&conn->chan_lock);
1331
1332 INIT_LIST_HEAD(&conn->chan_l);
1333
1334 if (hcon->type == LE_LINK)
1335 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1336 else
1337 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1338
1339 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1340
1341 return conn;
1342 }
1343
1344 /* ---- Socket interface ---- */
1345
1346 /* Find socket with psm and source / destination bdaddr.
1347 * Returns closest match.
1348 */
1349 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1350 bdaddr_t *src,
1351 bdaddr_t *dst)
1352 {
1353 struct l2cap_chan *c, *c1 = NULL;
1354
1355 read_lock(&chan_list_lock);
1356
1357 list_for_each_entry(c, &chan_list, global_l) {
1358 struct sock *sk = c->sk;
1359
1360 if (state && c->state != state)
1361 continue;
1362
1363 if (c->psm == psm) {
1364 int src_match, dst_match;
1365 int src_any, dst_any;
1366
1367 /* Exact match. */
1368 src_match = !bacmp(&bt_sk(sk)->src, src);
1369 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1370 if (src_match && dst_match) {
1371 read_unlock(&chan_list_lock);
1372 return c;
1373 }
1374
1375 /* Closest match */
1376 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1377 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1378 if ((src_match && dst_any) || (src_any && dst_match) ||
1379 (src_any && dst_any))
1380 c1 = c;
1381 }
1382 }
1383
1384 read_unlock(&chan_list_lock);
1385
1386 return c1;
1387 }
1388
/* Initiate an outgoing connection on a channel to (dst, dst_type),
 * identified by PSM and/or fixed CID.  Validates channel mode and
 * socket state, creates/attaches the HCI link and starts the L2CAP
 * connect sequence.  Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		release_sock(sk);
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		release_sock(sk);
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		release_sock(sk);
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else is ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel may use an LE link. */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* NOTE(review): the channel lock is dropped around
	 * l2cap_chan_add(), presumably to keep lock ordering with
	 * conn->chan_lock — confirm against l2cap_chan_add().
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* The HCI link may already be up (reused connection). */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1532
/* Block (interruptibly) until every transmitted ERTM frame has been
 * acknowledged or the channel loses its connection.  The socket lock
 * is dropped while sleeping so the receive path can deliver the
 * acknowledgements.  Returns 0, a socket error, or an interrupt errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the wait after a full timeout expiry. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1564
1565 static void l2cap_monitor_timeout(struct work_struct *work)
1566 {
1567 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1568 monitor_timer.work);
1569
1570 BT_DBG("chan %p", chan);
1571
1572 l2cap_chan_lock(chan);
1573
1574 if (chan->retry_count >= chan->remote_max_tx) {
1575 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1576 l2cap_chan_unlock(chan);
1577 l2cap_chan_put(chan);
1578 return;
1579 }
1580
1581 chan->retry_count++;
1582 __set_monitor_timer(chan);
1583
1584 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1585 l2cap_chan_unlock(chan);
1586 l2cap_chan_put(chan);
1587 }
1588
1589 static void l2cap_retrans_timeout(struct work_struct *work)
1590 {
1591 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1592 retrans_timer.work);
1593
1594 BT_DBG("chan %p", chan);
1595
1596 l2cap_chan_lock(chan);
1597
1598 chan->retry_count = 1;
1599 __set_monitor_timer(chan);
1600
1601 set_bit(CONN_WAIT_F, &chan->conn_state);
1602
1603 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1604
1605 l2cap_chan_unlock(chan);
1606 l2cap_chan_put(chan);
1607 }
1608
1609 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1610 {
1611 struct sk_buff *skb;
1612
1613 while ((skb = skb_peek(&chan->tx_q)) &&
1614 chan->unacked_frames) {
1615 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1616 break;
1617
1618 skb = skb_dequeue(&chan->tx_q);
1619 kfree_skb(skb);
1620
1621 chan->unacked_frames--;
1622 }
1623
1624 if (!chan->unacked_frames)
1625 __clear_retrans_timer(chan);
1626 }
1627
/* Streaming mode: transmit every queued frame immediately, stamping
 * each with the next tx sequence number.  No retransmission state is
 * kept — frames leave the queue for good.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u32 control;
	u16 fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
		control |= __set_txseq(chan, chan->next_tx_seq);
		control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
		__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers the whole PDU except its own
			 * trailing two bytes.
			 */
			fcs = crc16(0, (u8 *)skb->data,
				    skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs,
					   skb->data + skb->len - L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
	}
}
1652
1653 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1654 {
1655 struct sk_buff *skb, *tx_skb;
1656 u16 fcs;
1657 u32 control;
1658
1659 skb = skb_peek(&chan->tx_q);
1660 if (!skb)
1661 return;
1662
1663 while (bt_cb(skb)->control.txseq != tx_seq) {
1664 if (skb_queue_is_last(&chan->tx_q, skb))
1665 return;
1666
1667 skb = skb_queue_next(&chan->tx_q, skb);
1668 }
1669
1670 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1671 chan->remote_max_tx) {
1672 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1673 return;
1674 }
1675
1676 tx_skb = skb_clone(skb, GFP_ATOMIC);
1677 bt_cb(skb)->control.retries++;
1678
1679 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1680 control &= __get_sar_mask(chan);
1681
1682 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1683 control |= __set_ctrl_final(chan);
1684
1685 control |= __set_reqseq(chan, chan->buffer_seq);
1686 control |= __set_txseq(chan, tx_seq);
1687
1688 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1689
1690 if (chan->fcs == L2CAP_FCS_CRC16) {
1691 fcs = crc16(0, (u8 *)tx_skb->data,
1692 tx_skb->len - L2CAP_FCS_SIZE);
1693 put_unaligned_le16(fcs,
1694 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1695 }
1696
1697 l2cap_do_send(chan, tx_skb);
1698 }
1699
1700 static int l2cap_ertm_send(struct l2cap_chan *chan)
1701 {
1702 struct sk_buff *skb, *tx_skb;
1703 u16 fcs;
1704 u32 control;
1705 int nsent = 0;
1706
1707 if (chan->state != BT_CONNECTED)
1708 return -ENOTCONN;
1709
1710 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1711 return 0;
1712
1713 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1714
1715 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1716 chan->remote_max_tx) {
1717 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1718 break;
1719 }
1720
1721 tx_skb = skb_clone(skb, GFP_ATOMIC);
1722
1723 bt_cb(skb)->control.retries++;
1724
1725 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1726 control &= __get_sar_mask(chan);
1727
1728 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1729 control |= __set_ctrl_final(chan);
1730
1731 control |= __set_reqseq(chan, chan->buffer_seq);
1732 control |= __set_txseq(chan, chan->next_tx_seq);
1733 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1734
1735 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1736
1737 if (chan->fcs == L2CAP_FCS_CRC16) {
1738 fcs = crc16(0, (u8 *)skb->data,
1739 tx_skb->len - L2CAP_FCS_SIZE);
1740 put_unaligned_le16(fcs, skb->data +
1741 tx_skb->len - L2CAP_FCS_SIZE);
1742 }
1743
1744 l2cap_do_send(chan, tx_skb);
1745
1746 __set_retrans_timer(chan);
1747
1748 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1749
1750 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1751
1752 if (bt_cb(skb)->control.retries == 1) {
1753 chan->unacked_frames++;
1754
1755 if (!nsent++)
1756 __clear_ack_timer(chan);
1757 }
1758
1759 chan->frames_sent++;
1760
1761 if (skb_queue_is_last(&chan->tx_q, skb))
1762 chan->tx_send_head = NULL;
1763 else
1764 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1765 }
1766
1767 return nsent;
1768 }
1769
1770 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1771 {
1772 int ret;
1773
1774 if (!skb_queue_empty(&chan->tx_q))
1775 chan->tx_send_head = chan->tx_q.next;
1776
1777 chan->next_tx_seq = chan->expected_ack_seq;
1778 ret = l2cap_ertm_send(chan);
1779 return ret;
1780 }
1781
1782 static void __l2cap_send_ack(struct l2cap_chan *chan)
1783 {
1784 u32 control = 0;
1785
1786 control |= __set_reqseq(chan, chan->buffer_seq);
1787
1788 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1789 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1790 set_bit(CONN_RNR_SENT, &chan->conn_state);
1791 l2cap_send_sframe(chan, control);
1792 return;
1793 }
1794
1795 if (l2cap_ertm_send(chan) > 0)
1796 return;
1797
1798 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1799 l2cap_send_sframe(chan, control);
1800 }
1801
/* Send an immediate acknowledgement; the delayed-ack timer becomes
 * redundant and is cleared first.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	__clear_ack_timer(chan);
	__l2cap_send_ack(chan);
}
1807
1808 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1809 {
1810 struct srej_list *tail;
1811 u32 control;
1812
1813 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1814 control |= __set_ctrl_final(chan);
1815
1816 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1817 control |= __set_reqseq(chan, tail->tx_seq);
1818
1819 l2cap_send_sframe(chan, control);
1820 }
1821
/* Copy user data into an skb chain: the first 'count' bytes land in
 * 'skb' itself, the remainder in HCI-MTU-sized fragments hung off its
 * frag_list.  Returns the number of bytes copied, -EFAULT on a bad
 * user buffer, or the error from a failed fragment allocation.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the parent skb's length accounting in sync with
		 * the data added to its fragment list.
		 */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1866
/* Build a connectionless PDU: L2CAP header (length includes the PSM
 * field) followed by the PSM and the user payload.  Returns the skb
 * or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);

	/* The first skb carries at most one HCI MTU including header;
	 * the rest goes into continuation fragments.
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1900
1901 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1902 struct msghdr *msg, size_t len,
1903 u32 priority)
1904 {
1905 struct l2cap_conn *conn = chan->conn;
1906 struct sk_buff *skb;
1907 int err, count;
1908 struct l2cap_hdr *lh;
1909
1910 BT_DBG("chan %p len %d", chan, (int)len);
1911
1912 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1913
1914 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1915 msg->msg_flags & MSG_DONTWAIT);
1916 if (IS_ERR(skb))
1917 return skb;
1918
1919 skb->priority = priority;
1920
1921 /* Create L2CAP header */
1922 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1923 lh->cid = cpu_to_le16(chan->dcid);
1924 lh->len = cpu_to_le16(len);
1925
1926 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1927 if (unlikely(err < 0)) {
1928 kfree_skb(skb);
1929 return ERR_PTR(err);
1930 }
1931 return skb;
1932 }
1933
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field, optional SDU length (SAR start fragments), payload and an FCS
 * placeholder.  Control and FCS are filled in at transmission time.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on whether extended control fields were
	 * negotiated for this channel.
	 */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	/* A non-zero sdulen marks a SAR start fragment, which also
	 * carries the total SDU length.
	 */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Placeholder; real control value is stamped in on send. */
	__put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; computed at transmission time. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));

	bt_cb(skb)->control.retries = 0;
	return skb;
}
1988
/* Split an SDU from the user iovec into I-frame PDUs appended to
 * seg_queue, setting the SAR marking on each (unsegmented, start,
 * continue, end).  On failure the queue is purged and the error from
 * PDU creation is returned; otherwise 0.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no segmentation needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* The start fragment carries the total SDU length. */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the start fragment carries the SDU
			 * length; later fragments get that space back
			 * for payload data.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2054
/* Send user data on a channel.  Dispatches on channel type and mode:
 * connectionless PDU, single basic-mode PDU, or a segmented
 * ERTM/streaming SDU.  Returns the number of bytes accepted or a
 * negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		/* Hand the segments to the tx queue; start sending from
		 * the first new segment if nothing was pending.
		 */
		if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
			chan->tx_send_head = seg_queue.next;
		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		if (chan->mode == L2CAP_MODE_ERTM)
			err = l2cap_ertm_send(chan);
		else
			l2cap_streaming_send(chan);

		if (err >= 0)
			err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2139
2140 /* Copy frame to all raw sockets on that connection */
2141 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2142 {
2143 struct sk_buff *nskb;
2144 struct l2cap_chan *chan;
2145
2146 BT_DBG("conn %p", conn);
2147
2148 mutex_lock(&conn->chan_lock);
2149
2150 list_for_each_entry(chan, &conn->chan_l, list) {
2151 struct sock *sk = chan->sk;
2152 if (chan->chan_type != L2CAP_CHAN_RAW)
2153 continue;
2154
2155 /* Don't send frame to the socket it came from */
2156 if (skb->sk == sk)
2157 continue;
2158 nskb = skb_clone(skb, GFP_ATOMIC);
2159 if (!nskb)
2160 continue;
2161
2162 if (chan->ops->recv(chan->data, nskb))
2163 kfree_skb(nskb);
2164 }
2165
2166 mutex_unlock(&conn->chan_lock);
2167 }
2168
2169 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header, command header (code, ident,
 * dlen) and payload, split over continuation fragments if it exceeds
 * the HCI MTU.  Returns the skb or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	/* Total PDU = L2CAP header + command header + payload; the
	 * first skb holds at most one HCI MTU of it.
	 */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* The signalling CID differs between BR/EDR and LE links. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
2232
2233 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2234 {
2235 struct l2cap_conf_opt *opt = *ptr;
2236 int len;
2237
2238 len = L2CAP_CONF_OPT_SIZE + opt->len;
2239 *ptr += len;
2240
2241 *type = opt->type;
2242 *olen = opt->len;
2243
2244 switch (opt->len) {
2245 case 1:
2246 *val = *((u8 *) opt->val);
2247 break;
2248
2249 case 2:
2250 *val = get_unaligned_le16(opt->val);
2251 break;
2252
2253 case 4:
2254 *val = get_unaligned_le32(opt->val);
2255 break;
2256
2257 default:
2258 *val = (unsigned long) opt->val;
2259 break;
2260 }
2261
2262 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2263 return len;
2264 }
2265
2266 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2267 {
2268 struct l2cap_conf_opt *opt = *ptr;
2269
2270 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2271
2272 opt->type = type;
2273 opt->len = len;
2274
2275 switch (len) {
2276 case 1:
2277 *((u8 *) opt->val) = val;
2278 break;
2279
2280 case 2:
2281 put_unaligned_le16(val, opt->val);
2282 break;
2283
2284 case 4:
2285 put_unaligned_le32(val, opt->val);
2286 break;
2287
2288 default:
2289 memcpy(opt->val, (void *) val, len);
2290 break;
2291 }
2292
2293 *ptr += L2CAP_CONF_OPT_SIZE + len;
2294 }
2295
2296 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2297 {
2298 struct l2cap_conf_efs efs;
2299
2300 switch (chan->mode) {
2301 case L2CAP_MODE_ERTM:
2302 efs.id = chan->local_id;
2303 efs.stype = chan->local_stype;
2304 efs.msdu = cpu_to_le16(chan->local_msdu);
2305 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2306 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2307 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2308 break;
2309
2310 case L2CAP_MODE_STREAMING:
2311 efs.id = 1;
2312 efs.stype = L2CAP_SERV_BESTEFFORT;
2313 efs.msdu = cpu_to_le16(chan->local_msdu);
2314 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2315 efs.acc_lat = 0;
2316 efs.flush_to = 0;
2317 break;
2318
2319 default:
2320 return;
2321 }
2322
2323 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2324 (unsigned long) &efs);
2325 }
2326
2327 static void l2cap_ack_timeout(struct work_struct *work)
2328 {
2329 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2330 ack_timer.work);
2331
2332 BT_DBG("chan %p", chan);
2333
2334 l2cap_chan_lock(chan);
2335
2336 __l2cap_send_ack(chan);
2337
2338 l2cap_chan_unlock(chan);
2339
2340 l2cap_chan_put(chan);
2341 }
2342
/* Reset per-channel transmit/receive state for ERTM or streaming mode
 * and, for ERTM only, set up the timers, SREJ structures and sequence
 * lists.  Returns 0 or a negative errno from sequence-list allocation.
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	/* Reset all sequence-number and SAR reassembly state. */
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs no retransmission machinery. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);
	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
}
2380
2381 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2382 {
2383 switch (mode) {
2384 case L2CAP_MODE_STREAMING:
2385 case L2CAP_MODE_ERTM:
2386 if (l2cap_mode_supported(mode, remote_feat_mask))
2387 return mode;
2388 /* fall through */
2389 default:
2390 return L2CAP_MODE_BASIC;
2391 }
2392 }
2393
2394 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2395 {
2396 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2397 }
2398
2399 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2400 {
2401 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2402 }
2403
2404 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2405 {
2406 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2407 __l2cap_ews_supported(chan)) {
2408 /* use extended control field */
2409 set_bit(FLAG_EXT_CTRL, &chan->flags);
2410 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2411 } else {
2412 chan->tx_win = min_t(u16, chan->tx_win,
2413 L2CAP_DEFAULT_TX_WINDOW);
2414 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2415 }
2416 }
2417
/* Build an L2CAP Configure Request for @chan into @data and return the
 * number of bytes written (header plus options).  The caller supplies
 * a buffer large enough for the worst-case option set.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* The mode is only (re)negotiated on the very first exchange;
	 * afterwards keep whatever was already selected.
	 */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* CONF_STATE2_DEVICE: mode is mandated locally and must
		 * not be downgraded by negotiation.
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise the MTU when it differs from the default. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* A basic-mode RFC option is only useful if the peer
		 * understands ERTM/streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		/* Timeouts are filled in by the responder. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Cap the PDU size so a full frame (extended header,
		 * SDU length and FCS included) fits the link MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC option carries at most the classic window;
		 * a larger window goes into the separate EWS option.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		/* Streaming mode has no window or retransmissions. */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2538
/* Parse the Configure Request accumulated in chan->conf_req and build
 * the Configure Response into @data.  Returns the response length, or
 * -ECONNREFUSED when the request must be rejected outright (mode
 * mismatch, unsupported EWS/EFS, or unacceptable EFS service type).
 *
 * Fix: a truncated EFS option (olen != sizeof(efs)) used to set
 * remote_efs anyway, so the uninitialized stack copy of efs was later
 * read and echoed back to the peer.  Now the option is ignored unless
 * it has the expected size.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option the remote sent. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be silently ignored; unknown
		 * mandatory options must be reported back.
		 */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only accept an EFS option of the expected
			 * size; anything else would leave efs
			 * uninitialized while remote_efs is set.
			 */
			if (olen != sizeof(efs))
				break;

			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode negotiation happens only on the first exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* Locally mandated mode must match the remote's. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* The remote gets exactly one retry with our mode. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's MPS so a full PDU always
			 * fits into the underlying link MTU.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2758
2759 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2760 {
2761 struct l2cap_conf_req *req = data;
2762 void *ptr = req->data;
2763 int type, olen;
2764 unsigned long val;
2765 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2766 struct l2cap_conf_efs efs;
2767
2768 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2769
2770 while (len >= L2CAP_CONF_OPT_SIZE) {
2771 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2772
2773 switch (type) {
2774 case L2CAP_CONF_MTU:
2775 if (val < L2CAP_DEFAULT_MIN_MTU) {
2776 *result = L2CAP_CONF_UNACCEPT;
2777 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2778 } else
2779 chan->imtu = val;
2780 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2781 break;
2782
2783 case L2CAP_CONF_FLUSH_TO:
2784 chan->flush_to = val;
2785 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2786 2, chan->flush_to);
2787 break;
2788
2789 case L2CAP_CONF_RFC:
2790 if (olen == sizeof(rfc))
2791 memcpy(&rfc, (void *)val, olen);
2792
2793 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2794 rfc.mode != chan->mode)
2795 return -ECONNREFUSED;
2796
2797 chan->fcs = 0;
2798
2799 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2800 sizeof(rfc), (unsigned long) &rfc);
2801 break;
2802
2803 case L2CAP_CONF_EWS:
2804 chan->tx_win = min_t(u16, val,
2805 L2CAP_DEFAULT_EXT_WINDOW);
2806 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2807 chan->tx_win);
2808 break;
2809
2810 case L2CAP_CONF_EFS:
2811 if (olen == sizeof(efs))
2812 memcpy(&efs, (void *)val, olen);
2813
2814 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2815 efs.stype != L2CAP_SERV_NOTRAFIC &&
2816 efs.stype != chan->local_stype)
2817 return -ECONNREFUSED;
2818
2819 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2820 sizeof(efs), (unsigned long) &efs);
2821 break;
2822 }
2823 }
2824
2825 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2826 return -ECONNREFUSED;
2827
2828 chan->mode = rfc.mode;
2829
2830 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2831 switch (rfc.mode) {
2832 case L2CAP_MODE_ERTM:
2833 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2834 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2835 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2836
2837 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2838 chan->local_msdu = le16_to_cpu(efs.msdu);
2839 chan->local_sdu_itime =
2840 le32_to_cpu(efs.sdu_itime);
2841 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2842 chan->local_flush_to =
2843 le32_to_cpu(efs.flush_to);
2844 }
2845 break;
2846
2847 case L2CAP_MODE_STREAMING:
2848 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2849 }
2850 }
2851
2852 req->dcid = cpu_to_le16(chan->dcid);
2853 req->flags = cpu_to_le16(0x0000);
2854
2855 return ptr - data;
2856 }
2857
2858 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2859 {
2860 struct l2cap_conf_rsp *rsp = data;
2861 void *ptr = rsp->data;
2862
2863 BT_DBG("chan %p", chan);
2864
2865 rsp->scid = cpu_to_le16(chan->dcid);
2866 rsp->result = cpu_to_le16(result);
2867 rsp->flags = cpu_to_le16(flags);
2868
2869 return ptr - data;
2870 }
2871
2872 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2873 {
2874 struct l2cap_conn_rsp rsp;
2875 struct l2cap_conn *conn = chan->conn;
2876 u8 buf[128];
2877
2878 rsp.scid = cpu_to_le16(chan->dcid);
2879 rsp.dcid = cpu_to_le16(chan->scid);
2880 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2881 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2882 l2cap_send_cmd(conn, chan->ident,
2883 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2884
2885 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2886 return;
2887
2888 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2889 l2cap_build_conf_req(chan, buf), buf);
2890 chan->num_conf_req++;
2891 }
2892
2893 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2894 {
2895 int type, olen;
2896 unsigned long val;
2897 struct l2cap_conf_rfc rfc;
2898
2899 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2900
2901 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2902 return;
2903
2904 while (len >= L2CAP_CONF_OPT_SIZE) {
2905 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2906
2907 switch (type) {
2908 case L2CAP_CONF_RFC:
2909 if (olen == sizeof(rfc))
2910 memcpy(&rfc, (void *)val, olen);
2911 goto done;
2912 }
2913 }
2914
2915 /* Use sane default values in case a misbehaving remote device
2916 * did not send an RFC option.
2917 */
2918 rfc.mode = chan->mode;
2919 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2920 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2921 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2922
2923 BT_ERR("Expected RFC option was not found, using defaults");
2924
2925 done:
2926 switch (rfc.mode) {
2927 case L2CAP_MODE_ERTM:
2928 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2929 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2930 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2931 break;
2932 case L2CAP_MODE_STREAMING:
2933 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2934 }
2935 }
2936
2937 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2938 {
2939 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2940
2941 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2942 return 0;
2943
2944 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2945 cmd->ident == conn->info_ident) {
2946 cancel_delayed_work(&conn->info_timer);
2947
2948 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2949 conn->info_ident = 0;
2950
2951 l2cap_conn_start(conn);
2952 }
2953
2954 return 0;
2955 }
2956
/* Handle an incoming Connect Request: look up a listening channel for
 * the PSM, create a child channel, and answer with a Connect Response
 * carrying success, pending, or an error code.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		/* Duplicate source CID from the remote: kill the new
		 * child channel and report failure.
		 */
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our newly allocated source CID is the remote's destination. */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace decides later; wake it up. */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange still pending. */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature-mask information exchange now. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3086
/* Handle a Connect Response: locate our channel by the remote's
 * destination CID (or by command ident while still pending) and move
 * it forward — into configuration on success, or delete it on refusal.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* A pending response carries no scid yet; match on the
		 * identifier of our original Connect Request.
		 */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the first Configure Request only once. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result refuses the connection. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3154
3155 static inline void set_default_fcs(struct l2cap_chan *chan)
3156 {
3157 /* FCS is enabled only in ERTM or streaming mode, if one or both
3158 * sides request it.
3159 */
3160 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3161 chan->fcs = L2CAP_FCS_NONE;
3162 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3163 chan->fcs = L2CAP_FCS_CRC16;
3164 }
3165
/* Handle a Configure Request.  Fragments (flags bit 0 set) are
 * accumulated in chan->conf_req; once complete the request is parsed,
 * a response is sent, and — when both directions are configured — the
 * channel is brought up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	l2cap_chan_lock(chan);

	/* Configuration is only legal in BT_CONFIG/BT_CONNECT2. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up. */
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0000), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3277
/* Handle a Configure Response.  Depending on the result this either
 * finalizes our outgoing configuration, records a PENDING state, or
 * retries once with adjusted options; anything else disconnects.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				       l2cap_build_conf_rsp(chan, buf,
					L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		/* Retry with the remote's preferences, but only a
		 * bounded number of times; on exhaustion this falls
		 * through to the default (disconnect) case.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* More response fragments to come; wait for them. */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up. */
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3386
/* Handle a Disconnect Request: acknowledge it with a Disconnect
 * Response, shut down the socket and tear the channel down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's dcid is our scid. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold an extra reference so the channel survives the del/close
	 * sequence below until we are done with it.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3432
/* Handle a Disconnect Response to a request we sent: the remote has
 * confirmed, so remove and close the channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Extra reference keeps the channel alive through del/close. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3466
/* Handle an Information Request: answer with our feature mask, the
 * fixed-channel map, or NOTSUPP for unknown types.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise optional features per module parameters. */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* The A2MP fixed channel is only present with HS. */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3516
/* Handle an Information Response: record the remote's feature mask
 * and fixed-channel map, then start any pending channels.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote could not answer; proceed without features. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a second query for the fixed channels. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3574
3575 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3576 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3577 void *data)
3578 {
3579 struct l2cap_create_chan_req *req = data;
3580 struct l2cap_create_chan_rsp rsp;
3581 u16 psm, scid;
3582
3583 if (cmd_len != sizeof(*req))
3584 return -EPROTO;
3585
3586 if (!enable_hs)
3587 return -EINVAL;
3588
3589 psm = le16_to_cpu(req->psm);
3590 scid = le16_to_cpu(req->scid);
3591
3592 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3593
3594 /* Placeholder: Always reject */
3595 rsp.dcid = 0;
3596 rsp.scid = cpu_to_le16(scid);
3597 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3598 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3599
3600 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3601 sizeof(rsp), &rsp);
3602
3603 return 0;
3604 }
3605
/* Handle an AMP Create Channel Response.  The payload layout matches a
 * regular Connect Response, so it is processed by the same handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3613
3614 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3615 u16 icid, u16 result)
3616 {
3617 struct l2cap_move_chan_rsp rsp;
3618
3619 BT_DBG("icid %d, result %d", icid, result);
3620
3621 rsp.icid = cpu_to_le16(icid);
3622 rsp.result = cpu_to_le16(result);
3623
3624 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3625 }
3626
3627 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3628 struct l2cap_chan *chan, u16 icid, u16 result)
3629 {
3630 struct l2cap_move_chan_cfm cfm;
3631 u8 ident;
3632
3633 BT_DBG("icid %d, result %d", icid, result);
3634
3635 ident = l2cap_get_ident(conn);
3636 if (chan)
3637 chan->ident = ident;
3638
3639 cfm.icid = cpu_to_le16(icid);
3640 cfm.result = cpu_to_le16(result);
3641
3642 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3643 }
3644
3645 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3646 u16 icid)
3647 {
3648 struct l2cap_move_chan_cfm_rsp rsp;
3649
3650 BT_DBG("icid %d", icid);
3651
3652 rsp.icid = cpu_to_le16(icid);
3653 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3654 }
3655
/* Handle an incoming Move Channel request (AMP channel move).
 * Channel moves are not implemented, so every well-formed request is
 * refused with L2CAP_MR_NOT_ALLOWED.
 *
 * Returns 0 when a response was sent, -EPROTO on a malformed payload,
 * or -EINVAL when High Speed support is disabled (caller will send a
 * Command Reject).
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);

	if (!enable_hs)
		return -EINVAL;

	/* Placeholder: Always refuse */
	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);

	return 0;
}
3678
/* Handle a Move Channel response from the peer. Since moves are never
 * initiated locally (placeholder implementation), the move is always
 * reported back as unconfirmed.
 *
 * Returns 0, or -EPROTO on a malformed payload.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid %d, result %d", icid, result);

	/* Placeholder: Always unconfirmed */
	l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);

	return 0;
}
3698
/* Handle a Move Channel confirm from the peer. The confirm is simply
 * acknowledged; no channel state is changed in this placeholder
 * implementation.
 *
 * Returns 0, or -EPROTO on a malformed payload.
 */
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid %d, result %d", icid, result);

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	return 0;
}
3717
3718 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3719 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3720 {
3721 struct l2cap_move_chan_cfm_rsp *rsp = data;
3722 u16 icid;
3723
3724 if (cmd_len != sizeof(*rsp))
3725 return -EPROTO;
3726
3727 icid = le16_to_cpu(rsp->icid);
3728
3729 BT_DBG("icid %d", icid);
3730
3731 return 0;
3732 }
3733
3734 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3735 u16 to_multiplier)
3736 {
3737 u16 max_latency;
3738
3739 if (min > max || min < 6 || max > 3200)
3740 return -EINVAL;
3741
3742 if (to_multiplier < 10 || to_multiplier > 3200)
3743 return -EINVAL;
3744
3745 if (max >= to_multiplier * 8)
3746 return -EINVAL;
3747
3748 max_latency = (to_multiplier * 8 / max) - 1;
3749 if (latency > 499 || latency > max_latency)
3750 return -EINVAL;
3751
3752 return 0;
3753 }
3754
/* Handle an LE Connection Parameter Update request from the peer.
 * Only the master of the link may apply new parameters, so the request
 * is rejected with -EINVAL on a slave link (caller sends a Command
 * Reject). Otherwise the proposed parameters are validated, an
 * accept/reject response is returned, and on acceptance the controller
 * is asked to update the connection.
 *
 * Returns 0 when a response was sent, negative error otherwise.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Parameters were acceptable: push them down to the controller */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
3796
/* Dispatch one BR/EDR signaling command to its handler.
 * A negative return causes the caller (l2cap_sig_channel) to send a
 * Command Reject back to the peer.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
3878
3879 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3880 struct l2cap_cmd_hdr *cmd, u8 *data)
3881 {
3882 switch (cmd->code) {
3883 case L2CAP_COMMAND_REJ:
3884 return 0;
3885
3886 case L2CAP_CONN_PARAM_UPDATE_REQ:
3887 return l2cap_conn_param_update_req(conn, cmd, data);
3888
3889 case L2CAP_CONN_PARAM_UPDATE_RSP:
3890 return 0;
3891
3892 default:
3893 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3894 return -EINVAL;
3895 }
3896 }
3897
/* Process a C-frame received on the signaling channel. A single frame
 * may carry several concatenated commands; each one is validated and
 * dispatched to the LE or BR/EDR handler depending on the link type.
 * Failed commands are answered with a Command Reject. The skb is
 * consumed in all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the unparsed frame first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command claiming more payload than remains in the
		 * frame, or with the reserved ident 0, is corrupt: stop
		 * parsing the rest of the frame.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next concatenated command */
		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
3944
/* Verify and strip the FCS trailer of a received ERTM/streaming frame.
 * On entry the basic L2CAP header and the control field have already
 * been pulled from the skb; the CRC, however, covers them too, hence
 * the computation over skb->data - hdr_size for skb->len + hdr_size
 * bytes. skb_trim() first removes the 2 FCS bytes from skb->len, so
 * the received FCS can then be read from just past the trimmed payload
 * (the bytes are still present in the buffer).
 *
 * Returns 0 when the FCS matches or FCS is not in use on the channel,
 * -EBADMSG on a CRC mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Header size depends on whether extended control fields are on */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
3965
/* Answer a poll (P-bit) from the peer: report local busy with RNR,
 * otherwise (re)transmit pending I-frames, and fall back to an RR if
 * nothing at all was sent so the peer still gets an acknowledgment.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u32 control = 0;

	chan->frames_sent = 0;

	control |= __set_reqseq(chan, chan->buffer_seq);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing went out and we are not busy: send a bare RR so the
	 * F-bit/ack still reaches the peer.
	 */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, control);
	}
}
3991
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by the frame's offset from buffer_seq (i.e. by modular
 * sequence number). Returns -EINVAL if a frame with the same tx_seq is
 * already queued (duplicate), 0 otherwise. On success the queue owns
 * the skb.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequencing metadata in the skb control block */
	bt_cb(skb)->control.txseq = tx_seq;
	bt_cb(skb)->control.sar = sar;

	next_skb = skb_peek(&chan->srej_q);

	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);

	while (next_skb) {
		if (bt_cb(next_skb)->control.txseq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = __seq_offset(chan,
				bt_cb(next_skb)->control.txseq, chan->buffer_seq);

		/* Found the first queued frame that sequences after us:
		 * insert in front of it to keep the queue sorted.
		 */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			next_skb = NULL;
		else
			next_skb = skb_queue_next(&chan->srej_q, next_skb);
	}

	/* Highest sequence seen so far: append at the tail */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
4026
/* Link new_frag onto skb's frag_list and update the head skb's length
 * accounting so skb->len stays correct for the whole SDU.
 *
 * skb->len reflects data in skb as well as all fragments
 * skb->data_len reflects only data in fragments
 *
 * *last_frag caches the current tail of the fragment chain so appends
 * are O(1); for an empty chain it points at the head skb itself (see
 * l2cap_reassemble_sdu, which initializes sdu_last_frag to the head).
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4045
/* Reassemble an SDU from received I-frames according to the frame's
 * SAR bits. Unsegmented SDUs are delivered immediately; START frames
 * begin a partial SDU in chan->sdu, CONTINUE/END frames are appended
 * as fragments via append_skb_frag().
 *
 * Ownership: when the skb is kept for reassembly (or delivered) the
 * local pointer is set to NULL so the error path below does not free
 * it. On any error (-EINVAL sequencing violation, -EMSGSIZE overflow,
 * or a recv() failure) both the current skb and any partial SDU are
 * freed and reassembly state is reset.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
{
	int err = -EINVAL;

	switch (__get_ctrl_sar(chan, control)) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A complete SDU while one is in progress is a protocol
		 * violation: err stays -EINVAL.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* START frames carry the total SDU length up front */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START frame must not already contain the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overshooting the advertised SDU length is an error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The END frame must complete exactly sdu_len bytes */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4126
/* Enter the ERTM local-busy condition: stop asking for retransmissions
 * (SREJ list is cleared) and arm the ack timer so an RNR goes out.
 */
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	l2cap_seq_list_clear(&chan->srej_list);

	__set_ack_timer(chan);
}
4136
/* Leave the ERTM local-busy condition. If an RNR was actually sent,
 * the peer must be told we can receive again: send an RR with the
 * P-bit set and wait for the matching F-bit (CONN_WAIT_F) under the
 * monitor timer. Either way the busy/RNR flags are cleared.
 */
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u32 control;

	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = __set_reqseq(chan, chan->buffer_seq);
	control |= __set_ctrl_poll(chan);
	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}
4161
4162 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4163 {
4164 if (chan->mode == L2CAP_MODE_ERTM) {
4165 if (busy)
4166 l2cap_ertm_enter_local_busy(chan);
4167 else
4168 l2cap_ertm_exit_local_busy(chan);
4169 }
4170 }
4171
/* After a missing frame arrives, drain the SREJ queue of every frame
 * that is now in sequence starting at tx_seq, handing each to SDU
 * reassembly. Stops at the first remaining gap, when the queue is
 * empty, on local busy, or on a reassembly error (which disconnects).
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
{
	struct sk_buff *skb;
	u32 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		/* Queue head is not the next expected frame: gap remains */
		if (bt_cb(skb)->control.txseq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
		err = l2cap_reassemble_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
		tx_seq = __next_seq(chan, tx_seq);
	}
}
4197
/* A duplicate of a SREJ'd frame arrived. Walk the outstanding SREJ
 * list: the entry matching tx_seq is satisfied and removed; every
 * entry in front of it is re-requested with a fresh SREJ S-frame and
 * rotated to the tail of the list (delete + re-add) so the list order
 * keeps tracking the request order.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
	struct srej_list *l, *tmp;
	u32 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
		control |= __set_reqseq(chan, l->tx_seq);
		l2cap_send_sframe(chan, control);
		/* Rotate the still-missing entry to the end of the list */
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
4216
/* Send a SREJ for every sequence number between expected_tx_seq and
 * the received tx_seq (exclusive), recording each missing frame both
 * in srej_list and in the srej_l bookkeeping list. Finally advances
 * expected_tx_seq past tx_seq itself (the frame that just arrived).
 *
 * Returns 0 on success or -ENOMEM if a bookkeeping entry could not be
 * allocated (caller disconnects the channel).
 */
static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
	struct srej_list *new;
	u32 control;

	while (tx_seq != chan->expected_tx_seq) {
		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
		control |= __set_reqseq(chan, chan->expected_tx_seq);
		l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
		l2cap_send_sframe(chan, control);

		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;

		new->tx_seq = chan->expected_tx_seq;

		chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

		list_add_tail(&new->list, &chan->srej_l);
	}

	/* Account for the out-of-order frame that triggered the SREJs */
	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

	return 0;
}
4243
/* Process one received ERTM I-frame.
 *
 * Handles, in order: F-bit completion of a WAIT_F exchange, ack
 * processing via the frame's req_seq, window validation of tx_seq,
 * local-busy dropping, and then either in-sequence delivery
 * ("expected" path) or the selective-reject machinery for
 * out-of-sequence frames.
 *
 * The skb is consumed on every path: delivered, queued on srej_q, or
 * freed. Returns 0, or a negative error after initiating disconnect.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
	u16 tx_seq = __get_txseq(chan, rx_control);
	u16 req_seq = __get_reqseq(chan, rx_control);
	u8 sar = __get_ctrl_sar(chan, rx_control);
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
							tx_seq, rx_control);

	/* F-bit answers our earlier poll: stop the monitor timer */
	if (__is_ctrl_final(chan, rx_control) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	/* req_seq acknowledges our transmitted frames up to it */
	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);

	/* invalid tx_seq: outside the receive window */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	/* While locally busy, frames are dropped (peer will retransmit) */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		goto drop;
	}

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* The oldest missing frame arrived: queue it and
			 * flush any now-contiguous frames from srej_q.
			 */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			/* Frame was on the outstanding-SREJ list: re-issue
			 * the SREJs still pending ahead of it.
			 */
			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}

			/* A brand new gap beyond the known ones */
			err = l2cap_send_srejframe(chan, tx_seq);
			if (err < 0) {
				l2cap_send_disconn_req(chan->conn, chan, -err);
				return err;
			}
		}
	} else {
		expected_tx_seq_offset = __seq_offset(chan,
				chan->expected_tx_seq, chan->buffer_seq);

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First detected gap: enter the SREJ recovery state */
		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		/* Set P-bit only if there are some I-frames to ack. */
		if (__clear_ack_timer(chan))
			set_bit(CONN_SEND_PBIT, &chan->conn_state);

		err = l2cap_send_srejframe(chan, tx_seq);
		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, -err);
			return err;
		}
	}
	return 0;

expected:
	/* In-sequence frame */
	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

	/* During SREJ recovery, in-order frames still park on srej_q so
	 * delivery order is restored once the gaps close.
	 */
	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		bt_cb(skb)->control.txseq = tx_seq;
		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_reassemble_sdu(chan, skb, rx_control);
	chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (__is_ctrl_final(chan, rx_control)) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}


	/* Ack every num_to_ack frames; otherwise delay via the ack timer */
	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);
	else
		__set_ack_timer(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4388
/* Process a received RR (Receiver Ready) S-frame: acknowledge frames
 * up to req_seq, then react to the P/F bits — a poll must be answered
 * with an F-bit frame, a final bit may trigger retransmission, and a
 * plain RR resumes/continues transmission.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
				__get_reqseq(chan, rx_control), rx_control);

	chan->expected_ack_seq = __get_reqseq(chan, rx_control);
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_poll(chan, rx_control)) {
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			/* In SREJ recovery: answer the poll by repeating the
			 * tail SREJ with the F-bit.
			 */
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (__is_ctrl_final(chan, rx_control)) {
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
4428
/* Process a received REJ S-frame: the peer rejects everything from
 * req_seq on. Acknowledge up to req_seq and retransmit; with the
 * F-bit set, retransmission only happens if a REJ action is not
 * already pending (CONN_REJ_ACT).
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_final(chan, rx_control)) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* Remember the REJ so the F-bit answer to our poll does
		 * not trigger a second retransmission.
		 */
		if (test_bit(CONN_WAIT_F, &chan->conn_state))
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Process a received SREJ S-frame: selectively retransmit the single
 * frame req_seq. P-bit polls additionally ack up to req_seq and resume
 * sending with the F-bit; F-bit frames suppress the retransmission if
 * it was already performed for this sequence (CONN_SREJ_ACT).
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (__is_ctrl_poll(chan, rx_control)) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (__is_ctrl_final(chan, rx_control)) {
		/* Skip the retransmit if this F-bit answers a poll for a
		 * sequence we already retransmitted.
		 */
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
4485
/* Process a received RNR (Receiver Not Ready) S-frame: mark the peer
 * busy, ack up to req_seq, stop retransmitting, and answer a P-bit
 * poll appropriately (RR/RNR with F-bit, or the tail SREJ when in
 * SREJ recovery).
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_poll(chan, rx_control))
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		__clear_retrans_timer(chan);
		if (__is_ctrl_poll(chan, rx_control))
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery in progress */
	if (__is_ctrl_poll(chan, rx_control)) {
		l2cap_send_srejtail(chan);
	} else {
		rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, rx_control);
	}
}
4513
/* Dispatch a received ERTM S-frame (RR/REJ/SREJ/RNR) to its handler.
 * An F-bit that answers our poll clears the WAIT_F state first. The
 * skb carries no payload beyond the control field and is always freed.
 * Always returns 0.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);

	if (__is_ctrl_final(chan, rx_control) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	switch (__get_ctrl_super(chan, rx_control)) {
	case L2CAP_SUPER_RR:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJ:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SREJ:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RNR:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
4547
/* Validate and dispatch one received ERTM frame.
 * Strips the control field, checks FCS, computes the payload length
 * net of the optional SDU-length prefix and FCS trailer, validates it
 * against the negotiated MPS, sanity-checks req_seq against the
 * unacked window, and finally hands the frame to the I-frame or
 * S-frame path. Invalid frames are dropped (or the channel is
 * disconnected for protocol violations). Always returns 0; the skb is
 * consumed.
 */
static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u32 control;
	u16 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	__unpack_control(chan, skb);

	control = __get_control(chan, skb->data);
	skb_pull(skb, __ctrl_size(chan));
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* A SAR-start I-frame carries a 2-byte SDU length prefix */
	if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(chan, control);

	req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);

	next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
						chan->expected_ack_seq);

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (!__is_sframe(chan, control)) {
		/* I-frame: a negative len means the frame was shorter than
		 * its mandatory SDU-length/FCS fields.
		 */
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frame: must carry no payload at all */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4615
/* Deliver a data frame to the channel identified by @cid, according to
 * the channel's operating mode (basic, ERTM or streaming). Frames for
 * unknown channels, oversized basic-mode frames and malformed
 * streaming frames are dropped. The skb is always consumed (delivered
 * to ops->recv or freed). Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	u32 control;
	u16 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	l2cap_chan_lock(chan);

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() == 0 means the skb was accepted (and owned) */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* ERTM path consumes the skb on every outcome */
		l2cap_ertm_data_rcv(chan, skb);

		goto done;

	case L2CAP_MODE_STREAMING:
		control = __get_control(chan, skb->data);
		skb_pull(skb, __ctrl_size(chan));
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		/* Payload length net of SDU-length prefix and FCS */
		if (__is_sar_start(chan, control))
			len -= L2CAP_SDULEN_SIZE;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= L2CAP_FCS_SIZE;

		/* S-frames are not valid in streaming mode */
		if (len > chan->mps || len < 0 || __is_sframe(chan, control))
			goto drop;

		tx_seq = __get_txseq(chan, control);

		if (chan->expected_tx_seq != tx_seq) {
			/* Frame(s) missing - must discard partial SDU */
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;

			/* TODO: Notify userland of missing data */
		}

		chan->expected_tx_seq = __next_seq(chan, tx_seq);

		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
4706
/* Deliver a connectionless (G-frame) data packet to a channel bound to
 * @psm. Dropped when no suitable channel exists, the channel is in the
 * wrong state, or the payload exceeds the incoming MTU. The skb is
 * always consumed. Always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* recv() == 0 means the skb was accepted (and owned) */
	if (!chan->ops->recv(chan->data, skb))
		return 0;

drop:
	kfree_skb(skb);

	return 0;
}
4731
/* Deliver an ATT (LE fixed channel) data packet to a channel bound to
 * @cid. Mirrors l2cap_conless_channel but looks the channel up by
 * source CID instead of PSM. The skb is always consumed. Always
 * returns 0.
 */
static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
							struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* recv() == 0 means the skb was accepted (and owned) */
	if (!chan->ops->recv(chan->data, skb))
		return 0;

drop:
	kfree_skb(skb);

	return 0;
}
4757
/* Entry point for a complete L2CAP frame received from HCI. Validates
 * the basic header length against the skb and routes the payload by
 * destination CID: signaling, connectionless, ATT, SMP or a dynamic
 * data channel. The skb is consumed on every path.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* lh keeps pointing at the basic header; skb_pull() below only
	 * advances skb->data past it.
	 */
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* G-frames carry the PSM in front of the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
4801
4802 /* ---- L2CAP interface with lower layer (HCI) ---- */
4803
/* HCI callback: an incoming ACL connection from @bdaddr is pending.
 * Scan all listening channels and compute the accept/role-switch link
 * mode. Channels bound to this adapter's address ("exact" matches)
 * take precedence over wildcard (BDADDR_ANY) listeners.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
4834
4835 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4836 {
4837 struct l2cap_conn *conn;
4838
4839 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4840
4841 if (!status) {
4842 conn = l2cap_conn_add(hcon, status);
4843 if (conn)
4844 l2cap_conn_ready(conn);
4845 } else
4846 l2cap_conn_del(hcon, bt_to_errno(status));
4847
4848 return 0;
4849 }
4850
4851 int l2cap_disconn_ind(struct hci_conn *hcon)
4852 {
4853 struct l2cap_conn *conn = hcon->l2cap_data;
4854
4855 BT_DBG("hcon %p", hcon);
4856
4857 if (!conn)
4858 return HCI_ERROR_REMOTE_USER_TERM;
4859 return conn->disc_reason;
4860 }
4861
4862 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4863 {
4864 BT_DBG("hcon %p reason %d", hcon, reason);
4865
4866 l2cap_conn_del(hcon, bt_to_errno(reason));
4867 return 0;
4868 }
4869
4870 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4871 {
4872 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4873 return;
4874
4875 if (encrypt == 0x00) {
4876 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4877 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4878 } else if (chan->sec_level == BT_SECURITY_HIGH)
4879 l2cap_chan_close(chan, ECONNREFUSED);
4880 } else {
4881 if (chan->sec_level == BT_SECURITY_MEDIUM)
4882 __clear_chan_timer(chan);
4883 }
4884 }
4885
/* HCI security (authentication/encryption) change callback.
 *
 * Walks every channel on the connection and advances its state machine
 * according to @status (0 = security procedure succeeded) and @encrypt
 * (non-zero = link is now encrypted). Channels are iterated under
 * conn->chan_lock with each channel individually locked.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* On an encrypted LE link, start SMP key distribution and
		 * cancel the pairing security timer.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* LE data channel: becomes ready once the link is
			 * encrypted; inherit the link's security level.
			 */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* A channel with a pending connect is handled when that
		 * request completes, not here.
		 */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			/* Established channel: security change succeeded,
			 * resume the socket and re-check encryption state.
			 */
			struct sock *sk = chan->sk;

			bt_sk(sk)->suspended = false;
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security was a precondition for our outgoing
			 * connect: send it now, or give up on failure.
			 */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was waiting on security: answer
			 * the remote's Connect Request accordingly.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					/* Defer to userspace authorization;
					 * wake the listening parent socket.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse and schedule
				 * disconnect of the channel.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4985
/* Receive one ACL fragment from HCI and reassemble L2CAP frames.
 *
 * A start fragment (no ACL_CONT flag) carries the Basic L2CAP header,
 * from which the total frame length is taken; continuation fragments
 * are appended to conn->rx_skb until conn->rx_len reaches zero, at
 * which point the complete frame is dispatched via l2cap_recv_frame().
 * Always consumes @skb and returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start fragment while reassembly is in progress means
		 * the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Total frame length = payload length + header size */
		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		chan = l2cap_get_chan_by_scid(conn, cid);

		/* Reject early if the announced frame would exceed the
		 * channel's receive MTU (checked under the socket lock).
		 */
		if (chan && chan->sk) {
			struct sock *sk = chan->sk;
			lock_sock(sk);

			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
					"MTU %d)", len,
					chan->imtu);
				release_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			release_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		/* Copy this first fragment in; rx_len tracks what is still
		 * missing. Falls through to drop: to free the source skb.
		 */
		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment larger than what remains: abandon the frame */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5097
5098 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5099 {
5100 struct l2cap_chan *c;
5101
5102 read_lock(&chan_list_lock);
5103
5104 list_for_each_entry(c, &chan_list, global_l) {
5105 struct sock *sk = c->sk;
5106
5107 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5108 batostr(&bt_sk(sk)->src),
5109 batostr(&bt_sk(sk)->dst),
5110 c->state, __le16_to_cpu(c->psm),
5111 c->scid, c->dcid, c->imtu, c->omtu,
5112 c->sec_level, c->mode);
5113 }
5114
5115 read_unlock(&chan_list_lock);
5116
5117 return 0;
5118 }
5119
/* debugfs open: bind the single-shot seq_file show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5124
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
5133
5134 int __init l2cap_init(void)
5135 {
5136 int err;
5137
5138 err = l2cap_init_sockets();
5139 if (err < 0)
5140 return err;
5141
5142 if (bt_debugfs) {
5143 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5144 bt_debugfs, NULL, &l2cap_debugfs_fops);
5145 if (!l2cap_debugfs)
5146 BT_ERR("Failed to create L2CAP debug file");
5147 }
5148
5149 return 0;
5150 }
5151
/* Remove the debugfs entry and unregister the socket layer
 * (reverse order of l2cap_init()).
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5157
/* Module knob (writable via sysfs, mode 0644) to turn off ERTM */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.133504 seconds and 6 git commands to generate.