Bluetooth: Remove double check for BT_CONNECTED
net/bluetooth/l2cap_core.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/types.h>
34#include <linux/capability.h>
35#include <linux/errno.h>
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/poll.h>
40#include <linux/fcntl.h>
41#include <linux/init.h>
42#include <linux/interrupt.h>
43#include <linux/socket.h>
44#include <linux/skbuff.h>
45#include <linux/list.h>
46#include <linux/device.h>
47#include <linux/debugfs.h>
48#include <linux/seq_file.h>
49#include <linux/uaccess.h>
50#include <linux/crc16.h>
51#include <net/sock.h>
52
53#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
58#include <net/bluetooth/smp.h>
59
60bool disable_ertm;
61
62static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65static LIST_HEAD(chan_list);
66static DEFINE_RWLOCK(chan_list_lock);
67
68static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
78
79/* ---- L2CAP channels ---- */
80
81static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82{
83 struct l2cap_chan *c;
84
85 list_for_each_entry(c, &conn->chan_l, list) {
86 if (c->dcid == cid)
87 return c;
88 }
89 return NULL;
90}
91
92static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93{
94 struct l2cap_chan *c;
95
96 list_for_each_entry(c, &conn->chan_l, list) {
97 if (c->scid == cid)
98 return c;
99 }
100 return NULL;
101}
102
103/* Find channel with given SCID.
104 * Returns locked channel. */
105static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
106{
107 struct l2cap_chan *c;
108
109 mutex_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
111 if (c)
112 l2cap_chan_lock(c);
113 mutex_unlock(&conn->chan_lock);
114
115 return c;
116}
117
118static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119{
120 struct l2cap_chan *c;
121
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
124 return c;
125 }
126 return NULL;
127}
128
129static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
130{
131 struct l2cap_chan *c;
132
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
135 return c;
136 }
137 return NULL;
138}
139
140int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
141{
142 int err;
143
144 write_lock(&chan_list_lock);
145
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
147 err = -EADDRINUSE;
148 goto done;
149 }
150
151 if (psm) {
152 chan->psm = psm;
153 chan->sport = psm;
154 err = 0;
155 } else {
156 u16 p;
157
158 err = -EINVAL;
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
163 err = 0;
164 break;
165 }
166 }
167
168done:
169 write_unlock(&chan_list_lock);
170 return err;
171}
172
173int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
174{
175 write_lock(&chan_list_lock);
176
177 chan->scid = scid;
178
179 write_unlock(&chan_list_lock);
180
181 return 0;
182}
183
184static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
185{
186 u16 cid = L2CAP_CID_DYN_START;
187
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
190 return cid;
191 }
192
193 return 0;
194}
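
/* Illustrative note on the allocator above (CID value assumed from l2cap.h,
 * not stated here): dynamic CIDs start at L2CAP_CID_DYN_START (0x0040), the
 * range below being reserved for fixed channels, so the first
 * connection-oriented channel on a fresh connection is typically handed
 * scid 0x0040, the next free one 0x0041, and so on.
 */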
195
196static void __l2cap_state_change(struct l2cap_chan *chan, int state)
197{
198 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
199 state_to_string(state));
200
201 chan->state = state;
202 chan->ops->state_change(chan->data, state);
203}
204
205static void l2cap_state_change(struct l2cap_chan *chan, int state)
206{
207 struct sock *sk = chan->sk;
208
209 lock_sock(sk);
210 __l2cap_state_change(chan, state);
211 release_sock(sk);
212}
213
214static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
215{
216 struct sock *sk = chan->sk;
217
218 sk->sk_err = err;
219}
220
221static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
222{
223 struct sock *sk = chan->sk;
224
225 lock_sock(sk);
226 __l2cap_chan_set_err(chan, err);
227 release_sock(sk);
228}
229
230static void __set_retrans_timer(struct l2cap_chan *chan)
231{
232 if (!delayed_work_pending(&chan->monitor_timer) &&
233 chan->retrans_timeout) {
234 l2cap_set_timer(chan, &chan->retrans_timer,
235 msecs_to_jiffies(chan->retrans_timeout));
236 }
237}
238
239static void __set_monitor_timer(struct l2cap_chan *chan)
240{
241 __clear_retrans_timer(chan);
242 if (chan->monitor_timeout) {
243 l2cap_set_timer(chan, &chan->monitor_timer,
244 msecs_to_jiffies(chan->monitor_timeout));
245 }
246}
247
248static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
249 u16 seq)
250{
251 struct sk_buff *skb;
252
253 skb_queue_walk(head, skb) {
254 if (bt_cb(skb)->control.txseq == seq)
255 return skb;
256 }
257
258 return NULL;
259}
260
261/* ---- L2CAP sequence number lists ---- */
262
263/* For ERTM, ordered lists of sequence numbers must be tracked for
264 * SREJ requests that are received and for frames that are to be
265 * retransmitted. These seq_list functions implement a singly-linked
266 * list in an array, where membership in the list can also be checked
267 * in constant time. Items can also be added to the tail of the list
268 * and removed from the head in constant time, without further memory
269 * allocs or frees.
270 */
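/* Illustrative walk-through (not part of the original source), assuming a
 * negotiated tx_win of 64 so the backing array has 64 slots (mask 0x3f):
 *
 *	l2cap_seq_list_append(&list, 5);   head = 5, tail = 5, list[5] = TAIL
 *	l2cap_seq_list_append(&list, 9);   list[5] = 9, tail = 9, list[9] = TAIL
 *	l2cap_seq_list_pop(&list);         returns 5, head = 9, list[5] = CLEAR
 *
 * Membership (l2cap_seq_list_contains) is a single masked array lookup,
 * which is what keeps the SREJ and retransmit bookkeeping constant time.
 */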
271
272static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
273{
274 size_t alloc_size, i;
275
276 /* Allocated size is a power of 2 to map sequence numbers
277	 * (which may be up to 14 bits) into a smaller array that is
278 * sized for the negotiated ERTM transmit windows.
279 */
280 alloc_size = roundup_pow_of_two(size);
281
282 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
283 if (!seq_list->list)
284 return -ENOMEM;
285
286 seq_list->mask = alloc_size - 1;
287 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
288 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
289 for (i = 0; i < alloc_size; i++)
290 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
291
292 return 0;
293}
294
295static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
296{
297 kfree(seq_list->list);
298}
299
300static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
301 u16 seq)
302{
303 /* Constant-time check for list membership */
304 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
305}
306
307static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
308{
309 u16 mask = seq_list->mask;
310
311 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
312 /* In case someone tries to pop the head of an empty list */
313 return L2CAP_SEQ_LIST_CLEAR;
314 } else if (seq_list->head == seq) {
315 /* Head can be removed in constant time */
316 seq_list->head = seq_list->list[seq & mask];
317 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
318
319 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
320 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
321 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
322 }
323 } else {
324 /* Walk the list to find the sequence number */
325 u16 prev = seq_list->head;
326 while (seq_list->list[prev & mask] != seq) {
327 prev = seq_list->list[prev & mask];
328 if (prev == L2CAP_SEQ_LIST_TAIL)
329 return L2CAP_SEQ_LIST_CLEAR;
330 }
331
332 /* Unlink the number from the list and clear it */
333 seq_list->list[prev & mask] = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335 if (seq_list->tail == seq)
336 seq_list->tail = prev;
337 }
338 return seq;
339}
340
341static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
342{
343 /* Remove the head in constant time */
344 return l2cap_seq_list_remove(seq_list, seq_list->head);
345}
346
347static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
348{
349 u16 i;
350
351 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
352 return;
353
354 for (i = 0; i <= seq_list->mask; i++)
355 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
356
357 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
358 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
359}
360
361static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
362{
363 u16 mask = seq_list->mask;
364
365 /* All appends happen in constant time */
366
367 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
368 return;
369
370 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
371 seq_list->head = seq;
372 else
373 seq_list->list[seq_list->tail & mask] = seq;
374
375 seq_list->tail = seq;
376 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
377}
378
379static void l2cap_chan_timeout(struct work_struct *work)
380{
381 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
382 chan_timer.work);
383 struct l2cap_conn *conn = chan->conn;
384 int reason;
385
386 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
387
388 mutex_lock(&conn->chan_lock);
389 l2cap_chan_lock(chan);
390
391 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
392 reason = ECONNREFUSED;
393 else if (chan->state == BT_CONNECT &&
394 chan->sec_level != BT_SECURITY_SDP)
395 reason = ECONNREFUSED;
396 else
397 reason = ETIMEDOUT;
398
399 l2cap_chan_close(chan, reason);
400
401 l2cap_chan_unlock(chan);
402
403 chan->ops->close(chan->data);
404 mutex_unlock(&conn->chan_lock);
405
406 l2cap_chan_put(chan);
407}
408
409struct l2cap_chan *l2cap_chan_create(void)
410{
411 struct l2cap_chan *chan;
412
413 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
414 if (!chan)
415 return NULL;
416
417 mutex_init(&chan->lock);
418
419 write_lock(&chan_list_lock);
420 list_add(&chan->global_l, &chan_list);
421 write_unlock(&chan_list_lock);
422
423 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
424
425 chan->state = BT_OPEN;
426
427 atomic_set(&chan->refcnt, 1);
428
429 /* This flag is cleared in l2cap_chan_ready() */
430 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
431
432 BT_DBG("chan %p", chan);
433
434 return chan;
435}
436
437void l2cap_chan_destroy(struct l2cap_chan *chan)
438{
439 write_lock(&chan_list_lock);
440 list_del(&chan->global_l);
441 write_unlock(&chan_list_lock);
442
443 l2cap_chan_put(chan);
444}
445
446void l2cap_chan_set_defaults(struct l2cap_chan *chan)
447{
448 chan->fcs = L2CAP_FCS_CRC16;
449 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
450 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
451 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
452 chan->sec_level = BT_SECURITY_LOW;
453
454 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
455}
456
457static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
458{
459 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
460 __le16_to_cpu(chan->psm), chan->dcid);
461
462 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
463
464 chan->conn = conn;
465
466 switch (chan->chan_type) {
467 case L2CAP_CHAN_CONN_ORIENTED:
468 if (conn->hcon->type == LE_LINK) {
469 /* LE connection */
470 chan->omtu = L2CAP_LE_DEFAULT_MTU;
471 chan->scid = L2CAP_CID_LE_DATA;
472 chan->dcid = L2CAP_CID_LE_DATA;
473 } else {
474 /* Alloc CID for connection-oriented socket */
475 chan->scid = l2cap_alloc_cid(conn);
476 chan->omtu = L2CAP_DEFAULT_MTU;
477 }
478 break;
479
480 case L2CAP_CHAN_CONN_LESS:
481 /* Connectionless socket */
482 chan->scid = L2CAP_CID_CONN_LESS;
483 chan->dcid = L2CAP_CID_CONN_LESS;
484 chan->omtu = L2CAP_DEFAULT_MTU;
485 break;
486
487 default:
488 /* Raw socket can send/recv signalling messages only */
489 chan->scid = L2CAP_CID_SIGNALING;
490 chan->dcid = L2CAP_CID_SIGNALING;
491 chan->omtu = L2CAP_DEFAULT_MTU;
492 }
493
494 chan->local_id = L2CAP_BESTEFFORT_ID;
495 chan->local_stype = L2CAP_SERV_BESTEFFORT;
496 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
497 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
498 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
499 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
500
501 l2cap_chan_hold(chan);
502
503 list_add(&chan->list, &conn->chan_l);
504}
505
506static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
507{
508 mutex_lock(&conn->chan_lock);
509 __l2cap_chan_add(conn, chan);
510 mutex_unlock(&conn->chan_lock);
511}
512
513static void l2cap_chan_del(struct l2cap_chan *chan, int err)
514{
515 struct sock *sk = chan->sk;
516 struct l2cap_conn *conn = chan->conn;
517 struct sock *parent = bt_sk(sk)->parent;
518
519 __clear_chan_timer(chan);
520
521 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
522
523 if (conn) {
524 /* Delete from channel list */
525 list_del(&chan->list);
526
527 l2cap_chan_put(chan);
528
529 chan->conn = NULL;
530 hci_conn_put(conn->hcon);
531 }
532
533 lock_sock(sk);
534
535 __l2cap_state_change(chan, BT_CLOSED);
536 sock_set_flag(sk, SOCK_ZAPPED);
537
538 if (err)
539 __l2cap_chan_set_err(chan, err);
540
541 if (parent) {
542 bt_accept_unlink(sk);
543 parent->sk_data_ready(parent, 0);
544 } else
545 sk->sk_state_change(sk);
546
547 release_sock(sk);
548
549 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
550 return;
551
552	switch (chan->mode) {
553 case L2CAP_MODE_BASIC:
554 break;
555
556 case L2CAP_MODE_ERTM:
557 __clear_retrans_timer(chan);
558 __clear_monitor_timer(chan);
559 __clear_ack_timer(chan);
560
561 skb_queue_purge(&chan->srej_q);
562
563 l2cap_seq_list_free(&chan->srej_list);
564 l2cap_seq_list_free(&chan->retrans_list);
565
566 /* fall through */
567
568 case L2CAP_MODE_STREAMING:
569 skb_queue_purge(&chan->tx_q);
570 break;
571 }
572
573 return;
574}
575
576static void l2cap_chan_cleanup_listen(struct sock *parent)
577{
578 struct sock *sk;
579
580 BT_DBG("parent %p", parent);
581
582 /* Close not yet accepted channels */
583 while ((sk = bt_accept_dequeue(parent, NULL))) {
584 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
585
586 l2cap_chan_lock(chan);
587 __clear_chan_timer(chan);
588 l2cap_chan_close(chan, ECONNRESET);
589 l2cap_chan_unlock(chan);
590
591 chan->ops->close(chan->data);
592 }
593}
594
595void l2cap_chan_close(struct l2cap_chan *chan, int reason)
596{
597 struct l2cap_conn *conn = chan->conn;
598 struct sock *sk = chan->sk;
599
600 BT_DBG("chan %p state %s sk %p", chan,
601 state_to_string(chan->state), sk);
602
603 switch (chan->state) {
604 case BT_LISTEN:
605 lock_sock(sk);
606 l2cap_chan_cleanup_listen(sk);
607
608 __l2cap_state_change(chan, BT_CLOSED);
609 sock_set_flag(sk, SOCK_ZAPPED);
610 release_sock(sk);
611 break;
612
613 case BT_CONNECTED:
614 case BT_CONFIG:
615 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
616 conn->hcon->type == ACL_LINK) {
617 __set_chan_timer(chan, sk->sk_sndtimeo);
618 l2cap_send_disconn_req(conn, chan, reason);
619 } else
620 l2cap_chan_del(chan, reason);
621 break;
622
623 case BT_CONNECT2:
624 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
625 conn->hcon->type == ACL_LINK) {
626 struct l2cap_conn_rsp rsp;
627 __u16 result;
628
629 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
630 result = L2CAP_CR_SEC_BLOCK;
631 else
632 result = L2CAP_CR_BAD_PSM;
633 l2cap_state_change(chan, BT_DISCONN);
634
635 rsp.scid = cpu_to_le16(chan->dcid);
636 rsp.dcid = cpu_to_le16(chan->scid);
637 rsp.result = cpu_to_le16(result);
638 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
639 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
640 sizeof(rsp), &rsp);
641 }
642
643 l2cap_chan_del(chan, reason);
644 break;
645
646 case BT_CONNECT:
647 case BT_DISCONN:
648 l2cap_chan_del(chan, reason);
649 break;
650
651 default:
652 lock_sock(sk);
653 sock_set_flag(sk, SOCK_ZAPPED);
654 release_sock(sk);
655 break;
656 }
657}
658
659static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
660{
661 if (chan->chan_type == L2CAP_CHAN_RAW) {
662 switch (chan->sec_level) {
663 case BT_SECURITY_HIGH:
664 return HCI_AT_DEDICATED_BONDING_MITM;
665 case BT_SECURITY_MEDIUM:
666 return HCI_AT_DEDICATED_BONDING;
667 default:
668 return HCI_AT_NO_BONDING;
669 }
670 } else if (chan->psm == cpu_to_le16(0x0001)) {
671 if (chan->sec_level == BT_SECURITY_LOW)
672 chan->sec_level = BT_SECURITY_SDP;
673
674 if (chan->sec_level == BT_SECURITY_HIGH)
675 return HCI_AT_NO_BONDING_MITM;
676 else
677 return HCI_AT_NO_BONDING;
678 } else {
679 switch (chan->sec_level) {
680 case BT_SECURITY_HIGH:
681 return HCI_AT_GENERAL_BONDING_MITM;
682 case BT_SECURITY_MEDIUM:
683 return HCI_AT_GENERAL_BONDING;
684 default:
685 return HCI_AT_NO_BONDING;
686 }
687 }
688}
689
690/* Service level security */
691int l2cap_chan_check_security(struct l2cap_chan *chan)
692{
693 struct l2cap_conn *conn = chan->conn;
694 __u8 auth_type;
695
696 auth_type = l2cap_get_auth_type(chan);
697
698 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
699}
700
701static u8 l2cap_get_ident(struct l2cap_conn *conn)
702{
703 u8 id;
704
705	/* Get next available identifier.
706	 * 1 - 128 are used by the kernel.
707 * 129 - 199 are reserved.
708 * 200 - 254 are used by utilities like l2ping, etc.
709 */
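	/* Illustrative consequence of the wrap below (not an original comment):
	 * successive calls return 1, 2, ..., 128, 1, 2, ... so an ident of 0,
	 * or one of the reserved/utility values above 128, is never handed out.
	 */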
710
711 spin_lock(&conn->lock);
712
713 if (++conn->tx_ident > 128)
714 conn->tx_ident = 1;
715
716 id = conn->tx_ident;
717
718 spin_unlock(&conn->lock);
719
720 return id;
721}
722
723static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
724{
725 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
726 u8 flags;
727
728 BT_DBG("code 0x%2.2x", code);
729
730 if (!skb)
731 return;
732
733 if (lmp_no_flush_capable(conn->hcon->hdev))
734 flags = ACL_START_NO_FLUSH;
735 else
736 flags = ACL_START;
737
738 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
739 skb->priority = HCI_PRIO_MAX;
740
741 hci_send_acl(conn->hchan, skb, flags);
742}
743
744static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
745{
746 struct hci_conn *hcon = chan->conn->hcon;
747 u16 flags;
748
749 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
750 skb->priority);
751
752 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
753 lmp_no_flush_capable(hcon->hdev))
754 flags = ACL_START_NO_FLUSH;
755 else
756 flags = ACL_START;
757
758 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
759 hci_send_acl(chan->conn->hchan, skb, flags);
760}
761
762static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
763{
764 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
765 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
766
767 if (enh & L2CAP_CTRL_FRAME_TYPE) {
768 /* S-Frame */
769 control->sframe = 1;
770 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
771 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
772
773 control->sar = 0;
774 control->txseq = 0;
775 } else {
776 /* I-Frame */
777 control->sframe = 0;
778 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
779 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
780
781 control->poll = 0;
782 control->super = 0;
783 }
784}
785
786static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
787{
788 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
789 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
790
791 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
792 /* S-Frame */
793 control->sframe = 1;
794 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
795 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
796
797 control->sar = 0;
798 control->txseq = 0;
799 } else {
800 /* I-Frame */
801 control->sframe = 0;
802 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
803 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
804
805 control->poll = 0;
806 control->super = 0;
807 }
808}
809
810static inline void __unpack_control(struct l2cap_chan *chan,
811 struct sk_buff *skb)
812{
813 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
814 __unpack_extended_control(get_unaligned_le32(skb->data),
815 &bt_cb(skb)->control);
816 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
817 } else {
818 __unpack_enhanced_control(get_unaligned_le16(skb->data),
819 &bt_cb(skb)->control);
820 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
821 }
822}
823
824static u32 __pack_extended_control(struct l2cap_ctrl *control)
825{
826 u32 packed;
827
828 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
829 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
830
831 if (control->sframe) {
832 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
833 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
834 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
835 } else {
836 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
837 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
838 }
839
840 return packed;
841}
842
843static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
844{
845 u16 packed;
846
847 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
848 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
849
850 if (control->sframe) {
851 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
852 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
853 packed |= L2CAP_CTRL_FRAME_TYPE;
854 } else {
855 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
856 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
857 }
858
859 return packed;
860}
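
/* Worked example (illustrative, assuming the standard Enhanced Control
 * Field layout behind the L2CAP_CTRL_* shifts: bit 0 frame type,
 * bits 1-6 TxSeq, bit 7 F, bits 8-13 ReqSeq, bits 14-15 SAR, with the
 * S-frame supervisory bits at 2-3 and P at bit 4):
 *
 *	RR S-frame, final = 1, reqseq = 3          ->  packed = 0x0381
 *	I-frame, txseq = 5, reqseq = 3,
 *	SAR unsegmented                            ->  packed = 0x030a
 */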
861
862static inline void __pack_control(struct l2cap_chan *chan,
863 struct l2cap_ctrl *control,
864 struct sk_buff *skb)
865{
866 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
867 put_unaligned_le32(__pack_extended_control(control),
868 skb->data + L2CAP_HDR_SIZE);
869 } else {
870 put_unaligned_le16(__pack_enhanced_control(control),
871 skb->data + L2CAP_HDR_SIZE);
872 }
873}
874
875static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
876 u32 control)
877{
878 struct sk_buff *skb;
879 struct l2cap_hdr *lh;
880 int hlen;
881
882 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
883 hlen = L2CAP_EXT_HDR_SIZE;
884 else
885 hlen = L2CAP_ENH_HDR_SIZE;
886
887 if (chan->fcs == L2CAP_FCS_CRC16)
888 hlen += L2CAP_FCS_SIZE;
889
890 skb = bt_skb_alloc(hlen, GFP_KERNEL);
891
892 if (!skb)
893 return ERR_PTR(-ENOMEM);
894
895 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
896 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
897 lh->cid = cpu_to_le16(chan->dcid);
898
899 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
900 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
901 else
902 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
903
904 if (chan->fcs == L2CAP_FCS_CRC16) {
905 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
906 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
907 }
908
909 skb->priority = HCI_PRIO_MAX;
910 return skb;
911}
912
913static void l2cap_send_sframe(struct l2cap_chan *chan,
914 struct l2cap_ctrl *control)
915{
916 struct sk_buff *skb;
917 u32 control_field;
918
919 BT_DBG("chan %p, control %p", chan, control);
920
921 if (!control->sframe)
922 return;
923
924 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
925 !control->poll)
926 control->final = 1;
927
928 if (control->super == L2CAP_SUPER_RR)
929 clear_bit(CONN_RNR_SENT, &chan->conn_state);
930 else if (control->super == L2CAP_SUPER_RNR)
931 set_bit(CONN_RNR_SENT, &chan->conn_state);
932
933 if (control->super != L2CAP_SUPER_SREJ) {
934 chan->last_acked_seq = control->reqseq;
935 __clear_ack_timer(chan);
936 }
937
938 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
939 control->final, control->poll, control->super);
940
941 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
942 control_field = __pack_extended_control(control);
943 else
944 control_field = __pack_enhanced_control(control);
945
946 skb = l2cap_create_sframe_pdu(chan, control_field);
947 if (!IS_ERR(skb))
948 l2cap_do_send(chan, skb);
949}
950
951static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
952{
953 struct l2cap_ctrl control;
954
955 BT_DBG("chan %p, poll %d", chan, poll);
956
957 memset(&control, 0, sizeof(control));
958 control.sframe = 1;
959 control.poll = poll;
960
961 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
962 control.super = L2CAP_SUPER_RNR;
963 else
964 control.super = L2CAP_SUPER_RR;
965
966 control.reqseq = chan->buffer_seq;
967 l2cap_send_sframe(chan, &control);
968}
969
970static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
971{
972 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
973}
974
975static void l2cap_send_conn_req(struct l2cap_chan *chan)
976{
977 struct l2cap_conn *conn = chan->conn;
978 struct l2cap_conn_req req;
979
980 req.scid = cpu_to_le16(chan->scid);
981 req.psm = chan->psm;
982
983 chan->ident = l2cap_get_ident(conn);
984
985 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
986
987 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
988}
989
990static void l2cap_chan_ready(struct l2cap_chan *chan)
991{
992 struct sock *sk = chan->sk;
993 struct sock *parent;
994
995 lock_sock(sk);
996
997 parent = bt_sk(sk)->parent;
998
999 BT_DBG("sk %p, parent %p", sk, parent);
1000
1001 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1002 chan->conf_state = 0;
1003 __clear_chan_timer(chan);
1004
1005 __l2cap_state_change(chan, BT_CONNECTED);
1006 sk->sk_state_change(sk);
1007
1008 if (parent)
1009 parent->sk_data_ready(parent, 0);
1010
1011 release_sock(sk);
1012}
1013
1014static void l2cap_do_start(struct l2cap_chan *chan)
1015{
1016 struct l2cap_conn *conn = chan->conn;
1017
1018 if (conn->hcon->type == LE_LINK) {
1019 l2cap_chan_ready(chan);
1020 return;
1021 }
1022
1023 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1024 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1025 return;
1026
1027 if (l2cap_chan_check_security(chan) &&
1028 __l2cap_no_conn_pending(chan))
1029 l2cap_send_conn_req(chan);
1030 } else {
1031 struct l2cap_info_req req;
1032 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1033
1034 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1035 conn->info_ident = l2cap_get_ident(conn);
1036
1037 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1038
1039 l2cap_send_cmd(conn, conn->info_ident,
1040 L2CAP_INFO_REQ, sizeof(req), &req);
1041 }
1042}
1043
1044static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1045{
1046 u32 local_feat_mask = l2cap_feat_mask;
1047 if (!disable_ertm)
1048 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1049
1050 switch (mode) {
1051 case L2CAP_MODE_ERTM:
1052 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1053 case L2CAP_MODE_STREAMING:
1054 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1055 default:
1056 return 0x00;
1057 }
1058}
1059
1060static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1061{
1062 struct sock *sk = chan->sk;
1063 struct l2cap_disconn_req req;
1064
1065 if (!conn)
1066 return;
1067
1068 if (chan->mode == L2CAP_MODE_ERTM) {
1069 __clear_retrans_timer(chan);
1070 __clear_monitor_timer(chan);
1071 __clear_ack_timer(chan);
1072 }
1073
1074 req.dcid = cpu_to_le16(chan->dcid);
1075 req.scid = cpu_to_le16(chan->scid);
1076 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1077 L2CAP_DISCONN_REQ, sizeof(req), &req);
1078
1079 lock_sock(sk);
1080 __l2cap_state_change(chan, BT_DISCONN);
1081 __l2cap_chan_set_err(chan, err);
1082 release_sock(sk);
1083}
1084
1085/* ---- L2CAP connections ---- */
1086static void l2cap_conn_start(struct l2cap_conn *conn)
1087{
1088 struct l2cap_chan *chan, *tmp;
1089
1090 BT_DBG("conn %p", conn);
1091
1092 mutex_lock(&conn->chan_lock);
1093
1094 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1095 struct sock *sk = chan->sk;
1096
1097 l2cap_chan_lock(chan);
1098
1099 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1100 l2cap_chan_unlock(chan);
1101 continue;
1102 }
1103
1104 if (chan->state == BT_CONNECT) {
1105 if (!l2cap_chan_check_security(chan) ||
1106 !__l2cap_no_conn_pending(chan)) {
1107 l2cap_chan_unlock(chan);
1108 continue;
1109 }
1110
1111 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1112 && test_bit(CONF_STATE2_DEVICE,
1113 &chan->conf_state)) {
1114 l2cap_chan_close(chan, ECONNRESET);
1115 l2cap_chan_unlock(chan);
1116 continue;
1117 }
1118
1119 l2cap_send_conn_req(chan);
1120
1121 } else if (chan->state == BT_CONNECT2) {
1122 struct l2cap_conn_rsp rsp;
1123 char buf[128];
1124 rsp.scid = cpu_to_le16(chan->dcid);
1125 rsp.dcid = cpu_to_le16(chan->scid);
1126
1127 if (l2cap_chan_check_security(chan)) {
1128 lock_sock(sk);
1129 if (test_bit(BT_SK_DEFER_SETUP,
1130 &bt_sk(sk)->flags)) {
1131 struct sock *parent = bt_sk(sk)->parent;
1132 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1133 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1134 if (parent)
1135 parent->sk_data_ready(parent, 0);
1136
1137 } else {
1138 __l2cap_state_change(chan, BT_CONFIG);
1139 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1140 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1141 }
1142 release_sock(sk);
1143 } else {
1144 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1145 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1146 }
1147
1148 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1149 sizeof(rsp), &rsp);
1150
1151 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1152 rsp.result != L2CAP_CR_SUCCESS) {
1153 l2cap_chan_unlock(chan);
1154 continue;
1155 }
1156
1157 set_bit(CONF_REQ_SENT, &chan->conf_state);
1158 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1159 l2cap_build_conf_req(chan, buf), buf);
1160 chan->num_conf_req++;
1161 }
1162
1163 l2cap_chan_unlock(chan);
1164 }
1165
1166 mutex_unlock(&conn->chan_lock);
1167}
1168
1169/* Find channel with given CID and source/destination bdaddr.
1170 * Returns closest match.
1171 */
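/* Illustrative matching behaviour (not part of the original source): an
 * exact source + destination match is returned immediately, while channels
 * bound with BDADDR_ANY on one or both sides are only remembered as the
 * "closest match" and returned when no exact match exists.
 */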
1172static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1173 bdaddr_t *src,
1174 bdaddr_t *dst)
1175{
1176 struct l2cap_chan *c, *c1 = NULL;
1177
1178 read_lock(&chan_list_lock);
1179
1180 list_for_each_entry(c, &chan_list, global_l) {
1181 struct sock *sk = c->sk;
1182
1183 if (state && c->state != state)
1184 continue;
1185
1186 if (c->scid == cid) {
1187 int src_match, dst_match;
1188 int src_any, dst_any;
1189
1190 /* Exact match. */
1191 src_match = !bacmp(&bt_sk(sk)->src, src);
1192 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1193 if (src_match && dst_match) {
1194 read_unlock(&chan_list_lock);
1195 return c;
1196 }
1197
1198 /* Closest match */
1199 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1200 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1201 if ((src_match && dst_any) || (src_any && dst_match) ||
1202 (src_any && dst_any))
1203 c1 = c;
1204 }
1205 }
1206
1207 read_unlock(&chan_list_lock);
1208
1209 return c1;
1210}
1211
1212static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1213{
1214 struct sock *parent, *sk;
1215 struct l2cap_chan *chan, *pchan;
1216
1217 BT_DBG("");
1218
1219	/* Check if there is a socket listening on this CID */
1220 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1221 conn->src, conn->dst);
1222 if (!pchan)
1223 return;
1224
1225 parent = pchan->sk;
1226
1227 lock_sock(parent);
1228
1229 /* Check for backlog size */
1230 if (sk_acceptq_is_full(parent)) {
1231 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1232 goto clean;
1233 }
1234
1235 chan = pchan->ops->new_connection(pchan->data);
1236 if (!chan)
1237 goto clean;
1238
1239 sk = chan->sk;
1240
1241 hci_conn_hold(conn->hcon);
1242
1243 bacpy(&bt_sk(sk)->src, conn->src);
1244 bacpy(&bt_sk(sk)->dst, conn->dst);
1245
1246 bt_accept_enqueue(parent, sk);
1247
1248 l2cap_chan_add(conn, chan);
1249
1250 __set_chan_timer(chan, sk->sk_sndtimeo);
1251
1252 __l2cap_state_change(chan, BT_CONNECTED);
1253 parent->sk_data_ready(parent, 0);
1254
1255clean:
1256 release_sock(parent);
1257}
1258
1259static void l2cap_conn_ready(struct l2cap_conn *conn)
1260{
1261 struct l2cap_chan *chan;
1262
1263 BT_DBG("conn %p", conn);
1264
1265 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1266 l2cap_le_conn_ready(conn);
1267
1268 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1269 smp_conn_security(conn, conn->hcon->pending_sec_level);
1270
1271 mutex_lock(&conn->chan_lock);
1272
1273 list_for_each_entry(chan, &conn->chan_l, list) {
1274
1275 l2cap_chan_lock(chan);
1276
1277 if (conn->hcon->type == LE_LINK) {
1278 if (smp_conn_security(conn, chan->sec_level))
1279 l2cap_chan_ready(chan);
1280
1281 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1282 struct sock *sk = chan->sk;
1283 __clear_chan_timer(chan);
1284 lock_sock(sk);
1285 __l2cap_state_change(chan, BT_CONNECTED);
1286 sk->sk_state_change(sk);
1287 release_sock(sk);
1288
1289 } else if (chan->state == BT_CONNECT)
1290 l2cap_do_start(chan);
1291
1292 l2cap_chan_unlock(chan);
1293 }
1294
1295 mutex_unlock(&conn->chan_lock);
1296}
1297
1298/* Notify sockets that we can no longer guarantee reliability */
1299static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1300{
1301 struct l2cap_chan *chan;
1302
1303 BT_DBG("conn %p", conn);
1304
1305 mutex_lock(&conn->chan_lock);
1306
1307 list_for_each_entry(chan, &conn->chan_l, list) {
1308 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1309 __l2cap_chan_set_err(chan, err);
1310 }
1311
1312 mutex_unlock(&conn->chan_lock);
1313}
1314
1315static void l2cap_info_timeout(struct work_struct *work)
1316{
1317 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1318 info_timer.work);
1319
1320 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1321 conn->info_ident = 0;
1322
1323 l2cap_conn_start(conn);
1324}
1325
1326static void l2cap_conn_del(struct hci_conn *hcon, int err)
1327{
1328 struct l2cap_conn *conn = hcon->l2cap_data;
1329 struct l2cap_chan *chan, *l;
1330
1331 if (!conn)
1332 return;
1333
1334 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1335
1336 kfree_skb(conn->rx_skb);
1337
1338 mutex_lock(&conn->chan_lock);
1339
1340 /* Kill channels */
1341 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1342 l2cap_chan_hold(chan);
1343 l2cap_chan_lock(chan);
1344
1345 l2cap_chan_del(chan, err);
1346
1347 l2cap_chan_unlock(chan);
1348
1349 chan->ops->close(chan->data);
1350 l2cap_chan_put(chan);
1351 }
1352
1353 mutex_unlock(&conn->chan_lock);
1354
1355 hci_chan_del(conn->hchan);
1356
1357 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1358 cancel_delayed_work_sync(&conn->info_timer);
1359
1360 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1361 cancel_delayed_work_sync(&conn->security_timer);
1362 smp_chan_destroy(conn);
1363 }
1364
1365 hcon->l2cap_data = NULL;
1366 kfree(conn);
1367}
1368
1369static void security_timeout(struct work_struct *work)
1370{
1371 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1372 security_timer.work);
1373
1374 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1375}
1376
1377static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1378{
1379 struct l2cap_conn *conn = hcon->l2cap_data;
1380 struct hci_chan *hchan;
1381
1382 if (conn || status)
1383 return conn;
1384
1385 hchan = hci_chan_create(hcon);
1386 if (!hchan)
1387 return NULL;
1388
1389 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1390 if (!conn) {
1391 hci_chan_del(hchan);
1392 return NULL;
1393 }
1394
1395 hcon->l2cap_data = conn;
1396 conn->hcon = hcon;
1397 conn->hchan = hchan;
1398
1399 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1400
1401 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1402 conn->mtu = hcon->hdev->le_mtu;
1403 else
1404 conn->mtu = hcon->hdev->acl_mtu;
1405
1406 conn->src = &hcon->hdev->bdaddr;
1407 conn->dst = &hcon->dst;
1408
1409 conn->feat_mask = 0;
1410
1411 spin_lock_init(&conn->lock);
1412 mutex_init(&conn->chan_lock);
1413
1414 INIT_LIST_HEAD(&conn->chan_l);
1415
1416 if (hcon->type == LE_LINK)
1417 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1418 else
1419 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1420
1421 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1422
1423 return conn;
1424}
1425
1426/* ---- Socket interface ---- */
1427
1428/* Find channel with given PSM and source/destination bdaddr.
1429 * Returns closest match.
1430 */
1431static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1432 bdaddr_t *src,
1433 bdaddr_t *dst)
1434{
1435 struct l2cap_chan *c, *c1 = NULL;
1436
1437 read_lock(&chan_list_lock);
1438
1439 list_for_each_entry(c, &chan_list, global_l) {
1440 struct sock *sk = c->sk;
1441
1442 if (state && c->state != state)
1443 continue;
1444
1445 if (c->psm == psm) {
1446 int src_match, dst_match;
1447 int src_any, dst_any;
1448
1449 /* Exact match. */
1450 src_match = !bacmp(&bt_sk(sk)->src, src);
1451 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1452 if (src_match && dst_match) {
1453 read_unlock(&chan_list_lock);
1454 return c;
1455 }
1456
1457 /* Closest match */
1458 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1459 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1460 if ((src_match && dst_any) || (src_any && dst_match) ||
1461 (src_any && dst_any))
1462 c1 = c;
1463 }
1464 }
1465
1466 read_unlock(&chan_list_lock);
1467
1468 return c1;
1469}
1470
1471int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1472 bdaddr_t *dst, u8 dst_type)
1473{
1474 struct sock *sk = chan->sk;
1475 bdaddr_t *src = &bt_sk(sk)->src;
1476 struct l2cap_conn *conn;
1477 struct hci_conn *hcon;
1478 struct hci_dev *hdev;
1479 __u8 auth_type;
1480 int err;
1481
1482 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1483 dst_type, __le16_to_cpu(chan->psm));
1484
1485 hdev = hci_get_route(dst, src);
1486 if (!hdev)
1487 return -EHOSTUNREACH;
1488
1489 hci_dev_lock(hdev);
1490
1491 l2cap_chan_lock(chan);
1492
1493 /* PSM must be odd and lsb of upper byte must be 0 */
1494 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1495 chan->chan_type != L2CAP_CHAN_RAW) {
1496 err = -EINVAL;
1497 goto done;
1498 }
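
	/* Illustrative examples of the mask check above: 0x0001 (SDP) and
	 * 0x1001 (a dynamic PSM) pass, since the low byte is odd and bit 0
	 * of the upper byte is clear; 0x0002 (even) and 0x0101 (odd upper
	 * byte) both fail with -EINVAL.
	 */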
1499
1500 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1501 err = -EINVAL;
1502 goto done;
1503 }
1504
1505 switch (chan->mode) {
1506 case L2CAP_MODE_BASIC:
1507 break;
1508 case L2CAP_MODE_ERTM:
1509 case L2CAP_MODE_STREAMING:
1510 if (!disable_ertm)
1511 break;
1512 /* fall through */
1513 default:
1514 err = -ENOTSUPP;
1515 goto done;
1516 }
1517
1518 lock_sock(sk);
1519
1520 switch (sk->sk_state) {
1521 case BT_CONNECT:
1522 case BT_CONNECT2:
1523 case BT_CONFIG:
1524 /* Already connecting */
1525 err = 0;
1526 release_sock(sk);
1527 goto done;
1528
1529 case BT_CONNECTED:
1530 /* Already connected */
1531 err = -EISCONN;
1532 release_sock(sk);
1533 goto done;
1534
1535 case BT_OPEN:
1536 case BT_BOUND:
1537 /* Can connect */
1538 break;
1539
1540 default:
1541 err = -EBADFD;
1542 release_sock(sk);
1543 goto done;
1544 }
1545
1546 /* Set destination address and psm */
1547 bacpy(&bt_sk(sk)->dst, dst);
1548
1549 release_sock(sk);
1550
1551 chan->psm = psm;
1552 chan->dcid = cid;
1553
1554 auth_type = l2cap_get_auth_type(chan);
1555
1556 if (chan->dcid == L2CAP_CID_LE_DATA)
1557 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1558 chan->sec_level, auth_type);
1559 else
1560 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1561 chan->sec_level, auth_type);
1562
1563 if (IS_ERR(hcon)) {
1564 err = PTR_ERR(hcon);
1565 goto done;
1566 }
1567
1568 conn = l2cap_conn_add(hcon, 0);
1569 if (!conn) {
1570 hci_conn_put(hcon);
1571 err = -ENOMEM;
1572 goto done;
1573 }
1574
1575 if (hcon->type == LE_LINK) {
1576 err = 0;
1577
1578 if (!list_empty(&conn->chan_l)) {
1579 err = -EBUSY;
1580 hci_conn_put(hcon);
1581 }
1582
1583 if (err)
1584 goto done;
1585 }
1586
1587 /* Update source addr of the socket */
1588 bacpy(src, conn->src);
1589
1590 l2cap_chan_unlock(chan);
1591 l2cap_chan_add(conn, chan);
1592 l2cap_chan_lock(chan);
1593
1594 l2cap_state_change(chan, BT_CONNECT);
1595 __set_chan_timer(chan, sk->sk_sndtimeo);
1596
1597 if (hcon->state == BT_CONNECTED) {
1598 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1599 __clear_chan_timer(chan);
1600 if (l2cap_chan_check_security(chan))
1601 l2cap_state_change(chan, BT_CONNECTED);
1602 } else
1603 l2cap_do_start(chan);
1604 }
1605
1606 err = 0;
1607
1608done:
1609 l2cap_chan_unlock(chan);
1610 hci_dev_unlock(hdev);
1611 hci_dev_put(hdev);
1612 return err;
1613}
1614
1615int __l2cap_wait_ack(struct sock *sk)
1616{
1617 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1618 DECLARE_WAITQUEUE(wait, current);
1619 int err = 0;
1620 int timeo = HZ/5;
1621
1622 add_wait_queue(sk_sleep(sk), &wait);
1623 set_current_state(TASK_INTERRUPTIBLE);
1624 while (chan->unacked_frames > 0 && chan->conn) {
1625 if (!timeo)
1626 timeo = HZ/5;
1627
1628 if (signal_pending(current)) {
1629 err = sock_intr_errno(timeo);
1630 break;
1631 }
1632
1633 release_sock(sk);
1634 timeo = schedule_timeout(timeo);
1635 lock_sock(sk);
1636 set_current_state(TASK_INTERRUPTIBLE);
1637
1638 err = sock_error(sk);
1639 if (err)
1640 break;
1641 }
1642 set_current_state(TASK_RUNNING);
1643 remove_wait_queue(sk_sleep(sk), &wait);
1644 return err;
1645}
1646
1647static void l2cap_monitor_timeout(struct work_struct *work)
1648{
1649 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1650 monitor_timer.work);
1651
1652 BT_DBG("chan %p", chan);
1653
1654 l2cap_chan_lock(chan);
1655
1656 if (!chan->conn) {
1657 l2cap_chan_unlock(chan);
1658 l2cap_chan_put(chan);
1659 return;
1660 }
1661
1662 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1663
1664 l2cap_chan_unlock(chan);
1665 l2cap_chan_put(chan);
1666}
1667
1668static void l2cap_retrans_timeout(struct work_struct *work)
1669{
1670 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1671 retrans_timer.work);
1672
1673 BT_DBG("chan %p", chan);
1674
1675 l2cap_chan_lock(chan);
1676
1677 if (!chan->conn) {
1678 l2cap_chan_unlock(chan);
1679 l2cap_chan_put(chan);
1680 return;
1681 }
1682
1683 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1684 l2cap_chan_unlock(chan);
1685 l2cap_chan_put(chan);
1686}
1687
1688static int l2cap_streaming_send(struct l2cap_chan *chan,
1689 struct sk_buff_head *skbs)
1690{
1691 struct sk_buff *skb;
1692 struct l2cap_ctrl *control;
1693
1694 BT_DBG("chan %p, skbs %p", chan, skbs);
1695
1696 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1697
1698 while (!skb_queue_empty(&chan->tx_q)) {
1699
1700 skb = skb_dequeue(&chan->tx_q);
1701
1702 bt_cb(skb)->control.retries = 1;
1703 control = &bt_cb(skb)->control;
1704
1705 control->reqseq = 0;
1706 control->txseq = chan->next_tx_seq;
1707
1708 __pack_control(chan, control, skb);
1709
1710 if (chan->fcs == L2CAP_FCS_CRC16) {
1711 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1712 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1713 }
1714
1715 l2cap_do_send(chan, skb);
1716
1717 BT_DBG("Sent txseq %d", (int)control->txseq);
1718
1719 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1720 chan->frames_sent++;
1721 }
1722
1723 return 0;
1724}
1725
1726static int l2cap_ertm_send(struct l2cap_chan *chan)
1727{
1728 struct sk_buff *skb, *tx_skb;
1729 struct l2cap_ctrl *control;
1730 int sent = 0;
1731
1732 BT_DBG("chan %p", chan);
1733
1734 if (chan->state != BT_CONNECTED)
1735 return -ENOTCONN;
1736
1737 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1738 return 0;
1739
1740 while (chan->tx_send_head &&
1741 chan->unacked_frames < chan->remote_tx_win &&
1742 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1743
1744 skb = chan->tx_send_head;
1745
1746 bt_cb(skb)->control.retries = 1;
1747 control = &bt_cb(skb)->control;
1748
1749 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1750 control->final = 1;
1751
1752 control->reqseq = chan->buffer_seq;
1753 chan->last_acked_seq = chan->buffer_seq;
1754 control->txseq = chan->next_tx_seq;
1755
1756 __pack_control(chan, control, skb);
1757
1758 if (chan->fcs == L2CAP_FCS_CRC16) {
1759 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1760 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1761 }
1762
1763 /* Clone after data has been modified. Data is assumed to be
1764 read-only (for locking purposes) on cloned sk_buffs.
1765 */
1766 tx_skb = skb_clone(skb, GFP_KERNEL);
1767
1768 if (!tx_skb)
1769 break;
1770
1771 __set_retrans_timer(chan);
1772
1773 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1774 chan->unacked_frames++;
1775 chan->frames_sent++;
1776 sent++;
1777
1778 if (skb_queue_is_last(&chan->tx_q, skb))
1779 chan->tx_send_head = NULL;
1780 else
1781 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1782
1783 l2cap_do_send(chan, tx_skb);
1784 BT_DBG("Sent txseq %d", (int)control->txseq);
1785 }
1786
1787 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1788 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
1789
1790 return sent;
1791}
1792
1793static void l2cap_ertm_resend(struct l2cap_chan *chan)
1794{
1795 struct l2cap_ctrl control;
1796 struct sk_buff *skb;
1797 struct sk_buff *tx_skb;
1798 u16 seq;
1799
1800 BT_DBG("chan %p", chan);
1801
1802 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1803 return;
1804
1805 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1806 seq = l2cap_seq_list_pop(&chan->retrans_list);
1807
1808 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1809 if (!skb) {
1810 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1811 seq);
1812 continue;
1813 }
1814
1815 bt_cb(skb)->control.retries++;
1816 control = bt_cb(skb)->control;
1817
1818 if (chan->max_tx != 0 &&
1819 bt_cb(skb)->control.retries > chan->max_tx) {
1820 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1821 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1822 l2cap_seq_list_clear(&chan->retrans_list);
1823 break;
1824 }
1825
1826 control.reqseq = chan->buffer_seq;
1827 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1828 control.final = 1;
1829 else
1830 control.final = 0;
1831
1832 if (skb_cloned(skb)) {
1833 /* Cloned sk_buffs are read-only, so we need a
1834 * writeable copy
1835 */
1836 tx_skb = skb_copy(skb, GFP_ATOMIC);
1837 } else {
1838 tx_skb = skb_clone(skb, GFP_ATOMIC);
1839 }
1840
1841 if (!tx_skb) {
1842 l2cap_seq_list_clear(&chan->retrans_list);
1843 break;
1844 }
1845
1846 /* Update skb contents */
1847 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1848 put_unaligned_le32(__pack_extended_control(&control),
1849 tx_skb->data + L2CAP_HDR_SIZE);
1850 } else {
1851 put_unaligned_le16(__pack_enhanced_control(&control),
1852 tx_skb->data + L2CAP_HDR_SIZE);
1853 }
1854
1855 if (chan->fcs == L2CAP_FCS_CRC16) {
1856 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1857 put_unaligned_le16(fcs, skb_put(tx_skb,
1858 L2CAP_FCS_SIZE));
1859 }
1860
1861 l2cap_do_send(chan, tx_skb);
1862
1863 BT_DBG("Resent txseq %d", control.txseq);
1864
1865 chan->last_acked_seq = chan->buffer_seq;
1866 }
1867}
1868
1869static void l2cap_retransmit(struct l2cap_chan *chan,
1870 struct l2cap_ctrl *control)
1871{
1872 BT_DBG("chan %p, control %p", chan, control);
1873
1874 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1875 l2cap_ertm_resend(chan);
1876}
1877
1878static void l2cap_retransmit_all(struct l2cap_chan *chan,
1879 struct l2cap_ctrl *control)
1880{
1881 struct sk_buff *skb;
1882
1883 BT_DBG("chan %p, control %p", chan, control);
1884
1885 if (control->poll)
1886 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1887
1888 l2cap_seq_list_clear(&chan->retrans_list);
1889
1890 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1891 return;
1892
1893 if (chan->unacked_frames) {
1894 skb_queue_walk(&chan->tx_q, skb) {
1895 if (bt_cb(skb)->control.txseq == control->reqseq ||
1896 skb == chan->tx_send_head)
1897 break;
1898 }
1899
1900 skb_queue_walk_from(&chan->tx_q, skb) {
1901 if (skb == chan->tx_send_head)
1902 break;
1903
1904 l2cap_seq_list_append(&chan->retrans_list,
1905 bt_cb(skb)->control.txseq);
1906 }
1907
1908 l2cap_ertm_resend(chan);
1909 }
1910}
1911
1912static void l2cap_send_ack(struct l2cap_chan *chan)
1913{
1914 struct l2cap_ctrl control;
1915 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1916 chan->last_acked_seq);
1917 int threshold;
1918
1919 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1920 chan, chan->last_acked_seq, chan->buffer_seq);
1921
1922 memset(&control, 0, sizeof(control));
1923 control.sframe = 1;
1924
1925 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1926 chan->rx_state == L2CAP_RX_STATE_RECV) {
1927 __clear_ack_timer(chan);
1928 control.super = L2CAP_SUPER_RNR;
1929 control.reqseq = chan->buffer_seq;
1930 l2cap_send_sframe(chan, &control);
1931 } else {
1932 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1933 l2cap_ertm_send(chan);
1934 /* If any i-frames were sent, they included an ack */
1935 if (chan->buffer_seq == chan->last_acked_seq)
1936 frames_to_ack = 0;
1937 }
1938
1939		/* Ack now if the tx window is three-quarters full.
1940		 * Calculate without mul or div.
1941 */
1942 threshold = chan->tx_win;
1943 threshold += threshold << 1;
1944 threshold >>= 2;
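		/* Worked example (illustrative): with tx_win = 63,
		 * threshold = 63 + (63 << 1) = 189 and 189 >> 2 = 47,
		 * i.e. roughly 3/4 of the window (63 * 3 / 4 = 47.25).
		 */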
1945
1946 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1947 threshold);
1948
1949 if (frames_to_ack >= threshold) {
1950 __clear_ack_timer(chan);
1951 control.super = L2CAP_SUPER_RR;
1952 control.reqseq = chan->buffer_seq;
1953 l2cap_send_sframe(chan, &control);
1954 frames_to_ack = 0;
1955 }
1956
1957 if (frames_to_ack)
1958 __set_ack_timer(chan);
1959 }
1960}
1961
1962static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1963 struct msghdr *msg, int len,
1964 int count, struct sk_buff *skb)
1965{
1966 struct l2cap_conn *conn = chan->conn;
1967 struct sk_buff **frag;
1968 int sent = 0;
1969
1970 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1971 return -EFAULT;
1972
1973 sent += count;
1974 len -= count;
1975
1976 /* Continuation fragments (no L2CAP header) */
1977 frag = &skb_shinfo(skb)->frag_list;
1978 while (len) {
1979 struct sk_buff *tmp;
1980
1981 count = min_t(unsigned int, conn->mtu, len);
1982
1983 tmp = chan->ops->alloc_skb(chan, count,
1984 msg->msg_flags & MSG_DONTWAIT);
1985 if (IS_ERR(tmp))
1986 return PTR_ERR(tmp);
1987
1988 *frag = tmp;
1989
1990 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1991 return -EFAULT;
1992
1993 (*frag)->priority = skb->priority;
1994
1995 sent += count;
1996 len -= count;
1997
1998 skb->len += (*frag)->len;
1999 skb->data_len += (*frag)->len;
2000
2001 frag = &(*frag)->next;
2002 }
2003
2004 return sent;
2005}
2006
2007static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2008 struct msghdr *msg, size_t len,
2009 u32 priority)
2010{
2011 struct l2cap_conn *conn = chan->conn;
2012 struct sk_buff *skb;
2013 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2014 struct l2cap_hdr *lh;
2015
2016 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
2017
2018 count = min_t(unsigned int, (conn->mtu - hlen), len);
2019
2020 skb = chan->ops->alloc_skb(chan, count + hlen,
2021 msg->msg_flags & MSG_DONTWAIT);
2022 if (IS_ERR(skb))
2023 return skb;
2024
2025 skb->priority = priority;
2026
2027 /* Create L2CAP header */
2028 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2029 lh->cid = cpu_to_le16(chan->dcid);
2030 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2031 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2032
2033 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2034 if (unlikely(err < 0)) {
2035 kfree_skb(skb);
2036 return ERR_PTR(err);
2037 }
2038 return skb;
2039}
2040
2041static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2042 struct msghdr *msg, size_t len,
2043 u32 priority)
2044{
2045 struct l2cap_conn *conn = chan->conn;
2046 struct sk_buff *skb;
2047 int err, count;
2048 struct l2cap_hdr *lh;
2049
2050 BT_DBG("chan %p len %d", chan, (int)len);
2051
2052 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2053
2054 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2055 msg->msg_flags & MSG_DONTWAIT);
2056 if (IS_ERR(skb))
2057 return skb;
2058
2059 skb->priority = priority;
2060
2061 /* Create L2CAP header */
2062 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2063 lh->cid = cpu_to_le16(chan->dcid);
2064 lh->len = cpu_to_le16(len);
2065
2066 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2067 if (unlikely(err < 0)) {
2068 kfree_skb(skb);
2069 return ERR_PTR(err);
2070 }
2071 return skb;
2072}
2073
2074static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2075 struct msghdr *msg, size_t len,
2076 u16 sdulen)
2077{
2078 struct l2cap_conn *conn = chan->conn;
2079 struct sk_buff *skb;
2080 int err, count, hlen;
2081 struct l2cap_hdr *lh;
2082
2083 BT_DBG("chan %p len %d", chan, (int)len);
2084
2085 if (!conn)
2086 return ERR_PTR(-ENOTCONN);
2087
2088 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2089 hlen = L2CAP_EXT_HDR_SIZE;
2090 else
2091 hlen = L2CAP_ENH_HDR_SIZE;
2092
2093 if (sdulen)
2094 hlen += L2CAP_SDULEN_SIZE;
2095
2096 if (chan->fcs == L2CAP_FCS_CRC16)
2097 hlen += L2CAP_FCS_SIZE;
2098
2099 count = min_t(unsigned int, (conn->mtu - hlen), len);
2100
2101 skb = chan->ops->alloc_skb(chan, count + hlen,
2102 msg->msg_flags & MSG_DONTWAIT);
2103 if (IS_ERR(skb))
2104 return skb;
2105
2106 /* Create L2CAP header */
2107 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2108 lh->cid = cpu_to_le16(chan->dcid);
2109 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2110
2111 /* Control header is populated later */
2112 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2113 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2114 else
2115 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2116
2117 if (sdulen)
2118 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2119
2120 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2121 if (unlikely(err < 0)) {
2122 kfree_skb(skb);
2123 return ERR_PTR(err);
2124 }
2125
2126 bt_cb(skb)->control.fcs = chan->fcs;
2127 bt_cb(skb)->control.retries = 0;
2128 return skb;
2129}
2130
2131static int l2cap_segment_sdu(struct l2cap_chan *chan,
2132 struct sk_buff_head *seg_queue,
2133 struct msghdr *msg, size_t len)
2134{
2135 struct sk_buff *skb;
2136 u16 sdu_len;
2137 size_t pdu_len;
2138 int err = 0;
2139 u8 sar;
2140
2141 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2142
2143 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2144 * so fragmented skbs are not used. The HCI layer's handling
2145 * of fragmented skbs is not compatible with ERTM's queueing.
2146 */
2147
2148 /* PDU size is derived from the HCI MTU */
2149 pdu_len = chan->conn->mtu;
2150
2151 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2152
2153 /* Adjust for largest possible L2CAP overhead. */
2154 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2155
2156 /* Remote device may have requested smaller PDUs */
2157 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2158
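	/* Illustrative sketch (header sizes assumed from l2cap.h, not stated
	 * here): with extended control in use each PDU gives up the 8-byte
	 * extended header plus a 2-byte FCS, and the first PDU of a segmented
	 * SDU also carries a 2-byte SDU length. A small SDU that fits in
	 * pdu_len goes out SAR_UNSEGMENTED; a larger one becomes SAR_START
	 * followed by SAR_CONTINUE PDUs and a final SAR_END, all further
	 * capped by the remote MPS.
	 */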
2159 if (len <= pdu_len) {
2160 sar = L2CAP_SAR_UNSEGMENTED;
2161 sdu_len = 0;
2162 pdu_len = len;
2163 } else {
2164 sar = L2CAP_SAR_START;
2165 sdu_len = len;
2166 pdu_len -= L2CAP_SDULEN_SIZE;
2167 }
2168
2169 while (len > 0) {
2170 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2171
2172 if (IS_ERR(skb)) {
2173 __skb_queue_purge(seg_queue);
2174 return PTR_ERR(skb);
2175 }
2176
2177 bt_cb(skb)->control.sar = sar;
2178 __skb_queue_tail(seg_queue, skb);
2179
2180 len -= pdu_len;
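		/* Only the first PDU of a segmented SDU carries the SDU
		 * length field; later segments get that space back for
		 * payload.
		 */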
2181 if (sdu_len) {
2182 sdu_len = 0;
2183 pdu_len += L2CAP_SDULEN_SIZE;
2184 }
2185
2186 if (len <= pdu_len) {
2187 sar = L2CAP_SAR_END;
2188 pdu_len = len;
2189 } else {
2190 sar = L2CAP_SAR_CONTINUE;
2191 }
2192 }
2193
2194 return err;
2195}
2196
2197int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2198 u32 priority)
2199{
2200 struct sk_buff *skb;
2201 int err;
2202 struct sk_buff_head seg_queue;
2203
2204 /* Connectionless channel */
2205 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2206 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2207 if (IS_ERR(skb))
2208 return PTR_ERR(skb);
2209
2210 l2cap_do_send(chan, skb);
2211 return len;
2212 }
2213
2214 switch (chan->mode) {
2215 case L2CAP_MODE_BASIC:
2216 /* Check outgoing MTU */
2217 if (len > chan->omtu)
2218 return -EMSGSIZE;
2219
2220 /* Create a basic PDU */
2221 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2222 if (IS_ERR(skb))
2223 return PTR_ERR(skb);
2224
2225 l2cap_do_send(chan, skb);
2226 err = len;
2227 break;
2228
2229 case L2CAP_MODE_ERTM:
2230 case L2CAP_MODE_STREAMING:
2231 /* Check outgoing MTU */
2232 if (len > chan->omtu) {
2233 err = -EMSGSIZE;
2234 break;
2235 }
2236
2237 __skb_queue_head_init(&seg_queue);
2238
2239 /* Do segmentation before calling in to the state machine,
2240 * since it's possible to block while waiting for memory
2241 * allocation.
2242 */
2243 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2244
2245 /* The channel could have been closed while segmenting,
2246		 * so check that it is still connected.
2247 */
2248 if (chan->state != BT_CONNECTED) {
2249 __skb_queue_purge(&seg_queue);
2250 err = -ENOTCONN;
2251 }
2252
2253 if (err)
2254 break;
2255
2256 if (chan->mode == L2CAP_MODE_ERTM)
2257 err = l2cap_tx(chan, NULL, &seg_queue,
2258 L2CAP_EV_DATA_REQUEST);
2259 else
2260 err = l2cap_streaming_send(chan, &seg_queue);
2261
2262 if (!err)
2263 err = len;
2264
2265 /* If the skbs were not queued for sending, they'll still be in
2266 * seg_queue and need to be purged.
2267 */
2268 __skb_queue_purge(&seg_queue);
2269 break;
2270
2271 default:
2272		BT_DBG("bad mode %1.1x", chan->mode);
2273 err = -EBADFD;
2274 }
2275
2276 return err;
2277}
2278
2279static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2280{
2281 struct l2cap_ctrl control;
2282 u16 seq;
2283
2284 BT_DBG("chan %p, txseq %d", chan, txseq);
2285
2286 memset(&control, 0, sizeof(control));
2287 control.sframe = 1;
2288 control.super = L2CAP_SUPER_SREJ;
2289
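	/* Request retransmission of every frame between the next
	 * expected sequence number and the received txseq that is not
	 * already sitting in the out-of-order receive queue.
	 */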
2290 for (seq = chan->expected_tx_seq; seq != txseq;
2291 seq = __next_seq(chan, seq)) {
2292 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2293 control.reqseq = seq;
2294 l2cap_send_sframe(chan, &control);
2295 l2cap_seq_list_append(&chan->srej_list, seq);
2296 }
2297 }
2298
2299 chan->expected_tx_seq = __next_seq(chan, txseq);
2300}
2301
2302static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2303{
2304 struct l2cap_ctrl control;
2305
2306 BT_DBG("chan %p", chan);
2307
2308 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2309 return;
2310
2311 memset(&control, 0, sizeof(control));
2312 control.sframe = 1;
2313 control.super = L2CAP_SUPER_SREJ;
2314 control.reqseq = chan->srej_list.tail;
2315 l2cap_send_sframe(chan, &control);
2316}
2317
2318static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2319{
2320 struct l2cap_ctrl control;
2321 u16 initial_head;
2322 u16 seq;
2323
2324 BT_DBG("chan %p, txseq %d", chan, txseq);
2325
2326 memset(&control, 0, sizeof(control));
2327 control.sframe = 1;
2328 control.super = L2CAP_SUPER_SREJ;
2329
2330 /* Capture initial list head to allow only one pass through the list. */
2331 initial_head = chan->srej_list.head;
2332
2333 do {
2334 seq = l2cap_seq_list_pop(&chan->srej_list);
2335 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2336 break;
2337
2338 control.reqseq = seq;
2339 l2cap_send_sframe(chan, &control);
2340 l2cap_seq_list_append(&chan->srej_list, seq);
2341 } while (chan->srej_list.head != initial_head);
2342}
2343
2344static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2345{
2346 struct sk_buff *acked_skb;
2347 u16 ackseq;
2348
2349 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2350
2351 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2352 return;
2353
2354 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2355 chan->expected_ack_seq, chan->unacked_frames);
2356
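	/* Everything up to, but not including, reqseq has been
	 * acknowledged and can be dropped from the transmit queue.
	 */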
2357 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2358 ackseq = __next_seq(chan, ackseq)) {
2359
2360 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2361 if (acked_skb) {
2362 skb_unlink(acked_skb, &chan->tx_q);
2363 kfree_skb(acked_skb);
2364 chan->unacked_frames--;
2365 }
2366 }
2367
2368 chan->expected_ack_seq = reqseq;
2369
2370 if (chan->unacked_frames == 0)
2371 __clear_retrans_timer(chan);
2372
2373 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2374}
2375
2376static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2377{
2378 BT_DBG("chan %p", chan);
2379
2380 chan->expected_tx_seq = chan->buffer_seq;
2381 l2cap_seq_list_clear(&chan->srej_list);
2382 skb_queue_purge(&chan->srej_q);
2383 chan->rx_state = L2CAP_RX_STATE_RECV;
2384}
2385
2386static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
2387 struct l2cap_ctrl *control,
2388 struct sk_buff_head *skbs, u8 event)
2389{
2390 int err = 0;
2391
2392 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2393 event);
2394
2395 switch (event) {
2396 case L2CAP_EV_DATA_REQUEST:
2397 if (chan->tx_send_head == NULL)
2398 chan->tx_send_head = skb_peek(skbs);
2399
2400 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2401 l2cap_ertm_send(chan);
2402 break;
2403 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2404 BT_DBG("Enter LOCAL_BUSY");
2405 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2406
2407 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2408 /* The SREJ_SENT state must be aborted if we are to
2409 * enter the LOCAL_BUSY state.
2410 */
2411 l2cap_abort_rx_srej_sent(chan);
2412 }
2413
2414 l2cap_send_ack(chan);
2415
2416 break;
2417 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2418 BT_DBG("Exit LOCAL_BUSY");
2419 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2420
2421 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2422 struct l2cap_ctrl local_control;
2423
2424 memset(&local_control, 0, sizeof(local_control));
2425 local_control.sframe = 1;
2426 local_control.super = L2CAP_SUPER_RR;
2427 local_control.poll = 1;
2428 local_control.reqseq = chan->buffer_seq;
2429 l2cap_send_sframe(chan, &local_control);
2430
2431 chan->retry_count = 1;
2432 __set_monitor_timer(chan);
2433 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2434 }
2435 break;
2436 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2437 l2cap_process_reqseq(chan, control->reqseq);
2438 break;
2439 case L2CAP_EV_EXPLICIT_POLL:
2440 l2cap_send_rr_or_rnr(chan, 1);
2441 chan->retry_count = 1;
2442 __set_monitor_timer(chan);
2443 __clear_ack_timer(chan);
2444 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2445 break;
2446 case L2CAP_EV_RETRANS_TO:
2447 l2cap_send_rr_or_rnr(chan, 1);
2448 chan->retry_count = 1;
2449 __set_monitor_timer(chan);
2450 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2451 break;
2452 case L2CAP_EV_RECV_FBIT:
2453 /* Nothing to process */
2454 break;
2455 default:
2456 break;
2457 }
2458
2459 return err;
2460}
2461
2462static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2463 struct l2cap_ctrl *control,
2464 struct sk_buff_head *skbs, u8 event)
2465{
2466 int err = 0;
2467
2468 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2469 event);
2470
2471 switch (event) {
2472 case L2CAP_EV_DATA_REQUEST:
2473 if (chan->tx_send_head == NULL)
2474 chan->tx_send_head = skb_peek(skbs);
2475 /* Queue data, but don't send. */
2476 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2477 break;
2478 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2479 BT_DBG("Enter LOCAL_BUSY");
2480 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2481
2482 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2483 /* The SREJ_SENT state must be aborted if we are to
2484 * enter the LOCAL_BUSY state.
2485 */
2486 l2cap_abort_rx_srej_sent(chan);
2487 }
2488
2489 l2cap_send_ack(chan);
2490
2491 break;
2492 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2493 BT_DBG("Exit LOCAL_BUSY");
2494 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2495
2496 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2497 struct l2cap_ctrl local_control;
2498 memset(&local_control, 0, sizeof(local_control));
2499 local_control.sframe = 1;
2500 local_control.super = L2CAP_SUPER_RR;
2501 local_control.poll = 1;
2502 local_control.reqseq = chan->buffer_seq;
2503 l2cap_send_sframe(chan, &local_control);
2504
2505 chan->retry_count = 1;
2506 __set_monitor_timer(chan);
2507 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2508 }
2509 break;
2510 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2511 l2cap_process_reqseq(chan, control->reqseq);
2512
2513 /* Fall through */
2514
2515 case L2CAP_EV_RECV_FBIT:
2516 if (control && control->final) {
2517 __clear_monitor_timer(chan);
2518 if (chan->unacked_frames > 0)
2519 __set_retrans_timer(chan);
2520 chan->retry_count = 0;
2521 chan->tx_state = L2CAP_TX_STATE_XMIT;
2522			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2523 }
2524 break;
2525 case L2CAP_EV_EXPLICIT_POLL:
2526 /* Ignore */
2527 break;
2528 case L2CAP_EV_MONITOR_TO:
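		/* A max_tx of zero means the poll may be retried without
		 * limit.
		 */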
2529 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2530 l2cap_send_rr_or_rnr(chan, 1);
2531 __set_monitor_timer(chan);
2532 chan->retry_count++;
2533 } else {
2534 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2535 }
2536 break;
2537 default:
2538 break;
2539 }
2540
2541 return err;
2542}
2543
2544static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2545 struct sk_buff_head *skbs, u8 event)
2546{
2547 int err = 0;
2548
2549 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2550 chan, control, skbs, event, chan->tx_state);
2551
2552 switch (chan->tx_state) {
2553 case L2CAP_TX_STATE_XMIT:
2554 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2555 break;
2556 case L2CAP_TX_STATE_WAIT_F:
2557 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2558 break;
2559 default:
2560 /* Ignore event */
2561 break;
2562 }
2563
2564 return err;
2565}
2566
2567static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2568 struct l2cap_ctrl *control)
2569{
2570 BT_DBG("chan %p, control %p", chan, control);
2571 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2572}
2573
2574static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2575 struct l2cap_ctrl *control)
2576{
2577 BT_DBG("chan %p, control %p", chan, control);
2578 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2579}
2580
2581/* Copy frame to all raw sockets on that connection */
2582static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2583{
2584 struct sk_buff *nskb;
2585 struct l2cap_chan *chan;
2586
2587 BT_DBG("conn %p", conn);
2588
2589 mutex_lock(&conn->chan_lock);
2590
2591 list_for_each_entry(chan, &conn->chan_l, list) {
2592 struct sock *sk = chan->sk;
2593 if (chan->chan_type != L2CAP_CHAN_RAW)
2594 continue;
2595
2596 /* Don't send frame to the socket it came from */
2597 if (skb->sk == sk)
2598 continue;
2599 nskb = skb_clone(skb, GFP_ATOMIC);
2600 if (!nskb)
2601 continue;
2602
2603 if (chan->ops->recv(chan->data, nskb))
2604 kfree_skb(nskb);
2605 }
2606
2607 mutex_unlock(&conn->chan_lock);
2608}
2609
2610/* ---- L2CAP signalling commands ---- */
2611static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2612 u8 code, u8 ident, u16 dlen, void *data)
2613{
2614 struct sk_buff *skb, **frag;
2615 struct l2cap_cmd_hdr *cmd;
2616 struct l2cap_hdr *lh;
2617 int len, count;
2618
2619 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2620 conn, code, ident, dlen);
2621
2622 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2623 count = min_t(unsigned int, conn->mtu, len);
2624
2625 skb = bt_skb_alloc(count, GFP_ATOMIC);
2626 if (!skb)
2627 return NULL;
2628
2629 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2630 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2631
2632 if (conn->hcon->type == LE_LINK)
2633 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2634 else
2635 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2636
2637 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2638 cmd->code = code;
2639 cmd->ident = ident;
2640 cmd->len = cpu_to_le16(dlen);
2641
2642 if (dlen) {
2643 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2644 memcpy(skb_put(skb, count), data, count);
2645 data += count;
2646 }
2647
2648 len -= skb->len;
2649
2650 /* Continuation fragments (no L2CAP header) */
2651 frag = &skb_shinfo(skb)->frag_list;
2652 while (len) {
2653 count = min_t(unsigned int, conn->mtu, len);
2654
2655 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2656 if (!*frag)
2657 goto fail;
2658
2659 memcpy(skb_put(*frag, count), data, count);
2660
2661 len -= count;
2662 data += count;
2663
2664 frag = &(*frag)->next;
2665 }
2666
2667 return skb;
2668
2669fail:
2670 kfree_skb(skb);
2671 return NULL;
2672}
2673
2674static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2675{
2676 struct l2cap_conf_opt *opt = *ptr;
2677 int len;
2678
2679 len = L2CAP_CONF_OPT_SIZE + opt->len;
2680 *ptr += len;
2681
2682 *type = opt->type;
2683 *olen = opt->len;
2684
2685 switch (opt->len) {
2686 case 1:
2687 *val = *((u8 *) opt->val);
2688 break;
2689
2690 case 2:
2691 *val = get_unaligned_le16(opt->val);
2692 break;
2693
2694 case 4:
2695 *val = get_unaligned_le32(opt->val);
2696 break;
2697
2698 default:
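		/* Options longer than 4 bytes (e.g. RFC, EFS): return a
		 * pointer to the raw option data rather than a decoded
		 * value.
		 */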
2699 *val = (unsigned long) opt->val;
2700 break;
2701 }
2702
2703 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2704 return len;
2705}
2706
2707static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2708{
2709 struct l2cap_conf_opt *opt = *ptr;
2710
2711 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2712
2713 opt->type = type;
2714 opt->len = len;
2715
2716 switch (len) {
2717 case 1:
2718 *((u8 *) opt->val) = val;
2719 break;
2720
2721 case 2:
2722 put_unaligned_le16(val, opt->val);
2723 break;
2724
2725 case 4:
2726 put_unaligned_le32(val, opt->val);
2727 break;
2728
2729 default:
2730 memcpy(opt->val, (void *) val, len);
2731 break;
2732 }
2733
2734 *ptr += L2CAP_CONF_OPT_SIZE + len;
2735}
2736
2737static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2738{
2739 struct l2cap_conf_efs efs;
2740
2741 switch (chan->mode) {
2742 case L2CAP_MODE_ERTM:
2743 efs.id = chan->local_id;
2744 efs.stype = chan->local_stype;
2745 efs.msdu = cpu_to_le16(chan->local_msdu);
2746 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2747 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2748 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2749 break;
2750
2751 case L2CAP_MODE_STREAMING:
2752 efs.id = 1;
2753 efs.stype = L2CAP_SERV_BESTEFFORT;
2754 efs.msdu = cpu_to_le16(chan->local_msdu);
2755 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2756 efs.acc_lat = 0;
2757 efs.flush_to = 0;
2758 break;
2759
2760 default:
2761 return;
2762 }
2763
2764 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2765 (unsigned long) &efs);
2766}
2767
2768static void l2cap_ack_timeout(struct work_struct *work)
2769{
2770 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2771 ack_timer.work);
2772 u16 frames_to_ack;
2773
2774 BT_DBG("chan %p", chan);
2775
2776 l2cap_chan_lock(chan);
2777
2778 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2779 chan->last_acked_seq);
2780
2781 if (frames_to_ack)
2782 l2cap_send_rr_or_rnr(chan, 0);
2783
2784 l2cap_chan_unlock(chan);
2785 l2cap_chan_put(chan);
2786}
2787
2788static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2789{
2790 int err;
2791
2792 chan->next_tx_seq = 0;
2793 chan->expected_tx_seq = 0;
2794 chan->expected_ack_seq = 0;
2795 chan->unacked_frames = 0;
2796 chan->buffer_seq = 0;
2797 chan->frames_sent = 0;
2798 chan->last_acked_seq = 0;
2799 chan->sdu = NULL;
2800 chan->sdu_last_frag = NULL;
2801 chan->sdu_len = 0;
2802
2803 skb_queue_head_init(&chan->tx_q);
2804
2805 if (chan->mode != L2CAP_MODE_ERTM)
2806 return 0;
2807
2808 chan->rx_state = L2CAP_RX_STATE_RECV;
2809 chan->tx_state = L2CAP_TX_STATE_XMIT;
2810
2811 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2812 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2813 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2814
2815 skb_queue_head_init(&chan->srej_q);
2816
2817 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2818 if (err < 0)
2819 return err;
2820
2821 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2822 if (err < 0)
2823 l2cap_seq_list_free(&chan->srej_list);
2824
2825 return err;
2826}
2827
2828static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2829{
2830 switch (mode) {
2831 case L2CAP_MODE_STREAMING:
2832 case L2CAP_MODE_ERTM:
2833 if (l2cap_mode_supported(mode, remote_feat_mask))
2834 return mode;
2835 /* fall through */
2836 default:
2837 return L2CAP_MODE_BASIC;
2838 }
2839}
2840
2841static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2842{
2843 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2844}
2845
2846static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2847{
2848 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2849}
2850
2851static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2852{
2853 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2854 __l2cap_ews_supported(chan)) {
2855 /* use extended control field */
2856 set_bit(FLAG_EXT_CTRL, &chan->flags);
2857 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2858 } else {
2859 chan->tx_win = min_t(u16, chan->tx_win,
2860 L2CAP_DEFAULT_TX_WINDOW);
2861 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2862 }
2863}
2864
2865static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2866{
2867 struct l2cap_conf_req *req = data;
2868 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2869 void *ptr = req->data;
2870 u16 size;
2871
2872 BT_DBG("chan %p", chan);
2873
2874 if (chan->num_conf_req || chan->num_conf_rsp)
2875 goto done;
2876
2877 switch (chan->mode) {
2878 case L2CAP_MODE_STREAMING:
2879 case L2CAP_MODE_ERTM:
2880 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2881 break;
2882
2883 if (__l2cap_efs_supported(chan))
2884 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2885
2886 /* fall through */
2887 default:
2888 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2889 break;
2890 }
2891
2892done:
2893 if (chan->imtu != L2CAP_DEFAULT_MTU)
2894 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2895
2896 switch (chan->mode) {
2897 case L2CAP_MODE_BASIC:
2898 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2899 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2900 break;
2901
2902 rfc.mode = L2CAP_MODE_BASIC;
2903 rfc.txwin_size = 0;
2904 rfc.max_transmit = 0;
2905 rfc.retrans_timeout = 0;
2906 rfc.monitor_timeout = 0;
2907 rfc.max_pdu_size = 0;
2908
2909 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2910 (unsigned long) &rfc);
2911 break;
2912
2913 case L2CAP_MODE_ERTM:
2914 rfc.mode = L2CAP_MODE_ERTM;
2915 rfc.max_transmit = chan->max_tx;
2916 rfc.retrans_timeout = 0;
2917 rfc.monitor_timeout = 0;
2918
2919 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2920 L2CAP_EXT_HDR_SIZE -
2921 L2CAP_SDULEN_SIZE -
2922 L2CAP_FCS_SIZE);
2923 rfc.max_pdu_size = cpu_to_le16(size);
2924
2925 l2cap_txwin_setup(chan);
2926
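		/* The RFC option can only describe the enhanced (<= 63)
		 * window; with extended control a larger window is
		 * advertised separately via the EWS option below.
		 */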
2927 rfc.txwin_size = min_t(u16, chan->tx_win,
2928 L2CAP_DEFAULT_TX_WINDOW);
2929
2930 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2931 (unsigned long) &rfc);
2932
2933 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2934 l2cap_add_opt_efs(&ptr, chan);
2935
2936 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2937 break;
2938
2939 if (chan->fcs == L2CAP_FCS_NONE ||
2940 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2941 chan->fcs = L2CAP_FCS_NONE;
2942 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2943 }
2944
2945 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2946 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2947 chan->tx_win);
2948 break;
2949
2950 case L2CAP_MODE_STREAMING:
2951 l2cap_txwin_setup(chan);
2952 rfc.mode = L2CAP_MODE_STREAMING;
2953 rfc.txwin_size = 0;
2954 rfc.max_transmit = 0;
2955 rfc.retrans_timeout = 0;
2956 rfc.monitor_timeout = 0;
2957
2958 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2959 L2CAP_EXT_HDR_SIZE -
2960 L2CAP_SDULEN_SIZE -
2961 L2CAP_FCS_SIZE);
2962 rfc.max_pdu_size = cpu_to_le16(size);
2963
2964 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2965 (unsigned long) &rfc);
2966
2967 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2968 l2cap_add_opt_efs(&ptr, chan);
2969
2970 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2971 break;
2972
2973 if (chan->fcs == L2CAP_FCS_NONE ||
2974 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2975 chan->fcs = L2CAP_FCS_NONE;
2976 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2977 }
2978 break;
2979 }
2980
2981 req->dcid = cpu_to_le16(chan->dcid);
2982 req->flags = cpu_to_le16(0);
2983
2984 return ptr - data;
2985}
2986
2987static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2988{
2989 struct l2cap_conf_rsp *rsp = data;
2990 void *ptr = rsp->data;
2991 void *req = chan->conf_req;
2992 int len = chan->conf_len;
2993 int type, hint, olen;
2994 unsigned long val;
2995 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2996 struct l2cap_conf_efs efs;
2997 u8 remote_efs = 0;
2998 u16 mtu = L2CAP_DEFAULT_MTU;
2999 u16 result = L2CAP_CONF_SUCCESS;
3000 u16 size;
3001
3002 BT_DBG("chan %p", chan);
3003
3004 while (len >= L2CAP_CONF_OPT_SIZE) {
3005 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3006
3007 hint = type & L2CAP_CONF_HINT;
3008 type &= L2CAP_CONF_MASK;
3009
3010 switch (type) {
3011 case L2CAP_CONF_MTU:
3012 mtu = val;
3013 break;
3014
3015 case L2CAP_CONF_FLUSH_TO:
3016 chan->flush_to = val;
3017 break;
3018
3019 case L2CAP_CONF_QOS:
3020 break;
3021
3022 case L2CAP_CONF_RFC:
3023 if (olen == sizeof(rfc))
3024 memcpy(&rfc, (void *) val, olen);
3025 break;
3026
3027 case L2CAP_CONF_FCS:
3028 if (val == L2CAP_FCS_NONE)
3029 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
3030 break;
3031
3032 case L2CAP_CONF_EFS:
3033 remote_efs = 1;
3034 if (olen == sizeof(efs))
3035 memcpy(&efs, (void *) val, olen);
3036 break;
3037
3038 case L2CAP_CONF_EWS:
3039 if (!enable_hs)
3040 return -ECONNREFUSED;
3041
3042 set_bit(FLAG_EXT_CTRL, &chan->flags);
3043 set_bit(CONF_EWS_RECV, &chan->conf_state);
3044 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3045 chan->remote_tx_win = val;
3046 break;
3047
3048 default:
3049 if (hint)
3050 break;
3051
3052 result = L2CAP_CONF_UNKNOWN;
3053 *((u8 *) ptr++) = type;
3054 break;
3055 }
3056 }
3057
3058 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3059 goto done;
3060
3061 switch (chan->mode) {
3062 case L2CAP_MODE_STREAMING:
3063 case L2CAP_MODE_ERTM:
3064 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3065 chan->mode = l2cap_select_mode(rfc.mode,
3066 chan->conn->feat_mask);
3067 break;
3068 }
3069
3070 if (remote_efs) {
3071 if (__l2cap_efs_supported(chan))
3072 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3073 else
3074 return -ECONNREFUSED;
3075 }
3076
3077 if (chan->mode != rfc.mode)
3078 return -ECONNREFUSED;
3079
3080 break;
3081 }
3082
3083done:
3084 if (chan->mode != rfc.mode) {
3085 result = L2CAP_CONF_UNACCEPT;
3086 rfc.mode = chan->mode;
3087
3088 if (chan->num_conf_rsp == 1)
3089 return -ECONNREFUSED;
3090
3091 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3092 sizeof(rfc), (unsigned long) &rfc);
3093 }
3094
3095 if (result == L2CAP_CONF_SUCCESS) {
3096 /* Configure output options and let the other side know
3097 * which ones we don't like. */
3098
3099 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3100 result = L2CAP_CONF_UNACCEPT;
3101 else {
3102 chan->omtu = mtu;
3103 set_bit(CONF_MTU_DONE, &chan->conf_state);
3104 }
3105 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3106
3107 if (remote_efs) {
3108 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3109 efs.stype != L2CAP_SERV_NOTRAFIC &&
3110 efs.stype != chan->local_stype) {
3111
3112 result = L2CAP_CONF_UNACCEPT;
3113
3114 if (chan->num_conf_req >= 1)
3115 return -ECONNREFUSED;
3116
3117 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3118 sizeof(efs),
3119 (unsigned long) &efs);
3120 } else {
3121 /* Send PENDING Conf Rsp */
3122 result = L2CAP_CONF_PENDING;
3123 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3124 }
3125 }
3126
3127 switch (rfc.mode) {
3128 case L2CAP_MODE_BASIC:
3129 chan->fcs = L2CAP_FCS_NONE;
3130 set_bit(CONF_MODE_DONE, &chan->conf_state);
3131 break;
3132
3133 case L2CAP_MODE_ERTM:
3134 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3135 chan->remote_tx_win = rfc.txwin_size;
3136 else
3137 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3138
3139 chan->remote_max_tx = rfc.max_transmit;
3140
3141 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3142 chan->conn->mtu -
3143 L2CAP_EXT_HDR_SIZE -
3144 L2CAP_SDULEN_SIZE -
3145 L2CAP_FCS_SIZE);
3146 rfc.max_pdu_size = cpu_to_le16(size);
3147 chan->remote_mps = size;
3148
3149 rfc.retrans_timeout =
3150 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3151 rfc.monitor_timeout =
3152 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3153
3154 set_bit(CONF_MODE_DONE, &chan->conf_state);
3155
3156 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3157 sizeof(rfc), (unsigned long) &rfc);
3158
3159 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3160 chan->remote_id = efs.id;
3161 chan->remote_stype = efs.stype;
3162 chan->remote_msdu = le16_to_cpu(efs.msdu);
3163 chan->remote_flush_to =
3164 le32_to_cpu(efs.flush_to);
3165 chan->remote_acc_lat =
3166 le32_to_cpu(efs.acc_lat);
3167 chan->remote_sdu_itime =
3168 le32_to_cpu(efs.sdu_itime);
3169 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3170 sizeof(efs), (unsigned long) &efs);
3171 }
3172 break;
3173
3174 case L2CAP_MODE_STREAMING:
3175 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3176 chan->conn->mtu -
3177 L2CAP_EXT_HDR_SIZE -
3178 L2CAP_SDULEN_SIZE -
3179 L2CAP_FCS_SIZE);
3180 rfc.max_pdu_size = cpu_to_le16(size);
3181 chan->remote_mps = size;
3182
3183 set_bit(CONF_MODE_DONE, &chan->conf_state);
3184
3185 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3186 sizeof(rfc), (unsigned long) &rfc);
3187
3188 break;
3189
3190 default:
3191 result = L2CAP_CONF_UNACCEPT;
3192
3193 memset(&rfc, 0, sizeof(rfc));
3194 rfc.mode = chan->mode;
3195 }
3196
3197 if (result == L2CAP_CONF_SUCCESS)
3198 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3199 }
3200 rsp->scid = cpu_to_le16(chan->dcid);
3201 rsp->result = cpu_to_le16(result);
3202 rsp->flags = cpu_to_le16(0x0000);
3203
3204 return ptr - data;
3205}
3206
3207static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3208{
3209 struct l2cap_conf_req *req = data;
3210 void *ptr = req->data;
3211 int type, olen;
3212 unsigned long val;
3213 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3214 struct l2cap_conf_efs efs;
3215
3216 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3217
3218 while (len >= L2CAP_CONF_OPT_SIZE) {
3219 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3220
3221 switch (type) {
3222 case L2CAP_CONF_MTU:
3223 if (val < L2CAP_DEFAULT_MIN_MTU) {
3224 *result = L2CAP_CONF_UNACCEPT;
3225 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3226 } else
3227 chan->imtu = val;
3228 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3229 break;
3230
3231 case L2CAP_CONF_FLUSH_TO:
3232 chan->flush_to = val;
3233 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3234 2, chan->flush_to);
3235 break;
3236
3237 case L2CAP_CONF_RFC:
3238 if (olen == sizeof(rfc))
3239 memcpy(&rfc, (void *)val, olen);
3240
3241 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3242 rfc.mode != chan->mode)
3243 return -ECONNREFUSED;
3244
3245 chan->fcs = 0;
3246
3247 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3248 sizeof(rfc), (unsigned long) &rfc);
3249 break;
3250
3251 case L2CAP_CONF_EWS:
3252 chan->tx_win = min_t(u16, val,
3253 L2CAP_DEFAULT_EXT_WINDOW);
3254 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3255 chan->tx_win);
3256 break;
3257
3258 case L2CAP_CONF_EFS:
3259 if (olen == sizeof(efs))
3260 memcpy(&efs, (void *)val, olen);
3261
3262 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3263 efs.stype != L2CAP_SERV_NOTRAFIC &&
3264 efs.stype != chan->local_stype)
3265 return -ECONNREFUSED;
3266
3267 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3268 sizeof(efs), (unsigned long) &efs);
3269 break;
3270 }
3271 }
3272
3273 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3274 return -ECONNREFUSED;
3275
3276 chan->mode = rfc.mode;
3277
3278 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3279 switch (rfc.mode) {
3280 case L2CAP_MODE_ERTM:
3281 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3282 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3283 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3284
3285 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3286 chan->local_msdu = le16_to_cpu(efs.msdu);
3287 chan->local_sdu_itime =
3288 le32_to_cpu(efs.sdu_itime);
3289 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3290 chan->local_flush_to =
3291 le32_to_cpu(efs.flush_to);
3292 }
3293 break;
3294
3295 case L2CAP_MODE_STREAMING:
3296 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3297 }
3298 }
3299
3300 req->dcid = cpu_to_le16(chan->dcid);
3301 req->flags = cpu_to_le16(0x0000);
3302
3303 return ptr - data;
3304}
3305
3306static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3307{
3308 struct l2cap_conf_rsp *rsp = data;
3309 void *ptr = rsp->data;
3310
3311 BT_DBG("chan %p", chan);
3312
3313 rsp->scid = cpu_to_le16(chan->dcid);
3314 rsp->result = cpu_to_le16(result);
3315 rsp->flags = cpu_to_le16(flags);
3316
3317 return ptr - data;
3318}
3319
3320void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3321{
3322 struct l2cap_conn_rsp rsp;
3323 struct l2cap_conn *conn = chan->conn;
3324 u8 buf[128];
3325
3326 rsp.scid = cpu_to_le16(chan->dcid);
3327 rsp.dcid = cpu_to_le16(chan->scid);
3328 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3329 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3330 l2cap_send_cmd(conn, chan->ident,
3331 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3332
3333 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3334 return;
3335
3336 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3337 l2cap_build_conf_req(chan, buf), buf);
3338 chan->num_conf_req++;
3339}
3340
3341static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3342{
3343 int type, olen;
3344 unsigned long val;
3345 struct l2cap_conf_rfc rfc;
3346
3347 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3348
3349 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3350 return;
3351
3352 while (len >= L2CAP_CONF_OPT_SIZE) {
3353 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3354
3355 switch (type) {
3356 case L2CAP_CONF_RFC:
3357 if (olen == sizeof(rfc))
3358 memcpy(&rfc, (void *)val, olen);
3359 goto done;
3360 }
3361 }
3362
3363 /* Use sane default values in case a misbehaving remote device
3364 * did not send an RFC option.
3365 */
3366 rfc.mode = chan->mode;
3367 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3368 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3369 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3370
3371 BT_ERR("Expected RFC option was not found, using defaults");
3372
3373done:
3374 switch (rfc.mode) {
3375 case L2CAP_MODE_ERTM:
3376 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3377 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3378 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3379 break;
3380 case L2CAP_MODE_STREAMING:
3381 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3382 }
3383}
3384
3385static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3386{
3387 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3388
3389 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3390 return 0;
3391
3392 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3393 cmd->ident == conn->info_ident) {
3394 cancel_delayed_work(&conn->info_timer);
3395
3396 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3397 conn->info_ident = 0;
3398
3399 l2cap_conn_start(conn);
3400 }
3401
3402 return 0;
3403}
3404
3405static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3406{
3407 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3408 struct l2cap_conn_rsp rsp;
3409 struct l2cap_chan *chan = NULL, *pchan;
3410 struct sock *parent, *sk = NULL;
3411 int result, status = L2CAP_CS_NO_INFO;
3412
3413 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3414 __le16 psm = req->psm;
3415
3416 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3417
3418	/* Check if we have a socket listening on this psm */
3419 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3420 if (!pchan) {
3421 result = L2CAP_CR_BAD_PSM;
3422 goto sendresp;
3423 }
3424
3425 parent = pchan->sk;
3426
3427 mutex_lock(&conn->chan_lock);
3428 lock_sock(parent);
3429
3430 /* Check if the ACL is secure enough (if not SDP) */
3431 if (psm != cpu_to_le16(0x0001) &&
3432 !hci_conn_check_link_mode(conn->hcon)) {
3433 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3434 result = L2CAP_CR_SEC_BLOCK;
3435 goto response;
3436 }
3437
3438 result = L2CAP_CR_NO_MEM;
3439
3440 /* Check for backlog size */
3441 if (sk_acceptq_is_full(parent)) {
3442 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3443 goto response;
3444 }
3445
3446 chan = pchan->ops->new_connection(pchan->data);
3447 if (!chan)
3448 goto response;
3449
3450 sk = chan->sk;
3451
3452	/* Check if we already have a channel with that dcid */
3453 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3454 sock_set_flag(sk, SOCK_ZAPPED);
3455 chan->ops->close(chan->data);
3456 goto response;
3457 }
3458
3459 hci_conn_hold(conn->hcon);
3460
3461 bacpy(&bt_sk(sk)->src, conn->src);
3462 bacpy(&bt_sk(sk)->dst, conn->dst);
3463 chan->psm = psm;
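	/* The remote's source CID becomes our destination CID; the
	 * locally allocated scid is reported back as the DCID in the
	 * response below.
	 */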
3464 chan->dcid = scid;
3465
3466 bt_accept_enqueue(parent, sk);
3467
3468 __l2cap_chan_add(conn, chan);
3469
3470 dcid = chan->scid;
3471
3472 __set_chan_timer(chan, sk->sk_sndtimeo);
3473
3474 chan->ident = cmd->ident;
3475
3476 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3477 if (l2cap_chan_check_security(chan)) {
3478 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3479 __l2cap_state_change(chan, BT_CONNECT2);
3480 result = L2CAP_CR_PEND;
3481 status = L2CAP_CS_AUTHOR_PEND;
3482 parent->sk_data_ready(parent, 0);
3483 } else {
3484 __l2cap_state_change(chan, BT_CONFIG);
3485 result = L2CAP_CR_SUCCESS;
3486 status = L2CAP_CS_NO_INFO;
3487 }
3488 } else {
3489 __l2cap_state_change(chan, BT_CONNECT2);
3490 result = L2CAP_CR_PEND;
3491 status = L2CAP_CS_AUTHEN_PEND;
3492 }
3493 } else {
3494 __l2cap_state_change(chan, BT_CONNECT2);
3495 result = L2CAP_CR_PEND;
3496 status = L2CAP_CS_NO_INFO;
3497 }
3498
3499response:
3500 release_sock(parent);
3501 mutex_unlock(&conn->chan_lock);
3502
3503sendresp:
3504 rsp.scid = cpu_to_le16(scid);
3505 rsp.dcid = cpu_to_le16(dcid);
3506 rsp.result = cpu_to_le16(result);
3507 rsp.status = cpu_to_le16(status);
3508 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3509
3510 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3511 struct l2cap_info_req info;
3512 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3513
3514 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3515 conn->info_ident = l2cap_get_ident(conn);
3516
3517 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3518
3519 l2cap_send_cmd(conn, conn->info_ident,
3520 L2CAP_INFO_REQ, sizeof(info), &info);
3521 }
3522
3523 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3524 result == L2CAP_CR_SUCCESS) {
3525 u8 buf[128];
3526 set_bit(CONF_REQ_SENT, &chan->conf_state);
3527 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3528 l2cap_build_conf_req(chan, buf), buf);
3529 chan->num_conf_req++;
3530 }
3531
3532 return 0;
3533}
3534
3535static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3536{
3537 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3538 u16 scid, dcid, result, status;
3539 struct l2cap_chan *chan;
3540 u8 req[128];
3541 int err;
3542
3543 scid = __le16_to_cpu(rsp->scid);
3544 dcid = __le16_to_cpu(rsp->dcid);
3545 result = __le16_to_cpu(rsp->result);
3546 status = __le16_to_cpu(rsp->status);
3547
3548 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3549 dcid, scid, result, status);
3550
3551 mutex_lock(&conn->chan_lock);
3552
3553 if (scid) {
3554 chan = __l2cap_get_chan_by_scid(conn, scid);
3555 if (!chan) {
3556 err = -EFAULT;
3557 goto unlock;
3558 }
3559 } else {
3560 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3561 if (!chan) {
3562 err = -EFAULT;
3563 goto unlock;
3564 }
3565 }
3566
3567 err = 0;
3568
3569 l2cap_chan_lock(chan);
3570
3571 switch (result) {
3572 case L2CAP_CR_SUCCESS:
3573 l2cap_state_change(chan, BT_CONFIG);
3574 chan->ident = 0;
3575 chan->dcid = dcid;
3576 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3577
3578 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3579 break;
3580
3581 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3582 l2cap_build_conf_req(chan, req), req);
3583 chan->num_conf_req++;
3584 break;
3585
3586 case L2CAP_CR_PEND:
3587 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3588 break;
3589
3590 default:
3591 l2cap_chan_del(chan, ECONNREFUSED);
3592 break;
3593 }
3594
3595 l2cap_chan_unlock(chan);
3596
3597unlock:
3598 mutex_unlock(&conn->chan_lock);
3599
3600 return err;
3601}
3602
3603static inline void set_default_fcs(struct l2cap_chan *chan)
3604{
3605 /* FCS is enabled only in ERTM or streaming mode, if one or both
3606 * sides request it.
3607 */
3608 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3609 chan->fcs = L2CAP_FCS_NONE;
3610 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3611 chan->fcs = L2CAP_FCS_CRC16;
3612}
3613
3614static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3615{
3616 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3617 u16 dcid, flags;
3618 u8 rsp[64];
3619 struct l2cap_chan *chan;
3620 int len, err = 0;
3621
3622 dcid = __le16_to_cpu(req->dcid);
3623 flags = __le16_to_cpu(req->flags);
3624
3625 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3626
3627 chan = l2cap_get_chan_by_scid(conn, dcid);
3628 if (!chan)
3629 return -ENOENT;
3630
3631 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3632 struct l2cap_cmd_rej_cid rej;
3633
3634 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3635 rej.scid = cpu_to_le16(chan->scid);
3636 rej.dcid = cpu_to_le16(chan->dcid);
3637
3638 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3639 sizeof(rej), &rej);
3640 goto unlock;
3641 }
3642
3643 /* Reject if config buffer is too small. */
3644 len = cmd_len - sizeof(*req);
3645 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3646 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3647 l2cap_build_conf_rsp(chan, rsp,
3648 L2CAP_CONF_REJECT, flags), rsp);
3649 goto unlock;
3650 }
3651
3652 /* Store config. */
3653 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3654 chan->conf_len += len;
3655
3656 if (flags & 0x0001) {
3657 /* Incomplete config. Send empty response. */
3658 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3659 l2cap_build_conf_rsp(chan, rsp,
3660 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3661 goto unlock;
3662 }
3663
3664 /* Complete config. */
3665 len = l2cap_parse_conf_req(chan, rsp);
3666 if (len < 0) {
3667 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3668 goto unlock;
3669 }
3670
3671 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3672 chan->num_conf_rsp++;
3673
3674 /* Reset config buffer. */
3675 chan->conf_len = 0;
3676
3677 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3678 goto unlock;
3679
3680 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3681 set_default_fcs(chan);
3682
3683 l2cap_state_change(chan, BT_CONNECTED);
3684
3685 if (chan->mode == L2CAP_MODE_ERTM ||
3686 chan->mode == L2CAP_MODE_STREAMING)
3687 err = l2cap_ertm_init(chan);
3688
3689 if (err < 0)
3690 l2cap_send_disconn_req(chan->conn, chan, -err);
3691 else
3692 l2cap_chan_ready(chan);
3693
3694 goto unlock;
3695 }
3696
3697 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3698 u8 buf[64];
3699 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3700 l2cap_build_conf_req(chan, buf), buf);
3701 chan->num_conf_req++;
3702 }
3703
3704	/* Got Conf Rsp PENDING from remote side and assume we sent
3705	 * Conf Rsp PENDING in the code above */
3706 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3707 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3708
3709 /* check compatibility */
3710
3711 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3712 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3713
3714 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3715 l2cap_build_conf_rsp(chan, rsp,
3716 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3717 }
3718
3719unlock:
3720 l2cap_chan_unlock(chan);
3721 return err;
3722}
3723
3724static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3725{
3726 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3727 u16 scid, flags, result;
3728 struct l2cap_chan *chan;
3729 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3730 int err = 0;
3731
3732 scid = __le16_to_cpu(rsp->scid);
3733 flags = __le16_to_cpu(rsp->flags);
3734 result = __le16_to_cpu(rsp->result);
3735
3736 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3737 result, len);
3738
3739 chan = l2cap_get_chan_by_scid(conn, scid);
3740 if (!chan)
3741 return 0;
3742
3743 switch (result) {
3744 case L2CAP_CONF_SUCCESS:
3745 l2cap_conf_rfc_get(chan, rsp->data, len);
3746 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3747 break;
3748
3749 case L2CAP_CONF_PENDING:
3750 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3751
3752 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3753 char buf[64];
3754
3755 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3756 buf, &result);
3757 if (len < 0) {
3758 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3759 goto done;
3760 }
3761
3762 /* check compatibility */
3763
3764 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3765 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3766
3767 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3768 l2cap_build_conf_rsp(chan, buf,
3769 L2CAP_CONF_SUCCESS, 0x0000), buf);
3770 }
3771 goto done;
3772
3773 case L2CAP_CONF_UNACCEPT:
3774 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3775 char req[64];
3776
3777 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3778 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3779 goto done;
3780 }
3781
3782 /* throw out any old stored conf requests */
3783 result = L2CAP_CONF_SUCCESS;
3784 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3785 req, &result);
3786 if (len < 0) {
3787 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3788 goto done;
3789 }
3790
3791 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3792 L2CAP_CONF_REQ, len, req);
3793 chan->num_conf_req++;
3794 if (result != L2CAP_CONF_SUCCESS)
3795 goto done;
3796 break;
3797 }
3798
3799 default:
3800 l2cap_chan_set_err(chan, ECONNRESET);
3801
3802 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3803 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3804 goto done;
3805 }
3806
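	/* Bit 0 of the flags field is the continuation flag; more
	 * configuration data is on the way, so don't finish up yet.
	 */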
3807 if (flags & 0x01)
3808 goto done;
3809
3810 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3811
3812 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3813 set_default_fcs(chan);
3814
3815 l2cap_state_change(chan, BT_CONNECTED);
3816 if (chan->mode == L2CAP_MODE_ERTM ||
3817 chan->mode == L2CAP_MODE_STREAMING)
3818 err = l2cap_ertm_init(chan);
3819
3820 if (err < 0)
3821 l2cap_send_disconn_req(chan->conn, chan, -err);
3822 else
3823 l2cap_chan_ready(chan);
3824 }
3825
3826done:
3827 l2cap_chan_unlock(chan);
3828 return err;
3829}
3830
3831static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3832{
3833 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3834 struct l2cap_disconn_rsp rsp;
3835 u16 dcid, scid;
3836 struct l2cap_chan *chan;
3837 struct sock *sk;
3838
3839 scid = __le16_to_cpu(req->scid);
3840 dcid = __le16_to_cpu(req->dcid);
3841
3842 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3843
3844 mutex_lock(&conn->chan_lock);
3845
3846 chan = __l2cap_get_chan_by_scid(conn, dcid);
3847 if (!chan) {
3848 mutex_unlock(&conn->chan_lock);
3849 return 0;
3850 }
3851
3852 l2cap_chan_lock(chan);
3853
3854 sk = chan->sk;
3855
3856 rsp.dcid = cpu_to_le16(chan->scid);
3857 rsp.scid = cpu_to_le16(chan->dcid);
3858 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3859
3860 lock_sock(sk);
3861 sk->sk_shutdown = SHUTDOWN_MASK;
3862 release_sock(sk);
3863
3864 l2cap_chan_hold(chan);
3865 l2cap_chan_del(chan, ECONNRESET);
3866
3867 l2cap_chan_unlock(chan);
3868
3869 chan->ops->close(chan->data);
3870 l2cap_chan_put(chan);
3871
3872 mutex_unlock(&conn->chan_lock);
3873
3874 return 0;
3875}
3876
3877static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3878{
3879 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3880 u16 dcid, scid;
3881 struct l2cap_chan *chan;
3882
3883 scid = __le16_to_cpu(rsp->scid);
3884 dcid = __le16_to_cpu(rsp->dcid);
3885
3886 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3887
3888 mutex_lock(&conn->chan_lock);
3889
3890 chan = __l2cap_get_chan_by_scid(conn, scid);
3891 if (!chan) {
3892 mutex_unlock(&conn->chan_lock);
3893 return 0;
3894 }
3895
3896 l2cap_chan_lock(chan);
3897
3898 l2cap_chan_hold(chan);
3899 l2cap_chan_del(chan, 0);
3900
3901 l2cap_chan_unlock(chan);
3902
3903 chan->ops->close(chan->data);
3904 l2cap_chan_put(chan);
3905
3906 mutex_unlock(&conn->chan_lock);
3907
3908 return 0;
3909}
3910
3911static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3912{
3913 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3914 u16 type;
3915
3916 type = __le16_to_cpu(req->type);
3917
3918 BT_DBG("type 0x%4.4x", type);
3919
3920 if (type == L2CAP_IT_FEAT_MASK) {
3921 u8 buf[8];
3922 u32 feat_mask = l2cap_feat_mask;
3923 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3924 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3925 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3926 if (!disable_ertm)
3927 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3928 | L2CAP_FEAT_FCS;
3929 if (enable_hs)
3930 feat_mask |= L2CAP_FEAT_EXT_FLOW
3931 | L2CAP_FEAT_EXT_WINDOW;
3932
3933 put_unaligned_le32(feat_mask, rsp->data);
3934 l2cap_send_cmd(conn, cmd->ident,
3935 L2CAP_INFO_RSP, sizeof(buf), buf);
3936 } else if (type == L2CAP_IT_FIXED_CHAN) {
3937 u8 buf[12];
3938 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3939
3940 if (enable_hs)
3941 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3942 else
3943 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3944
3945 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3946 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3947 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3948 l2cap_send_cmd(conn, cmd->ident,
3949 L2CAP_INFO_RSP, sizeof(buf), buf);
3950 } else {
3951 struct l2cap_info_rsp rsp;
3952 rsp.type = cpu_to_le16(type);
3953 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3954 l2cap_send_cmd(conn, cmd->ident,
3955 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3956 }
3957
3958 return 0;
3959}
3960
3961static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3962{
3963 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3964 u16 type, result;
3965
3966 type = __le16_to_cpu(rsp->type);
3967 result = __le16_to_cpu(rsp->result);
3968
3969 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3970
3971	/* L2CAP Info req/rsp are not bound to a channel, so add extra checks */
3972 if (cmd->ident != conn->info_ident ||
3973 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3974 return 0;
3975
3976 cancel_delayed_work(&conn->info_timer);
3977
3978 if (result != L2CAP_IR_SUCCESS) {
3979 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3980 conn->info_ident = 0;
3981
3982 l2cap_conn_start(conn);
3983
3984 return 0;
3985 }
3986
3987 switch (type) {
3988 case L2CAP_IT_FEAT_MASK:
3989 conn->feat_mask = get_unaligned_le32(rsp->data);
3990
3991 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3992 struct l2cap_info_req req;
3993 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3994
3995 conn->info_ident = l2cap_get_ident(conn);
3996
3997 l2cap_send_cmd(conn, conn->info_ident,
3998 L2CAP_INFO_REQ, sizeof(req), &req);
3999 } else {
4000 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4001 conn->info_ident = 0;
4002
4003 l2cap_conn_start(conn);
4004 }
4005 break;
4006
4007 case L2CAP_IT_FIXED_CHAN:
4008 conn->fixed_chan_mask = rsp->data[0];
4009 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4010 conn->info_ident = 0;
4011
4012 l2cap_conn_start(conn);
4013 break;
4014 }
4015
4016 return 0;
4017}
4018
4019static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4020 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4021 void *data)
4022{
4023 struct l2cap_create_chan_req *req = data;
4024 struct l2cap_create_chan_rsp rsp;
4025 u16 psm, scid;
4026
4027 if (cmd_len != sizeof(*req))
4028 return -EPROTO;
4029
4030 if (!enable_hs)
4031 return -EINVAL;
4032
4033 psm = le16_to_cpu(req->psm);
4034 scid = le16_to_cpu(req->scid);
4035
4036 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
4037
4038 /* Placeholder: Always reject */
4039 rsp.dcid = 0;
4040 rsp.scid = cpu_to_le16(scid);
4041 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4042 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4043
4044 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4045 sizeof(rsp), &rsp);
4046
4047 return 0;
4048}
4049
4050static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
4051 struct l2cap_cmd_hdr *cmd, void *data)
4052{
4053 BT_DBG("conn %p", conn);
4054
4055 return l2cap_connect_rsp(conn, cmd, data);
4056}
4057
4058static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4059 u16 icid, u16 result)
4060{
4061 struct l2cap_move_chan_rsp rsp;
4062
4063 BT_DBG("icid %d, result %d", icid, result);
4064
4065 rsp.icid = cpu_to_le16(icid);
4066 rsp.result = cpu_to_le16(result);
4067
4068 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4069}
4070
4071static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4072 struct l2cap_chan *chan, u16 icid, u16 result)
4073{
4074 struct l2cap_move_chan_cfm cfm;
4075 u8 ident;
4076
4077 BT_DBG("icid %d, result %d", icid, result);
4078
4079 ident = l2cap_get_ident(conn);
4080 if (chan)
4081 chan->ident = ident;
4082
4083 cfm.icid = cpu_to_le16(icid);
4084 cfm.result = cpu_to_le16(result);
4085
4086 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4087}
4088
4089static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4090 u16 icid)
4091{
4092 struct l2cap_move_chan_cfm_rsp rsp;
4093
4094 BT_DBG("icid %d", icid);
4095
4096 rsp.icid = cpu_to_le16(icid);
4097 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4098}
4099
4100static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4101 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4102{
4103 struct l2cap_move_chan_req *req = data;
4104 u16 icid = 0;
4105 u16 result = L2CAP_MR_NOT_ALLOWED;
4106
4107 if (cmd_len != sizeof(*req))
4108 return -EPROTO;
4109
4110 icid = le16_to_cpu(req->icid);
4111
4112 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4113
4114 if (!enable_hs)
4115 return -EINVAL;
4116
4117 /* Placeholder: Always refuse */
4118 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4119
4120 return 0;
4121}
4122
4123static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4124 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4125{
4126 struct l2cap_move_chan_rsp *rsp = data;
4127 u16 icid, result;
4128
4129 if (cmd_len != sizeof(*rsp))
4130 return -EPROTO;
4131
4132 icid = le16_to_cpu(rsp->icid);
4133 result = le16_to_cpu(rsp->result);
4134
4135 BT_DBG("icid %d, result %d", icid, result);
4136
4137 /* Placeholder: Always unconfirmed */
4138 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4139
4140 return 0;
4141}
4142
4143static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4144 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4145{
4146 struct l2cap_move_chan_cfm *cfm = data;
4147 u16 icid, result;
4148
4149 if (cmd_len != sizeof(*cfm))
4150 return -EPROTO;
4151
4152 icid = le16_to_cpu(cfm->icid);
4153 result = le16_to_cpu(cfm->result);
4154
4155 BT_DBG("icid %d, result %d", icid, result);
4156
4157 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4158
4159 return 0;
4160}
4161
4162static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4163 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4164{
4165 struct l2cap_move_chan_cfm_rsp *rsp = data;
4166 u16 icid;
4167
4168 if (cmd_len != sizeof(*rsp))
4169 return -EPROTO;
4170
4171 icid = le16_to_cpu(rsp->icid);
4172
4173 BT_DBG("icid %d", icid);
4174
4175 return 0;
4176}
4177
4178static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4179 u16 to_multiplier)
4180{
4181 u16 max_latency;
4182
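	/* The connection interval is in 1.25 ms units (7.5 ms to 4 s),
	 * the supervision timeout in 10 ms units (100 ms to 32 s), and
	 * the slave latency must stay below 500 while keeping the
	 * supervision timeout reachable.
	 */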
4183 if (min > max || min < 6 || max > 3200)
4184 return -EINVAL;
4185
4186 if (to_multiplier < 10 || to_multiplier > 3200)
4187 return -EINVAL;
4188
4189 if (max >= to_multiplier * 8)
4190 return -EINVAL;
4191
4192 max_latency = (to_multiplier * 8 / max) - 1;
4193 if (latency > 499 || latency > max_latency)
4194 return -EINVAL;
4195
4196 return 0;
4197}
4198
4199static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4200 struct l2cap_cmd_hdr *cmd, u8 *data)
4201{
4202 struct hci_conn *hcon = conn->hcon;
4203 struct l2cap_conn_param_update_req *req;
4204 struct l2cap_conn_param_update_rsp rsp;
4205 u16 min, max, latency, to_multiplier, cmd_len;
4206 int err;
4207
4208 if (!(hcon->link_mode & HCI_LM_MASTER))
4209 return -EINVAL;
4210
4211 cmd_len = __le16_to_cpu(cmd->len);
4212 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4213 return -EPROTO;
4214
4215 req = (struct l2cap_conn_param_update_req *) data;
4216 min = __le16_to_cpu(req->min);
4217 max = __le16_to_cpu(req->max);
4218 latency = __le16_to_cpu(req->latency);
4219 to_multiplier = __le16_to_cpu(req->to_multiplier);
4220
4221 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4222 min, max, latency, to_multiplier);
4223
4224 memset(&rsp, 0, sizeof(rsp));
4225
4226 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4227 if (err)
4228 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4229 else
4230 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4231
4232 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4233 sizeof(rsp), &rsp);
4234
4235 if (!err)
4236 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4237
4238 return 0;
4239}
4240
4241static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4242 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4243{
4244 int err = 0;
4245
4246 switch (cmd->code) {
4247 case L2CAP_COMMAND_REJ:
4248 l2cap_command_rej(conn, cmd, data);
4249 break;
4250
4251 case L2CAP_CONN_REQ:
4252 err = l2cap_connect_req(conn, cmd, data);
4253 break;
4254
4255 case L2CAP_CONN_RSP:
4256 err = l2cap_connect_rsp(conn, cmd, data);
4257 break;
4258
4259 case L2CAP_CONF_REQ:
4260 err = l2cap_config_req(conn, cmd, cmd_len, data);
4261 break;
4262
4263 case L2CAP_CONF_RSP:
4264 err = l2cap_config_rsp(conn, cmd, data);
4265 break;
4266
4267 case L2CAP_DISCONN_REQ:
4268 err = l2cap_disconnect_req(conn, cmd, data);
4269 break;
4270
4271 case L2CAP_DISCONN_RSP:
4272 err = l2cap_disconnect_rsp(conn, cmd, data);
4273 break;
4274
4275 case L2CAP_ECHO_REQ:
4276 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4277 break;
4278
4279 case L2CAP_ECHO_RSP:
4280 break;
4281
4282 case L2CAP_INFO_REQ:
4283 err = l2cap_information_req(conn, cmd, data);
4284 break;
4285
4286 case L2CAP_INFO_RSP:
4287 err = l2cap_information_rsp(conn, cmd, data);
4288 break;
4289
4290 case L2CAP_CREATE_CHAN_REQ:
4291 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4292 break;
4293
4294 case L2CAP_CREATE_CHAN_RSP:
4295 err = l2cap_create_channel_rsp(conn, cmd, data);
4296 break;
4297
4298 case L2CAP_MOVE_CHAN_REQ:
4299 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4300 break;
4301
4302 case L2CAP_MOVE_CHAN_RSP:
4303 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4304 break;
4305
4306 case L2CAP_MOVE_CHAN_CFM:
4307 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4308 break;
4309
4310 case L2CAP_MOVE_CHAN_CFM_RSP:
4311 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4312 break;
4313
4314 default:
4315 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4316 err = -EINVAL;
4317 break;
4318 }
4319
4320 return err;
4321}
4322
4323static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4324 struct l2cap_cmd_hdr *cmd, u8 *data)
4325{
4326 switch (cmd->code) {
4327 case L2CAP_COMMAND_REJ:
4328 return 0;
4329
4330 case L2CAP_CONN_PARAM_UPDATE_REQ:
4331 return l2cap_conn_param_update_req(conn, cmd, data);
4332
4333 case L2CAP_CONN_PARAM_UPDATE_RSP:
4334 return 0;
4335
4336 default:
4337 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4338 return -EINVAL;
4339 }
4340}
4341
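/* Process a PDU received on a signaling channel.  A C-frame may carry
 * several commands, so walk the buffer command by command; a command
 * that cannot be parsed or handled is answered with a Command Reject.
 */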
4342static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4343 struct sk_buff *skb)
4344{
4345 u8 *data = skb->data;
4346 int len = skb->len;
4347 struct l2cap_cmd_hdr cmd;
4348 int err;
4349
4350 l2cap_raw_recv(conn, skb);
4351
4352 while (len >= L2CAP_CMD_HDR_SIZE) {
4353 u16 cmd_len;
4354 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4355 data += L2CAP_CMD_HDR_SIZE;
4356 len -= L2CAP_CMD_HDR_SIZE;
4357
4358 cmd_len = le16_to_cpu(cmd.len);
4359
4360 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
4361
4362 if (cmd_len > len || !cmd.ident) {
4363 BT_DBG("corrupted command");
4364 break;
4365 }
4366
4367 if (conn->hcon->type == LE_LINK)
4368 err = l2cap_le_sig_cmd(conn, &cmd, data);
4369 else
4370 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4371
4372 if (err) {
4373 struct l2cap_cmd_rej_unk rej;
4374
4375			BT_ERR("Signaling command 0x%2.2x failed: %d", cmd.code, err);
4376
4377 /* FIXME: Map err to a valid reason */
4378 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4379 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4380 }
4381
4382 data += cmd_len;
4383 len -= cmd_len;
4384 }
4385
4386 kfree_skb(skb);
4387}
4388
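/* If the channel uses a CRC-16 FCS, strip the trailing FCS field and
 * compare it against a checksum computed over the control header
 * (enhanced or extended) plus the payload.  Returns -EBADMSG on a
 * mismatch, 0 otherwise.
 */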
4389static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4390{
4391 u16 our_fcs, rcv_fcs;
4392 int hdr_size;
4393
4394 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4395 hdr_size = L2CAP_EXT_HDR_SIZE;
4396 else
4397 hdr_size = L2CAP_ENH_HDR_SIZE;
4398
4399 if (chan->fcs == L2CAP_FCS_CRC16) {
4400 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4401 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4402 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4403
4404 if (our_fcs != rcv_fcs)
4405 return -EBADMSG;
4406 }
4407 return 0;
4408}
4409
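/* Send a frame with the F-bit set, e.g. when answering a poll (P=1)
 * from the peer: an RNR frame if we are locally busy, then any pending
 * I-frames, and finally a plain RR frame if the F-bit still has not
 * been transmitted.
 */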
4410static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4411{
4412 struct l2cap_ctrl control;
4413
4414 BT_DBG("chan %p", chan);
4415
4416 memset(&control, 0, sizeof(control));
4417 control.sframe = 1;
4418 control.final = 1;
4419 control.reqseq = chan->buffer_seq;
4420 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4421
4422 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4423 control.super = L2CAP_SUPER_RNR;
4424 l2cap_send_sframe(chan, &control);
4425 }
4426
4427 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4428 chan->unacked_frames > 0)
4429 __set_retrans_timer(chan);
4430
4431 /* Send pending iframes */
4432 l2cap_ertm_send(chan);
4433
4434 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4435 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4436 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4437 * send it now.
4438 */
4439 control.super = L2CAP_SUPER_RR;
4440 l2cap_send_sframe(chan, &control);
4441 }
4442}
4443
4444static void append_skb_frag(struct sk_buff *skb,
4445 struct sk_buff *new_frag, struct sk_buff **last_frag)
4446{
4447 /* skb->len reflects data in skb as well as all fragments
4448 * skb->data_len reflects only data in fragments
4449 */
4450 if (!skb_has_frag_list(skb))
4451 skb_shinfo(skb)->frag_list = new_frag;
4452
4453 new_frag->next = NULL;
4454
4455 (*last_frag)->next = new_frag;
4456 *last_frag = new_frag;
4457
4458 skb->len += new_frag->len;
4459 skb->data_len += new_frag->len;
4460 skb->truesize += new_frag->truesize;
4461}
4462
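/* Reassemble a segmented SDU based on the SAR bits of the control
 * field.  Unsegmented frames are delivered directly; start, continue
 * and end fragments are collected on chan->sdu until the announced SDU
 * length is reached, at which point the complete SDU is passed to the
 * channel's recv callback.  An unexpected SAR state or an oversized
 * SDU discards the partial SDU and returns an error.
 */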
4463static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4464 struct l2cap_ctrl *control)
4465{
4466 int err = -EINVAL;
4467
4468 switch (control->sar) {
4469 case L2CAP_SAR_UNSEGMENTED:
4470 if (chan->sdu)
4471 break;
4472
4473 err = chan->ops->recv(chan->data, skb);
4474 break;
4475
4476 case L2CAP_SAR_START:
4477 if (chan->sdu)
4478 break;
4479
4480 chan->sdu_len = get_unaligned_le16(skb->data);
4481 skb_pull(skb, L2CAP_SDULEN_SIZE);
4482
4483 if (chan->sdu_len > chan->imtu) {
4484 err = -EMSGSIZE;
4485 break;
4486 }
4487
4488 if (skb->len >= chan->sdu_len)
4489 break;
4490
4491 chan->sdu = skb;
4492 chan->sdu_last_frag = skb;
4493
4494 skb = NULL;
4495 err = 0;
4496 break;
4497
4498 case L2CAP_SAR_CONTINUE:
4499 if (!chan->sdu)
4500 break;
4501
4502 append_skb_frag(chan->sdu, skb,
4503 &chan->sdu_last_frag);
4504 skb = NULL;
4505
4506 if (chan->sdu->len >= chan->sdu_len)
4507 break;
4508
4509 err = 0;
4510 break;
4511
4512 case L2CAP_SAR_END:
4513 if (!chan->sdu)
4514 break;
4515
4516 append_skb_frag(chan->sdu, skb,
4517 &chan->sdu_last_frag);
4518 skb = NULL;
4519
4520 if (chan->sdu->len != chan->sdu_len)
4521 break;
4522
4523 err = chan->ops->recv(chan->data, chan->sdu);
4524
4525 if (!err) {
4526 /* Reassembly complete */
4527 chan->sdu = NULL;
4528 chan->sdu_last_frag = NULL;
4529 chan->sdu_len = 0;
4530 }
4531 break;
4532 }
4533
4534 if (err) {
4535 kfree_skb(skb);
4536 kfree_skb(chan->sdu);
4537 chan->sdu = NULL;
4538 chan->sdu_last_frag = NULL;
4539 chan->sdu_len = 0;
4540 }
4541
4542 return err;
4543}
4544
4545void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4546{
4547 u8 event;
4548
4549 if (chan->mode != L2CAP_MODE_ERTM)
4550 return;
4551
4552 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4553 l2cap_tx(chan, NULL, NULL, event);
4554}
4555
4556static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4557{
4558 int err = 0;
4559 /* Pass sequential frames to l2cap_reassemble_sdu()
4560 * until a gap is encountered.
4561 */
4562
4563 BT_DBG("chan %p", chan);
4564
4565 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4566 struct sk_buff *skb;
4567 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4568 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4569
4570 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4571
4572 if (!skb)
4573 break;
4574
4575 skb_unlink(skb, &chan->srej_q);
4576 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4577 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4578 if (err)
4579 break;
4580 }
4581
4582 if (skb_queue_empty(&chan->srej_q)) {
4583 chan->rx_state = L2CAP_RX_STATE_RECV;
4584 l2cap_send_ack(chan);
4585 }
4586
4587 return err;
4588}
4589
4590static void l2cap_handle_srej(struct l2cap_chan *chan,
4591 struct l2cap_ctrl *control)
4592{
4593 struct sk_buff *skb;
4594
4595 BT_DBG("chan %p, control %p", chan, control);
4596
4597 if (control->reqseq == chan->next_tx_seq) {
4598 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4599 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4600 return;
4601 }
4602
4603 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4604
4605 if (skb == NULL) {
4606 BT_DBG("Seq %d not available for retransmission",
4607 control->reqseq);
4608 return;
4609 }
4610
4611 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4612 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4613 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4614 return;
4615 }
4616
4617 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4618
4619 if (control->poll) {
4620 l2cap_pass_to_tx(chan, control);
4621
4622 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4623 l2cap_retransmit(chan, control);
4624 l2cap_ertm_send(chan);
4625
4626 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4627 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4628 chan->srej_save_reqseq = control->reqseq;
4629 }
4630 } else {
4631 l2cap_pass_to_tx_fbit(chan, control);
4632
4633 if (control->final) {
4634 if (chan->srej_save_reqseq != control->reqseq ||
4635 !test_and_clear_bit(CONN_SREJ_ACT,
4636 &chan->conn_state))
4637 l2cap_retransmit(chan, control);
4638 } else {
4639 l2cap_retransmit(chan, control);
4640 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4641 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4642 chan->srej_save_reqseq = control->reqseq;
4643 }
4644 }
4645 }
4646}
4647
4648static void l2cap_handle_rej(struct l2cap_chan *chan,
4649 struct l2cap_ctrl *control)
4650{
4651 struct sk_buff *skb;
4652
4653 BT_DBG("chan %p, control %p", chan, control);
4654
4655 if (control->reqseq == chan->next_tx_seq) {
4656 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4657 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4658 return;
4659 }
4660
4661 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4662
4663 if (chan->max_tx && skb &&
4664 bt_cb(skb)->control.retries >= chan->max_tx) {
4665 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4666 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4667 return;
4668 }
4669
4670 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4671
4672 l2cap_pass_to_tx(chan, control);
4673
4674 if (control->final) {
4675 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4676 l2cap_retransmit_all(chan, control);
4677 } else {
4678 l2cap_retransmit_all(chan, control);
4679 l2cap_ertm_send(chan);
4680 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4681 set_bit(CONN_REJ_ACT, &chan->conn_state);
4682 }
4683}
4684
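/* Classify a received I-frame sequence number relative to the current
 * receive state: expected, duplicate, an out-of-sequence frame that
 * needs an SREJ, a frame previously requested via SREJ, or an invalid
 * sequence number outside the transmit window, which is either ignored
 * or forces a disconnect (see the "double poll" comment below).
 */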
4685static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4686{
4687 BT_DBG("chan %p, txseq %d", chan, txseq);
4688
4689 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4690 chan->expected_tx_seq);
4691
4692 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4693 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4694 chan->tx_win) {
4695 /* See notes below regarding "double poll" and
4696 * invalid packets.
4697 */
4698 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4699 BT_DBG("Invalid/Ignore - after SREJ");
4700 return L2CAP_TXSEQ_INVALID_IGNORE;
4701 } else {
4702 BT_DBG("Invalid - in window after SREJ sent");
4703 return L2CAP_TXSEQ_INVALID;
4704 }
4705 }
4706
4707 if (chan->srej_list.head == txseq) {
4708 BT_DBG("Expected SREJ");
4709 return L2CAP_TXSEQ_EXPECTED_SREJ;
4710 }
4711
4712 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4713 BT_DBG("Duplicate SREJ - txseq already stored");
4714 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4715 }
4716
4717 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4718 BT_DBG("Unexpected SREJ - not requested");
4719 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4720 }
4721 }
4722
4723 if (chan->expected_tx_seq == txseq) {
4724 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4725 chan->tx_win) {
4726 BT_DBG("Invalid - txseq outside tx window");
4727 return L2CAP_TXSEQ_INVALID;
4728 } else {
4729 BT_DBG("Expected");
4730 return L2CAP_TXSEQ_EXPECTED;
4731 }
4732 }
4733
4734 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4735 __seq_offset(chan, chan->expected_tx_seq,
4736			 chan->last_acked_seq)) {
4737 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4738 return L2CAP_TXSEQ_DUPLICATE;
4739 }
4740
4741 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4742 /* A source of invalid packets is a "double poll" condition,
4743 * where delays cause us to send multiple poll packets. If
4744 * the remote stack receives and processes both polls,
4745 * sequence numbers can wrap around in such a way that a
4746 * resent frame has a sequence number that looks like new data
4747 * with a sequence gap. This would trigger an erroneous SREJ
4748 * request.
4749 *
4750 * Fortunately, this is impossible with a tx window that's
4751 * less than half of the maximum sequence number, which allows
4752 * invalid frames to be safely ignored.
4753 *
4754 * With tx window sizes greater than half of the tx window
4755 * maximum, the frame is invalid and cannot be ignored. This
4756 * causes a disconnect.
4757 */
4758
4759 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4760 BT_DBG("Invalid/Ignore - txseq outside tx window");
4761 return L2CAP_TXSEQ_INVALID_IGNORE;
4762 } else {
4763 BT_DBG("Invalid - txseq outside tx window");
4764 return L2CAP_TXSEQ_INVALID;
4765 }
4766 } else {
4767 BT_DBG("Unexpected - txseq indicates missing frames");
4768 return L2CAP_TXSEQ_UNEXPECTED;
4769 }
4770}
4771
4772static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4773 struct l2cap_ctrl *control,
4774 struct sk_buff *skb, u8 event)
4775{
4776 int err = 0;
4777 bool skb_in_use = 0;
4778
4779 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4780 event);
4781
4782 switch (event) {
4783 case L2CAP_EV_RECV_IFRAME:
4784 switch (l2cap_classify_txseq(chan, control->txseq)) {
4785 case L2CAP_TXSEQ_EXPECTED:
4786 l2cap_pass_to_tx(chan, control);
4787
4788 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4789 BT_DBG("Busy, discarding expected seq %d",
4790 control->txseq);
4791 break;
4792 }
4793
4794 chan->expected_tx_seq = __next_seq(chan,
4795 control->txseq);
4796
4797 chan->buffer_seq = chan->expected_tx_seq;
4798 skb_in_use = 1;
4799
4800 err = l2cap_reassemble_sdu(chan, skb, control);
4801 if (err)
4802 break;
4803
4804 if (control->final) {
4805 if (!test_and_clear_bit(CONN_REJ_ACT,
4806 &chan->conn_state)) {
4807 control->final = 0;
4808 l2cap_retransmit_all(chan, control);
4809 l2cap_ertm_send(chan);
4810 }
4811 }
4812
4813 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4814 l2cap_send_ack(chan);
4815 break;
4816 case L2CAP_TXSEQ_UNEXPECTED:
4817 l2cap_pass_to_tx(chan, control);
4818
4819 /* Can't issue SREJ frames in the local busy state.
4820 * Drop this frame, it will be seen as missing
4821 * when local busy is exited.
4822 */
4823 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4824 BT_DBG("Busy, discarding unexpected seq %d",
4825 control->txseq);
4826 break;
4827 }
4828
4829 /* There was a gap in the sequence, so an SREJ
4830 * must be sent for each missing frame. The
4831 * current frame is stored for later use.
4832 */
4833 skb_queue_tail(&chan->srej_q, skb);
4834 skb_in_use = 1;
4835 BT_DBG("Queued %p (queue len %d)", skb,
4836 skb_queue_len(&chan->srej_q));
4837
4838 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4839 l2cap_seq_list_clear(&chan->srej_list);
4840 l2cap_send_srej(chan, control->txseq);
4841
4842 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4843 break;
4844 case L2CAP_TXSEQ_DUPLICATE:
4845 l2cap_pass_to_tx(chan, control);
4846 break;
4847 case L2CAP_TXSEQ_INVALID_IGNORE:
4848 break;
4849 case L2CAP_TXSEQ_INVALID:
4850 default:
4851 l2cap_send_disconn_req(chan->conn, chan,
4852 ECONNRESET);
4853 break;
4854 }
4855 break;
4856 case L2CAP_EV_RECV_RR:
4857 l2cap_pass_to_tx(chan, control);
4858 if (control->final) {
4859 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4860
4861 if (!test_and_clear_bit(CONN_REJ_ACT,
4862 &chan->conn_state)) {
4863 control->final = 0;
4864 l2cap_retransmit_all(chan, control);
4865 }
4866
4867 l2cap_ertm_send(chan);
4868 } else if (control->poll) {
4869 l2cap_send_i_or_rr_or_rnr(chan);
4870 } else {
4871 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4872 &chan->conn_state) &&
4873 chan->unacked_frames)
4874 __set_retrans_timer(chan);
4875
4876 l2cap_ertm_send(chan);
4877 }
4878 break;
4879 case L2CAP_EV_RECV_RNR:
4880 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4881 l2cap_pass_to_tx(chan, control);
4882 if (control && control->poll) {
4883 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4884 l2cap_send_rr_or_rnr(chan, 0);
4885 }
4886 __clear_retrans_timer(chan);
4887 l2cap_seq_list_clear(&chan->retrans_list);
4888 break;
4889 case L2CAP_EV_RECV_REJ:
4890 l2cap_handle_rej(chan, control);
4891 break;
4892 case L2CAP_EV_RECV_SREJ:
4893 l2cap_handle_srej(chan, control);
4894 break;
4895 default:
4896 break;
4897 }
4898
4899 if (skb && !skb_in_use) {
4900 BT_DBG("Freeing %p", skb);
4901 kfree_skb(skb);
4902 }
4903
4904 return err;
4905}
4906
4907static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4908 struct l2cap_ctrl *control,
4909 struct sk_buff *skb, u8 event)
4910{
4911 int err = 0;
4912 u16 txseq = control->txseq;
4913 bool skb_in_use = 0;
4914
4915 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4916 event);
4917
4918 switch (event) {
4919 case L2CAP_EV_RECV_IFRAME:
4920 switch (l2cap_classify_txseq(chan, txseq)) {
4921 case L2CAP_TXSEQ_EXPECTED:
4922 /* Keep frame for reassembly later */
4923 l2cap_pass_to_tx(chan, control);
4924 skb_queue_tail(&chan->srej_q, skb);
4925 skb_in_use = 1;
4926 BT_DBG("Queued %p (queue len %d)", skb,
4927 skb_queue_len(&chan->srej_q));
4928
4929 chan->expected_tx_seq = __next_seq(chan, txseq);
4930 break;
4931 case L2CAP_TXSEQ_EXPECTED_SREJ:
4932 l2cap_seq_list_pop(&chan->srej_list);
4933
4934 l2cap_pass_to_tx(chan, control);
4935 skb_queue_tail(&chan->srej_q, skb);
4936 skb_in_use = 1;
4937 BT_DBG("Queued %p (queue len %d)", skb,
4938 skb_queue_len(&chan->srej_q));
4939
4940			err = l2cap_rx_queued_iframes(chan);
4941			break;
4945 case L2CAP_TXSEQ_UNEXPECTED:
4946 /* Got a frame that can't be reassembled yet.
4947 * Save it for later, and send SREJs to cover
4948 * the missing frames.
4949 */
4950 skb_queue_tail(&chan->srej_q, skb);
4951 skb_in_use = 1;
4952 BT_DBG("Queued %p (queue len %d)", skb,
4953 skb_queue_len(&chan->srej_q));
4954
4955 l2cap_pass_to_tx(chan, control);
4956 l2cap_send_srej(chan, control->txseq);
4957 break;
4958 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4959 /* This frame was requested with an SREJ, but
4960 * some expected retransmitted frames are
4961 * missing. Request retransmission of missing
4962 * SREJ'd frames.
4963 */
4964 skb_queue_tail(&chan->srej_q, skb);
4965 skb_in_use = 1;
4966 BT_DBG("Queued %p (queue len %d)", skb,
4967 skb_queue_len(&chan->srej_q));
4968
4969 l2cap_pass_to_tx(chan, control);
4970 l2cap_send_srej_list(chan, control->txseq);
4971 break;
4972 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4973 /* We've already queued this frame. Drop this copy. */
4974 l2cap_pass_to_tx(chan, control);
4975 break;
4976 case L2CAP_TXSEQ_DUPLICATE:
4977 /* Expecting a later sequence number, so this frame
4978 * was already received. Ignore it completely.
4979 */
4980 break;
4981 case L2CAP_TXSEQ_INVALID_IGNORE:
4982 break;
4983 case L2CAP_TXSEQ_INVALID:
4984 default:
4985 l2cap_send_disconn_req(chan->conn, chan,
4986 ECONNRESET);
4987 break;
4988 }
4989 break;
4990 case L2CAP_EV_RECV_RR:
4991 l2cap_pass_to_tx(chan, control);
4992 if (control->final) {
4993 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4994
4995 if (!test_and_clear_bit(CONN_REJ_ACT,
4996 &chan->conn_state)) {
4997 control->final = 0;
4998 l2cap_retransmit_all(chan, control);
4999 }
5000
5001 l2cap_ertm_send(chan);
5002 } else if (control->poll) {
5003 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5004 &chan->conn_state) &&
5005 chan->unacked_frames) {
5006 __set_retrans_timer(chan);
5007 }
5008
5009 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5010 l2cap_send_srej_tail(chan);
5011 } else {
5012 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5013 &chan->conn_state) &&
5014 chan->unacked_frames)
5015 __set_retrans_timer(chan);
5016
5017 l2cap_send_ack(chan);
5018 }
5019 break;
5020 case L2CAP_EV_RECV_RNR:
5021 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5022 l2cap_pass_to_tx(chan, control);
5023 if (control->poll) {
5024 l2cap_send_srej_tail(chan);
5025 } else {
5026 struct l2cap_ctrl rr_control;
5027 memset(&rr_control, 0, sizeof(rr_control));
5028 rr_control.sframe = 1;
5029 rr_control.super = L2CAP_SUPER_RR;
5030 rr_control.reqseq = chan->buffer_seq;
5031 l2cap_send_sframe(chan, &rr_control);
5032 }
5033
5034 break;
5035 case L2CAP_EV_RECV_REJ:
5036 l2cap_handle_rej(chan, control);
5037 break;
5038 case L2CAP_EV_RECV_SREJ:
5039 l2cap_handle_srej(chan, control);
5040 break;
5041 }
5042
5043 if (skb && !skb_in_use) {
5044 BT_DBG("Freeing %p", skb);
5045 kfree_skb(skb);
5046 }
5047
5048 return err;
5049}
5050
5051static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5052{
5053 /* Make sure reqseq is for a packet that has been sent but not acked */
5054 u16 unacked;
5055
5056 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5057 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5058}
5059
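/* Entry point of the ERTM receive state machine.  The reqseq of every
 * incoming frame must acknowledge a frame that is actually outstanding
 * (between expected_ack_seq and next_tx_seq); otherwise the channel is
 * disconnected.  Valid frames are handed to the handler for the
 * current rx_state (RECV or SREJ_SENT).
 */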
5060static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5061 struct sk_buff *skb, u8 event)
5062{
5063 int err = 0;
5064
5065 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5066 control, skb, event, chan->rx_state);
5067
5068 if (__valid_reqseq(chan, control->reqseq)) {
5069 switch (chan->rx_state) {
5070 case L2CAP_RX_STATE_RECV:
5071 err = l2cap_rx_state_recv(chan, control, skb, event);
5072 break;
5073 case L2CAP_RX_STATE_SREJ_SENT:
5074 err = l2cap_rx_state_srej_sent(chan, control, skb,
5075 event);
5076 break;
5077 default:
5078 /* shut it down */
5079 break;
5080 }
5081 } else {
5082		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
5083 control->reqseq, chan->next_tx_seq,
5084 chan->expected_ack_seq);
5085 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5086 }
5087
5088 return err;
5089}
5090
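/* Receive path for streaming mode: only a frame with the expected
 * txseq is reassembled.  Anything else discards the partial SDU and is
 * dropped, since streaming mode never requests retransmission; the
 * receiver simply resynchronizes to the sequence number it received.
 */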
5091static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5092 struct sk_buff *skb)
5093{
5094 int err = 0;
5095
5096 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5097 chan->rx_state);
5098
5099 if (l2cap_classify_txseq(chan, control->txseq) ==
5100 L2CAP_TXSEQ_EXPECTED) {
5101 l2cap_pass_to_tx(chan, control);
5102
5103 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5104 __next_seq(chan, chan->buffer_seq));
5105
5106 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5107
5108 l2cap_reassemble_sdu(chan, skb, control);
5109 } else {
5110 if (chan->sdu) {
5111 kfree_skb(chan->sdu);
5112 chan->sdu = NULL;
5113 }
5114 chan->sdu_last_frag = NULL;
5115 chan->sdu_len = 0;
5116
5117 if (skb) {
5118 BT_DBG("Freeing %p", skb);
5119 kfree_skb(skb);
5120 }
5121 }
5122
5123 chan->last_acked_seq = control->txseq;
5124 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5125
5126 return err;
5127}
5128
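/* Validate and dispatch a single ERTM or streaming mode PDU: check the
 * FCS, make sure the payload fits within the negotiated MPS, validate
 * the F/P bits against the transmit state, and feed I-frames and
 * S-frames into the appropriate receive routine.  Malformed frames are
 * dropped and, where the protocol requires it, the channel is
 * disconnected.
 */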
5129static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5130{
5131 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5132 u16 len;
5133 u8 event;
5134
5135 __unpack_control(chan, skb);
5136
5137 len = skb->len;
5138
5139	/*
5140	 * A corrupted I-frame (bad FCS) can simply be dropped here.  The
5141	 * receive state machine will treat it as missing and start the
5142	 * normal recovery procedure, asking the remote side to retransmit.
5143	 */
5144 if (l2cap_check_fcs(chan, skb))
5145 goto drop;
5146
5147 if (!control->sframe && control->sar == L2CAP_SAR_START)
5148 len -= L2CAP_SDULEN_SIZE;
5149
5150 if (chan->fcs == L2CAP_FCS_CRC16)
5151 len -= L2CAP_FCS_SIZE;
5152
5153 if (len > chan->mps) {
5154 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5155 goto drop;
5156 }
5157
5158 if (!control->sframe) {
5159 int err;
5160
5161 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5162 control->sar, control->reqseq, control->final,
5163 control->txseq);
5164
5165 /* Validate F-bit - F=0 always valid, F=1 only
5166 * valid in TX WAIT_F
5167 */
5168 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5169 goto drop;
5170
5171 if (chan->mode != L2CAP_MODE_STREAMING) {
5172 event = L2CAP_EV_RECV_IFRAME;
5173 err = l2cap_rx(chan, control, skb, event);
5174 } else {
5175 err = l2cap_stream_rx(chan, control, skb);
5176 }
5177
5178 if (err)
5179 l2cap_send_disconn_req(chan->conn, chan,
5180 ECONNRESET);
5181 } else {
5182 const u8 rx_func_to_event[4] = {
5183 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5184 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5185 };
5186
5187 /* Only I-frames are expected in streaming mode */
5188 if (chan->mode == L2CAP_MODE_STREAMING)
5189 goto drop;
5190
5191 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5192 control->reqseq, control->final, control->poll,
5193 control->super);
5194
5195 if (len != 0) {
5196			BT_ERR("Trailing bytes: %d in sframe", len);
5197 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5198 goto drop;
5199 }
5200
5201 /* Validate F and P bits */
5202 if (control->final && (control->poll ||
5203 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5204 goto drop;
5205
5206 event = rx_func_to_event[control->super];
5207 if (l2cap_rx(chan, control, skb, event))
5208 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5209 }
5210
5211 return 0;
5212
5213drop:
5214 kfree_skb(skb);
5215 return 0;
5216}
5217
5218static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
5219{
5220 struct l2cap_chan *chan;
5221
5222 chan = l2cap_get_chan_by_scid(conn, cid);
5223 if (!chan) {
5224 BT_DBG("unknown cid 0x%4.4x", cid);
5225 /* Drop packet and return */
5226 kfree_skb(skb);
5227 return 0;
5228 }
5229
5230 BT_DBG("chan %p, len %d", chan, skb->len);
5231
5232 if (chan->state != BT_CONNECTED)
5233 goto drop;
5234
5235 switch (chan->mode) {
5236 case L2CAP_MODE_BASIC:
5237		/* If the socket recv buffer overflows we drop data here,
5238		 * which is *bad* because L2CAP has to be reliable.
5239		 * But we don't have any other choice: L2CAP doesn't
5240		 * provide a flow control mechanism in basic mode. */
5241
5242 if (chan->imtu < skb->len)
5243 goto drop;
5244
5245 if (!chan->ops->recv(chan->data, skb))
5246 goto done;
5247 break;
5248
5249 case L2CAP_MODE_ERTM:
5250 case L2CAP_MODE_STREAMING:
5251 l2cap_data_rcv(chan, skb);
5252 goto done;
5253
5254 default:
5255 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5256 break;
5257 }
5258
5259drop:
5260 kfree_skb(skb);
5261
5262done:
5263 l2cap_chan_unlock(chan);
5264
5265 return 0;
5266}
5267
5268static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5269{
5270 struct l2cap_chan *chan;
5271
5272 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5273 if (!chan)
5274 goto drop;
5275
5276 BT_DBG("chan %p, len %d", chan, skb->len);
5277
5278 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5279 goto drop;
5280
5281 if (chan->imtu < skb->len)
5282 goto drop;
5283
5284 if (!chan->ops->recv(chan->data, skb))
5285 return 0;
5286
5287drop:
5288 kfree_skb(skb);
5289
5290 return 0;
5291}
5292
5293static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5294 struct sk_buff *skb)
5295{
5296 struct l2cap_chan *chan;
5297
5298 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5299 if (!chan)
5300 goto drop;
5301
5302 BT_DBG("chan %p, len %d", chan, skb->len);
5303
5304 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5305 goto drop;
5306
5307 if (chan->imtu < skb->len)
5308 goto drop;
5309
5310 if (!chan->ops->recv(chan->data, skb))
5311 return 0;
5312
5313drop:
5314 kfree_skb(skb);
5315
5316 return 0;
5317}
5318
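/* Demultiplex a complete L2CAP frame by CID: the signaling channels,
 * the connectionless channel, the LE data (ATT) and SMP fixed channels,
 * or a connection-oriented data channel.  A frame whose length field
 * does not match the received data is dropped.
 */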
5319static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5320{
5321 struct l2cap_hdr *lh = (void *) skb->data;
5322 u16 cid, len;
5323 __le16 psm;
5324
5325 skb_pull(skb, L2CAP_HDR_SIZE);
5326 cid = __le16_to_cpu(lh->cid);
5327 len = __le16_to_cpu(lh->len);
5328
5329 if (len != skb->len) {
5330 kfree_skb(skb);
5331 return;
5332 }
5333
5334 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5335
5336 switch (cid) {
5337 case L2CAP_CID_LE_SIGNALING:
5338 case L2CAP_CID_SIGNALING:
5339 l2cap_sig_channel(conn, skb);
5340 break;
5341
5342 case L2CAP_CID_CONN_LESS:
5343 psm = get_unaligned((__le16 *) skb->data);
5344 skb_pull(skb, 2);
5345 l2cap_conless_channel(conn, psm, skb);
5346 break;
5347
5348 case L2CAP_CID_LE_DATA:
5349 l2cap_att_channel(conn, cid, skb);
5350 break;
5351
5352 case L2CAP_CID_SMP:
5353 if (smp_sig_channel(conn, skb))
5354 l2cap_conn_del(conn->hcon, EACCES);
5355 break;
5356
5357 default:
5358 l2cap_data_channel(conn, cid, skb);
5359 break;
5360 }
5361}
5362
5363/* ---- L2CAP interface with lower layer (HCI) ---- */
5364
5365int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5366{
5367 int exact = 0, lm1 = 0, lm2 = 0;
5368 struct l2cap_chan *c;
5369
5370 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5371
5372 /* Find listening sockets and check their link_mode */
5373 read_lock(&chan_list_lock);
5374 list_for_each_entry(c, &chan_list, global_l) {
5375 struct sock *sk = c->sk;
5376
5377 if (c->state != BT_LISTEN)
5378 continue;
5379
5380 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5381 lm1 |= HCI_LM_ACCEPT;
5382 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5383 lm1 |= HCI_LM_MASTER;
5384 exact++;
5385 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5386 lm2 |= HCI_LM_ACCEPT;
5387 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5388 lm2 |= HCI_LM_MASTER;
5389 }
5390 }
5391 read_unlock(&chan_list_lock);
5392
5393 return exact ? lm1 : lm2;
5394}
5395
5396int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5397{
5398 struct l2cap_conn *conn;
5399
5400 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5401
5402 if (!status) {
5403 conn = l2cap_conn_add(hcon, status);
5404 if (conn)
5405 l2cap_conn_ready(conn);
5406 } else
5407 l2cap_conn_del(hcon, bt_to_errno(status));
5408
5409 return 0;
5410}
5411
5412int l2cap_disconn_ind(struct hci_conn *hcon)
5413{
5414 struct l2cap_conn *conn = hcon->l2cap_data;
5415
5416 BT_DBG("hcon %p", hcon);
5417
5418 if (!conn)
5419 return HCI_ERROR_REMOTE_USER_TERM;
5420 return conn->disc_reason;
5421}
5422
5423int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5424{
5425 BT_DBG("hcon %p reason %d", hcon, reason);
5426
5427 l2cap_conn_del(hcon, bt_to_errno(reason));
5428 return 0;
5429}
5430
5431static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5432{
5433 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5434 return;
5435
5436 if (encrypt == 0x00) {
5437 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5438 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5439 } else if (chan->sec_level == BT_SECURITY_HIGH)
5440 l2cap_chan_close(chan, ECONNREFUSED);
5441 } else {
5442 if (chan->sec_level == BT_SECURITY_MEDIUM)
5443 __clear_chan_timer(chan);
5444 }
5445}
5446
5447int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5448{
5449 struct l2cap_conn *conn = hcon->l2cap_data;
5450 struct l2cap_chan *chan;
5451
5452 if (!conn)
5453 return 0;
5454
5455 BT_DBG("conn %p", conn);
5456
5457 if (hcon->type == LE_LINK) {
5458 if (!status && encrypt)
5459 smp_distribute_keys(conn, 0);
5460 cancel_delayed_work(&conn->security_timer);
5461 }
5462
5463 mutex_lock(&conn->chan_lock);
5464
5465 list_for_each_entry(chan, &conn->chan_l, list) {
5466 l2cap_chan_lock(chan);
5467
5468 BT_DBG("chan->scid %d", chan->scid);
5469
5470 if (chan->scid == L2CAP_CID_LE_DATA) {
5471 if (!status && encrypt) {
5472 chan->sec_level = hcon->sec_level;
5473 l2cap_chan_ready(chan);
5474 }
5475
5476 l2cap_chan_unlock(chan);
5477 continue;
5478 }
5479
5480 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5481 l2cap_chan_unlock(chan);
5482 continue;
5483 }
5484
5485 if (!status && (chan->state == BT_CONNECTED ||
5486 chan->state == BT_CONFIG)) {
5487 struct sock *sk = chan->sk;
5488
5489 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5490 sk->sk_state_change(sk);
5491
5492 l2cap_check_encryption(chan, encrypt);
5493 l2cap_chan_unlock(chan);
5494 continue;
5495 }
5496
5497 if (chan->state == BT_CONNECT) {
5498 if (!status) {
5499 l2cap_send_conn_req(chan);
5500 } else {
5501 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5502 }
5503 } else if (chan->state == BT_CONNECT2) {
5504 struct sock *sk = chan->sk;
5505 struct l2cap_conn_rsp rsp;
5506 __u16 res, stat;
5507
5508 lock_sock(sk);
5509
5510 if (!status) {
5511 if (test_bit(BT_SK_DEFER_SETUP,
5512 &bt_sk(sk)->flags)) {
5513 struct sock *parent = bt_sk(sk)->parent;
5514 res = L2CAP_CR_PEND;
5515 stat = L2CAP_CS_AUTHOR_PEND;
5516 if (parent)
5517 parent->sk_data_ready(parent, 0);
5518 } else {
5519 __l2cap_state_change(chan, BT_CONFIG);
5520 res = L2CAP_CR_SUCCESS;
5521 stat = L2CAP_CS_NO_INFO;
5522 }
5523 } else {
5524 __l2cap_state_change(chan, BT_DISCONN);
5525 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5526 res = L2CAP_CR_SEC_BLOCK;
5527 stat = L2CAP_CS_NO_INFO;
5528 }
5529
5530 release_sock(sk);
5531
5532 rsp.scid = cpu_to_le16(chan->dcid);
5533 rsp.dcid = cpu_to_le16(chan->scid);
5534 rsp.result = cpu_to_le16(res);
5535 rsp.status = cpu_to_le16(stat);
5536 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5537 sizeof(rsp), &rsp);
5538 }
5539
5540 l2cap_chan_unlock(chan);
5541 }
5542
5543 mutex_unlock(&conn->chan_lock);
5544
5545 return 0;
5546}
5547
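/* Reassemble incoming ACL data packets into complete L2CAP frames.  A
 * start fragment carries the Basic L2CAP header announcing the total
 * frame length; continuation fragments are appended to conn->rx_skb
 * until that length is reached and the frame can be processed.
 * Inconsistent fragment sequences mark the connection as unreliable
 * and are dropped.
 */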
5548int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5549{
5550 struct l2cap_conn *conn = hcon->l2cap_data;
5551
5552 if (!conn)
5553 conn = l2cap_conn_add(hcon, 0);
5554
5555 if (!conn)
5556 goto drop;
5557
5558 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5559
5560 if (!(flags & ACL_CONT)) {
5561 struct l2cap_hdr *hdr;
5562 int len;
5563
5564 if (conn->rx_len) {
5565 BT_ERR("Unexpected start frame (len %d)", skb->len);
5566 kfree_skb(conn->rx_skb);
5567 conn->rx_skb = NULL;
5568 conn->rx_len = 0;
5569 l2cap_conn_unreliable(conn, ECOMM);
5570 }
5571
5572		/* A start fragment always begins with the Basic L2CAP header */
5573 if (skb->len < L2CAP_HDR_SIZE) {
5574 BT_ERR("Frame is too short (len %d)", skb->len);
5575 l2cap_conn_unreliable(conn, ECOMM);
5576 goto drop;
5577 }
5578
5579 hdr = (struct l2cap_hdr *) skb->data;
5580 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5581
5582 if (len == skb->len) {
5583 /* Complete frame received */
5584 l2cap_recv_frame(conn, skb);
5585 return 0;
5586 }
5587
5588 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5589
5590 if (skb->len > len) {
5591 BT_ERR("Frame is too long (len %d, expected len %d)",
5592 skb->len, len);
5593 l2cap_conn_unreliable(conn, ECOMM);
5594 goto drop;
5595 }
5596
5597 /* Allocate skb for the complete frame (with header) */
5598 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5599 if (!conn->rx_skb)
5600 goto drop;
5601
5602 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5603 skb->len);
5604 conn->rx_len = len - skb->len;
5605 } else {
5606 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5607
5608 if (!conn->rx_len) {
5609 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5610 l2cap_conn_unreliable(conn, ECOMM);
5611 goto drop;
5612 }
5613
5614 if (skb->len > conn->rx_len) {
5615 BT_ERR("Fragment is too long (len %d, expected %d)",
5616 skb->len, conn->rx_len);
5617 kfree_skb(conn->rx_skb);
5618 conn->rx_skb = NULL;
5619 conn->rx_len = 0;
5620 l2cap_conn_unreliable(conn, ECOMM);
5621 goto drop;
5622 }
5623
5624 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5625 skb->len);
5626 conn->rx_len -= skb->len;
5627
5628 if (!conn->rx_len) {
5629 /* Complete frame received */
5630 l2cap_recv_frame(conn, conn->rx_skb);
5631 conn->rx_skb = NULL;
5632 }
5633 }
5634
5635drop:
5636 kfree_skb(skb);
5637 return 0;
5638}
5639
5640static int l2cap_debugfs_show(struct seq_file *f, void *p)
5641{
5642 struct l2cap_chan *c;
5643
5644 read_lock(&chan_list_lock);
5645
5646 list_for_each_entry(c, &chan_list, global_l) {
5647 struct sock *sk = c->sk;
5648
5649 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5650 batostr(&bt_sk(sk)->src),
5651 batostr(&bt_sk(sk)->dst),
5652 c->state, __le16_to_cpu(c->psm),
5653 c->scid, c->dcid, c->imtu, c->omtu,
5654 c->sec_level, c->mode);
5655 }
5656
5657 read_unlock(&chan_list_lock);
5658
5659 return 0;
5660}
5661
5662static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5663{
5664 return single_open(file, l2cap_debugfs_show, inode->i_private);
5665}
5666
5667static const struct file_operations l2cap_debugfs_fops = {
5668 .open = l2cap_debugfs_open,
5669 .read = seq_read,
5670 .llseek = seq_lseek,
5671 .release = single_release,
5672};
5673
5674static struct dentry *l2cap_debugfs;
5675
5676int __init l2cap_init(void)
5677{
5678 int err;
5679
5680 err = l2cap_init_sockets();
5681 if (err < 0)
5682 return err;
5683
5684 if (bt_debugfs) {
5685 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5686 bt_debugfs, NULL, &l2cap_debugfs_fops);
5687 if (!l2cap_debugfs)
5688 BT_ERR("Failed to create L2CAP debug file");
5689 }
5690
5691 return 0;
5692}
5693
5694void l2cap_exit(void)
5695{
5696 debugfs_remove(l2cap_debugfs);
5697 l2cap_cleanup_sockets();
5698}
5699
5700module_param(disable_ertm, bool, 0644);
5701MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");