Bluetooth: Check rules when setting retransmit or monitor timers
net/bluetooth/l2cap_core.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/types.h>
34#include <linux/capability.h>
35#include <linux/errno.h>
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/poll.h>
40#include <linux/fcntl.h>
41#include <linux/init.h>
42#include <linux/interrupt.h>
43#include <linux/socket.h>
44#include <linux/skbuff.h>
45#include <linux/list.h>
46#include <linux/device.h>
47#include <linux/debugfs.h>
48#include <linux/seq_file.h>
49#include <linux/uaccess.h>
50#include <linux/crc16.h>
51#include <net/sock.h>
52
53#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
58#include <net/bluetooth/smp.h>
59
 60bool disable_ertm;
61
62static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65static LIST_HEAD(chan_list);
66static DEFINE_RWLOCK(chan_list_lock);
67
68static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
78
79/* ---- L2CAP channels ---- */
80
81static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82{
83 struct l2cap_chan *c;
84
85 list_for_each_entry(c, &conn->chan_l, list) {
86 if (c->dcid == cid)
87 return c;
88 }
89 return NULL;
90}
91
92static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93{
94 struct l2cap_chan *c;
95
96 list_for_each_entry(c, &conn->chan_l, list) {
97 if (c->scid == cid)
98 return c;
99 }
100 return NULL;
101}
102
103/* Find channel with given SCID.
104 * Returns locked channel. */
105static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
106{
107 struct l2cap_chan *c;
108
109 mutex_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
111 if (c)
112 l2cap_chan_lock(c);
113 mutex_unlock(&conn->chan_lock);
114
115 return c;
116}
117
118static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119{
120 struct l2cap_chan *c;
121
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
124 return c;
125 }
126 return NULL;
127}
128
129static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
130{
131 struct l2cap_chan *c;
132
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
135 return c;
136 }
137 return NULL;
138}
139
140int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
141{
142 int err;
143
144 write_lock(&chan_list_lock);
145
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
147 err = -EADDRINUSE;
148 goto done;
149 }
150
151 if (psm) {
152 chan->psm = psm;
153 chan->sport = psm;
154 err = 0;
155 } else {
156 u16 p;
157
158 err = -EINVAL;
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
163 err = 0;
164 break;
165 }
166 }
167
168done:
169 write_unlock(&chan_list_lock);
170 return err;
171}
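
/* Example (added note, not part of the original file): binding with
 * psm == 0 requests automatic allocation, and the loop above hands out
 * the first free odd PSM in the dynamic range for the given source
 * address: 0x1001 for the first socket, 0x1003 for the next, and so on
 * up to 0x10ff.
 */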
172
173int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
174{
175 write_lock(&chan_list_lock);
176
177 chan->scid = scid;
178
179 write_unlock(&chan_list_lock);
180
181 return 0;
182}
183
184static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
185{
186 u16 cid = L2CAP_CID_DYN_START;
187
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
190 return cid;
191 }
192
193 return 0;
194}
195
196static void __l2cap_state_change(struct l2cap_chan *chan, int state)
197{
198 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
199 state_to_string(state));
200
201 chan->state = state;
202 chan->ops->state_change(chan->data, state);
203}
204
205static void l2cap_state_change(struct l2cap_chan *chan, int state)
206{
207 struct sock *sk = chan->sk;
208
209 lock_sock(sk);
210 __l2cap_state_change(chan, state);
211 release_sock(sk);
212}
213
214static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
215{
216 struct sock *sk = chan->sk;
217
218 sk->sk_err = err;
219}
220
221static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
222{
223 struct sock *sk = chan->sk;
224
225 lock_sock(sk);
226 __l2cap_chan_set_err(chan, err);
227 release_sock(sk);
228}
229
230static void __set_retrans_timer(struct l2cap_chan *chan)
231{
232 if (!delayed_work_pending(&chan->monitor_timer) &&
233 chan->retrans_timeout) {
234 l2cap_set_timer(chan, &chan->retrans_timer,
235 msecs_to_jiffies(chan->retrans_timeout));
236 }
237}
238
239static void __set_monitor_timer(struct l2cap_chan *chan)
240{
241 __clear_retrans_timer(chan);
242 if (chan->monitor_timeout) {
243 l2cap_set_timer(chan, &chan->monitor_timer,
244 msecs_to_jiffies(chan->monitor_timeout));
245 }
246}
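
/* Illustrative sketch (added, not part of the original file): how the
 * timer helpers above are typically driven by the ERTM state machine.
 * The retransmission timer is only armed while no monitor timer is
 * pending, and arming the monitor timer clears any pending
 * retransmission timer, matching the rules checked above.  Timeout
 * values are assumed to have been negotiated into chan->retrans_timeout
 * and chan->monitor_timeout (milliseconds).  Guarded by "#if 0" so it
 * is never built; the function name is made up.
 */
#if 0
static void l2cap_ertm_timer_sketch(struct l2cap_chan *chan)
{
	/* I-frame transmitted: expect an acknowledgement in time */
	__set_retrans_timer(chan);

	/* Poll (P=1) sent while waiting for the F-bit: the monitor
	 * timer takes over and the retransmission timer is cleared.
	 */
	__set_monitor_timer(chan);
}
#endif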
247
248static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
249 u16 seq)
250{
251 struct sk_buff *skb;
252
253 skb_queue_walk(head, skb) {
254 if (bt_cb(skb)->control.txseq == seq)
255 return skb;
256 }
257
258 return NULL;
259}
260
261/* ---- L2CAP sequence number lists ---- */
262
263/* For ERTM, ordered lists of sequence numbers must be tracked for
264 * SREJ requests that are received and for frames that are to be
265 * retransmitted. These seq_list functions implement a singly-linked
266 * list in an array, where membership in the list can also be checked
267 * in constant time. Items can also be added to the tail of the list
268 * and removed from the head in constant time, without further memory
269 * allocs or frees.
270 */
271
272static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
273{
274 size_t alloc_size, i;
275
276 /* Allocated size is a power of 2 to map sequence numbers
 277 * (which may be up to 14 bits) into a smaller array that is
278 * sized for the negotiated ERTM transmit windows.
279 */
280 alloc_size = roundup_pow_of_two(size);
281
282 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
283 if (!seq_list->list)
284 return -ENOMEM;
285
286 seq_list->mask = alloc_size - 1;
287 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
288 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
289 for (i = 0; i < alloc_size; i++)
290 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
291
292 return 0;
293}
294
295static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
296{
297 kfree(seq_list->list);
298}
299
300static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
301 u16 seq)
302{
303 /* Constant-time check for list membership */
304 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
305}
306
307static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
308{
309 u16 mask = seq_list->mask;
310
311 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
312 /* In case someone tries to pop the head of an empty list */
313 return L2CAP_SEQ_LIST_CLEAR;
314 } else if (seq_list->head == seq) {
315 /* Head can be removed in constant time */
316 seq_list->head = seq_list->list[seq & mask];
317 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
318
319 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
320 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
321 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
322 }
323 } else {
324 /* Walk the list to find the sequence number */
325 u16 prev = seq_list->head;
326 while (seq_list->list[prev & mask] != seq) {
327 prev = seq_list->list[prev & mask];
328 if (prev == L2CAP_SEQ_LIST_TAIL)
329 return L2CAP_SEQ_LIST_CLEAR;
330 }
331
332 /* Unlink the number from the list and clear it */
333 seq_list->list[prev & mask] = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335 if (seq_list->tail == seq)
336 seq_list->tail = prev;
337 }
338 return seq;
339}
340
341static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
342{
343 /* Remove the head in constant time */
344 return l2cap_seq_list_remove(seq_list, seq_list->head);
345}
346
347static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
348{
349 u16 i;
350
351 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
352 return;
353
354 for (i = 0; i <= seq_list->mask; i++)
355 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
356
357 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
358 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
359}
360
361static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
362{
363 u16 mask = seq_list->mask;
364
365 /* All appends happen in constant time */
366
367 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
368 return;
369
370 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
371 seq_list->head = seq;
372 else
373 seq_list->list[seq_list->tail & mask] = seq;
374
375 seq_list->tail = seq;
376 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
377}
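
/* Illustrative sketch (added, not part of the original file): basic use
 * of the seq_list helpers above for a hypothetical 8-entry window.
 * Guarded by "#if 0" so it is never built; the function name is made up.
 */
#if 0
static void l2cap_seq_list_sketch(void)
{
	struct l2cap_seq_list sl;

	if (l2cap_seq_list_init(&sl, 8))
		return;

	l2cap_seq_list_append(&sl, 3);	/* head = 3, tail = 3 */
	l2cap_seq_list_append(&sl, 5);	/* head = 3, tail = 5 */

	/* Membership check is a constant-time array lookup */
	WARN_ON(!l2cap_seq_list_contains(&sl, 5));

	/* Removing the head is also constant time */
	WARN_ON(l2cap_seq_list_pop(&sl) != 3);

	l2cap_seq_list_free(&sl);
}
#endif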
378
379static void l2cap_chan_timeout(struct work_struct *work)
380{
381 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
382 chan_timer.work);
383 struct l2cap_conn *conn = chan->conn;
384 int reason;
385
386 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
387
388 mutex_lock(&conn->chan_lock);
389 l2cap_chan_lock(chan);
390
391 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
392 reason = ECONNREFUSED;
393 else if (chan->state == BT_CONNECT &&
394 chan->sec_level != BT_SECURITY_SDP)
395 reason = ECONNREFUSED;
396 else
397 reason = ETIMEDOUT;
398
399 l2cap_chan_close(chan, reason);
400
401 l2cap_chan_unlock(chan);
402
403 chan->ops->close(chan->data);
404 mutex_unlock(&conn->chan_lock);
405
406 l2cap_chan_put(chan);
407}
408
409struct l2cap_chan *l2cap_chan_create(void)
410{
411 struct l2cap_chan *chan;
412
413 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
414 if (!chan)
415 return NULL;
416
417 mutex_init(&chan->lock);
418
419 write_lock(&chan_list_lock);
420 list_add(&chan->global_l, &chan_list);
421 write_unlock(&chan_list_lock);
422
423 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
424
425 chan->state = BT_OPEN;
426
427 atomic_set(&chan->refcnt, 1);
428
429 /* This flag is cleared in l2cap_chan_ready() */
430 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
431
432 BT_DBG("chan %p", chan);
433
434 return chan;
435}
436
437void l2cap_chan_destroy(struct l2cap_chan *chan)
438{
439 write_lock(&chan_list_lock);
440 list_del(&chan->global_l);
441 write_unlock(&chan_list_lock);
442
443 l2cap_chan_put(chan);
444}
445
446void l2cap_chan_set_defaults(struct l2cap_chan *chan)
447{
448 chan->fcs = L2CAP_FCS_CRC16;
449 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
450 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
451 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
452 chan->sec_level = BT_SECURITY_LOW;
453
454 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
455}
456
457static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
458{
459 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
460 __le16_to_cpu(chan->psm), chan->dcid);
461
462 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
463
464 chan->conn = conn;
465
466 switch (chan->chan_type) {
467 case L2CAP_CHAN_CONN_ORIENTED:
468 if (conn->hcon->type == LE_LINK) {
469 /* LE connection */
470 chan->omtu = L2CAP_LE_DEFAULT_MTU;
471 chan->scid = L2CAP_CID_LE_DATA;
472 chan->dcid = L2CAP_CID_LE_DATA;
473 } else {
474 /* Alloc CID for connection-oriented socket */
475 chan->scid = l2cap_alloc_cid(conn);
476 chan->omtu = L2CAP_DEFAULT_MTU;
477 }
478 break;
479
480 case L2CAP_CHAN_CONN_LESS:
481 /* Connectionless socket */
482 chan->scid = L2CAP_CID_CONN_LESS;
483 chan->dcid = L2CAP_CID_CONN_LESS;
484 chan->omtu = L2CAP_DEFAULT_MTU;
485 break;
486
487 default:
488 /* Raw socket can send/recv signalling messages only */
489 chan->scid = L2CAP_CID_SIGNALING;
490 chan->dcid = L2CAP_CID_SIGNALING;
491 chan->omtu = L2CAP_DEFAULT_MTU;
492 }
493
494 chan->local_id = L2CAP_BESTEFFORT_ID;
495 chan->local_stype = L2CAP_SERV_BESTEFFORT;
496 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
497 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
498 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
499 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
500
501 l2cap_chan_hold(chan);
502
503 list_add(&chan->list, &conn->chan_l);
504}
505
506static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
507{
508 mutex_lock(&conn->chan_lock);
509 __l2cap_chan_add(conn, chan);
510 mutex_unlock(&conn->chan_lock);
511}
512
513static void l2cap_chan_del(struct l2cap_chan *chan, int err)
514{
515 struct sock *sk = chan->sk;
516 struct l2cap_conn *conn = chan->conn;
517 struct sock *parent = bt_sk(sk)->parent;
518
519 __clear_chan_timer(chan);
520
521 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
522
523 if (conn) {
524 /* Delete from channel list */
525 list_del(&chan->list);
526
527 l2cap_chan_put(chan);
528
529 chan->conn = NULL;
530 hci_conn_put(conn->hcon);
531 }
532
533 lock_sock(sk);
534
535 __l2cap_state_change(chan, BT_CLOSED);
536 sock_set_flag(sk, SOCK_ZAPPED);
537
538 if (err)
539 __l2cap_chan_set_err(chan, err);
540
541 if (parent) {
542 bt_accept_unlink(sk);
543 parent->sk_data_ready(parent, 0);
544 } else
545 sk->sk_state_change(sk);
546
547 release_sock(sk);
548
549 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
550 return;
551
552 skb_queue_purge(&chan->tx_q);
553
554 if (chan->mode == L2CAP_MODE_ERTM) {
555 __clear_retrans_timer(chan);
556 __clear_monitor_timer(chan);
557 __clear_ack_timer(chan);
558
559 skb_queue_purge(&chan->srej_q);
560
561 l2cap_seq_list_free(&chan->srej_list);
562 l2cap_seq_list_free(&chan->retrans_list);
563 }
564}
565
566static void l2cap_chan_cleanup_listen(struct sock *parent)
567{
568 struct sock *sk;
569
570 BT_DBG("parent %p", parent);
571
572 /* Close not yet accepted channels */
573 while ((sk = bt_accept_dequeue(parent, NULL))) {
574 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
575
576 l2cap_chan_lock(chan);
577 __clear_chan_timer(chan);
578 l2cap_chan_close(chan, ECONNRESET);
579 l2cap_chan_unlock(chan);
580
581 chan->ops->close(chan->data);
582 }
583}
584
585void l2cap_chan_close(struct l2cap_chan *chan, int reason)
586{
587 struct l2cap_conn *conn = chan->conn;
588 struct sock *sk = chan->sk;
589
590 BT_DBG("chan %p state %s sk %p", chan,
591 state_to_string(chan->state), sk);
592
593 switch (chan->state) {
594 case BT_LISTEN:
595 lock_sock(sk);
596 l2cap_chan_cleanup_listen(sk);
597
598 __l2cap_state_change(chan, BT_CLOSED);
599 sock_set_flag(sk, SOCK_ZAPPED);
600 release_sock(sk);
601 break;
602
603 case BT_CONNECTED:
604 case BT_CONFIG:
605 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
606 conn->hcon->type == ACL_LINK) {
607 __set_chan_timer(chan, sk->sk_sndtimeo);
608 l2cap_send_disconn_req(conn, chan, reason);
609 } else
610 l2cap_chan_del(chan, reason);
611 break;
612
613 case BT_CONNECT2:
614 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
615 conn->hcon->type == ACL_LINK) {
616 struct l2cap_conn_rsp rsp;
617 __u16 result;
618
619 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
620 result = L2CAP_CR_SEC_BLOCK;
621 else
622 result = L2CAP_CR_BAD_PSM;
623 l2cap_state_change(chan, BT_DISCONN);
624
625 rsp.scid = cpu_to_le16(chan->dcid);
626 rsp.dcid = cpu_to_le16(chan->scid);
627 rsp.result = cpu_to_le16(result);
628 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
629 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
630 sizeof(rsp), &rsp);
631 }
632
633 l2cap_chan_del(chan, reason);
634 break;
635
636 case BT_CONNECT:
637 case BT_DISCONN:
638 l2cap_chan_del(chan, reason);
639 break;
640
641 default:
642 lock_sock(sk);
643 sock_set_flag(sk, SOCK_ZAPPED);
644 release_sock(sk);
645 break;
646 }
647}
648
649static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
650{
651 if (chan->chan_type == L2CAP_CHAN_RAW) {
652 switch (chan->sec_level) {
653 case BT_SECURITY_HIGH:
654 return HCI_AT_DEDICATED_BONDING_MITM;
655 case BT_SECURITY_MEDIUM:
656 return HCI_AT_DEDICATED_BONDING;
657 default:
658 return HCI_AT_NO_BONDING;
659 }
660 } else if (chan->psm == cpu_to_le16(0x0001)) {
661 if (chan->sec_level == BT_SECURITY_LOW)
662 chan->sec_level = BT_SECURITY_SDP;
663
664 if (chan->sec_level == BT_SECURITY_HIGH)
665 return HCI_AT_NO_BONDING_MITM;
666 else
667 return HCI_AT_NO_BONDING;
668 } else {
669 switch (chan->sec_level) {
670 case BT_SECURITY_HIGH:
671 return HCI_AT_GENERAL_BONDING_MITM;
672 case BT_SECURITY_MEDIUM:
673 return HCI_AT_GENERAL_BONDING;
674 default:
675 return HCI_AT_NO_BONDING;
676 }
677 }
678}
679
680/* Service level security */
681int l2cap_chan_check_security(struct l2cap_chan *chan)
682{
683 struct l2cap_conn *conn = chan->conn;
684 __u8 auth_type;
685
686 auth_type = l2cap_get_auth_type(chan);
687
688 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
689}
690
691static u8 l2cap_get_ident(struct l2cap_conn *conn)
692{
693 u8 id;
694
 695 /* Get next available identifier.
696 * 1 - 128 are used by kernel.
697 * 129 - 199 are reserved.
698 * 200 - 254 are used by utilities like l2ping, etc.
699 */
700
701 spin_lock(&conn->lock);
702
703 if (++conn->tx_ident > 128)
704 conn->tx_ident = 1;
705
706 id = conn->tx_ident;
707
708 spin_unlock(&conn->lock);
709
710 return id;
711}
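
/* Added note (not part of the original file): tx_ident therefore cycles
 * through 1..128.  Starting from a fresh connection (tx_ident == 0) the
 * successive identifiers are 1, 2, ..., 128, 1, 2, ..., so that
 * kernel-generated identifiers stay out of the ranges reserved for the
 * userspace utilities described above.
 */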
712
713static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
714{
715 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
716 u8 flags;
717
718 BT_DBG("code 0x%2.2x", code);
719
720 if (!skb)
721 return;
722
723 if (lmp_no_flush_capable(conn->hcon->hdev))
724 flags = ACL_START_NO_FLUSH;
725 else
726 flags = ACL_START;
727
728 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
729 skb->priority = HCI_PRIO_MAX;
730
731 hci_send_acl(conn->hchan, skb, flags);
732}
733
734static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
735{
736 struct hci_conn *hcon = chan->conn->hcon;
737 u16 flags;
738
739 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
740 skb->priority);
741
742 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
743 lmp_no_flush_capable(hcon->hdev))
744 flags = ACL_START_NO_FLUSH;
745 else
746 flags = ACL_START;
747
748 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
749 hci_send_acl(chan->conn->hchan, skb, flags);
750}
751
752static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
753{
754 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
755 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
756
757 if (enh & L2CAP_CTRL_FRAME_TYPE) {
758 /* S-Frame */
759 control->sframe = 1;
760 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
761 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
762
763 control->sar = 0;
764 control->txseq = 0;
765 } else {
766 /* I-Frame */
767 control->sframe = 0;
768 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
769 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
770
771 control->poll = 0;
772 control->super = 0;
773 }
774}
775
776static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
777{
778 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
779 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
780
781 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
782 /* S-Frame */
783 control->sframe = 1;
784 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
785 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
786
787 control->sar = 0;
788 control->txseq = 0;
789 } else {
790 /* I-Frame */
791 control->sframe = 0;
792 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
793 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
794
795 control->poll = 0;
796 control->super = 0;
797 }
798}
799
800static inline void __unpack_control(struct l2cap_chan *chan,
801 struct sk_buff *skb)
802{
803 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
804 __unpack_extended_control(get_unaligned_le32(skb->data),
805 &bt_cb(skb)->control);
806 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
807 } else {
808 __unpack_enhanced_control(get_unaligned_le16(skb->data),
809 &bt_cb(skb)->control);
810 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
811 }
812}
813
814static u32 __pack_extended_control(struct l2cap_ctrl *control)
815{
816 u32 packed;
817
818 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
819 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
820
821 if (control->sframe) {
822 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
823 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
824 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
825 } else {
826 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
827 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
828 }
829
830 return packed;
831}
832
833static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
834{
835 u16 packed;
836
837 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
838 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
839
840 if (control->sframe) {
841 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
842 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
843 packed |= L2CAP_CTRL_FRAME_TYPE;
844 } else {
845 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
846 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
847 }
848
849 return packed;
850}
851
852static inline void __pack_control(struct l2cap_chan *chan,
853 struct l2cap_ctrl *control,
854 struct sk_buff *skb)
855{
856 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
857 put_unaligned_le32(__pack_extended_control(control),
858 skb->data + L2CAP_HDR_SIZE);
859 } else {
860 put_unaligned_le16(__pack_enhanced_control(control),
861 skb->data + L2CAP_HDR_SIZE);
862 }
863}
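
/* Illustrative sketch (added, not part of the original file): the pack
 * and unpack helpers above are inverses of each other for the fields of
 * a given frame type, so a control word should round-trip.  Only the
 * L2CAP_CTRL_* constants from l2cap.h are relied on; no bit positions
 * are assumed here, and the function name is made up.
 */
#if 0
static void l2cap_control_roundtrip_sketch(void)
{
	struct l2cap_ctrl in = {
		.sframe = 0,		/* I-frame */
		.sar    = L2CAP_SAR_START,
		.txseq  = 5,
		.reqseq = 2,
		.final  = 1,
	};
	struct l2cap_ctrl out;
	u16 enh = __pack_enhanced_control(&in);

	__unpack_enhanced_control(enh, &out);
	/* out.sar, out.txseq, out.reqseq and out.final now equal in.* */
}
#endif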
864
865static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
866 u32 control)
867{
868 struct sk_buff *skb;
869 struct l2cap_hdr *lh;
870 int hlen;
871
872 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
873 hlen = L2CAP_EXT_HDR_SIZE;
874 else
875 hlen = L2CAP_ENH_HDR_SIZE;
876
877 if (chan->fcs == L2CAP_FCS_CRC16)
878 hlen += L2CAP_FCS_SIZE;
879
880 skb = bt_skb_alloc(hlen, GFP_KERNEL);
881
882 if (!skb)
883 return ERR_PTR(-ENOMEM);
884
885 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
886 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
887 lh->cid = cpu_to_le16(chan->dcid);
888
889 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
890 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
891 else
892 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
893
894 if (chan->fcs == L2CAP_FCS_CRC16) {
895 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
896 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
897 }
898
899 skb->priority = HCI_PRIO_MAX;
900 return skb;
901}
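
/* Added note (not part of the original file): for a channel using the
 * enhanced (16-bit) control field with CRC16 enabled, the S-frame built
 * above is the 4 byte basic L2CAP header, 2 bytes of control and a
 * 2 byte FCS, with the FCS computed over the header and control bytes
 * already present in the skb.
 */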
902
903static void l2cap_send_sframe(struct l2cap_chan *chan,
904 struct l2cap_ctrl *control)
905{
906 struct sk_buff *skb;
907 u32 control_field;
908
909 BT_DBG("chan %p, control %p", chan, control);
910
911 if (!control->sframe)
912 return;
913
914 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
915 !control->poll)
916 control->final = 1;
917
918 if (control->super == L2CAP_SUPER_RR)
919 clear_bit(CONN_RNR_SENT, &chan->conn_state);
920 else if (control->super == L2CAP_SUPER_RNR)
921 set_bit(CONN_RNR_SENT, &chan->conn_state);
922
923 if (control->super != L2CAP_SUPER_SREJ) {
924 chan->last_acked_seq = control->reqseq;
925 __clear_ack_timer(chan);
926 }
927
928 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
929 control->final, control->poll, control->super);
930
931 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
932 control_field = __pack_extended_control(control);
933 else
934 control_field = __pack_enhanced_control(control);
935
936 skb = l2cap_create_sframe_pdu(chan, control_field);
937 if (!IS_ERR(skb))
938 l2cap_do_send(chan, skb);
939}
940
941static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
942{
943 struct l2cap_ctrl control;
944
945 BT_DBG("chan %p, poll %d", chan, poll);
946
947 memset(&control, 0, sizeof(control));
948 control.sframe = 1;
949 control.poll = poll;
950
951 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
952 control.super = L2CAP_SUPER_RNR;
953 else
954 control.super = L2CAP_SUPER_RR;
955
956 control.reqseq = chan->buffer_seq;
957 l2cap_send_sframe(chan, &control);
958}
959
960static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
961{
962 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
963}
964
965static void l2cap_send_conn_req(struct l2cap_chan *chan)
966{
967 struct l2cap_conn *conn = chan->conn;
968 struct l2cap_conn_req req;
969
970 req.scid = cpu_to_le16(chan->scid);
971 req.psm = chan->psm;
972
973 chan->ident = l2cap_get_ident(conn);
974
975 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
976
977 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
978}
979
980static void l2cap_chan_ready(struct l2cap_chan *chan)
981{
982 struct sock *sk = chan->sk;
983 struct sock *parent;
984
985 lock_sock(sk);
986
987 parent = bt_sk(sk)->parent;
988
989 BT_DBG("sk %p, parent %p", sk, parent);
990
991 /* This clears all conf flags, including CONF_NOT_COMPLETE */
992 chan->conf_state = 0;
993 __clear_chan_timer(chan);
994
995 __l2cap_state_change(chan, BT_CONNECTED);
996 sk->sk_state_change(sk);
997
998 if (parent)
999 parent->sk_data_ready(parent, 0);
1000
1001 release_sock(sk);
1002}
1003
1004static void l2cap_do_start(struct l2cap_chan *chan)
1005{
1006 struct l2cap_conn *conn = chan->conn;
1007
1008 if (conn->hcon->type == LE_LINK) {
1009 l2cap_chan_ready(chan);
1010 return;
1011 }
1012
1013 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1014 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1015 return;
1016
1017 if (l2cap_chan_check_security(chan) &&
1018 __l2cap_no_conn_pending(chan))
1019 l2cap_send_conn_req(chan);
1020 } else {
1021 struct l2cap_info_req req;
1022 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1023
1024 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1025 conn->info_ident = l2cap_get_ident(conn);
1026
1027 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1028
1029 l2cap_send_cmd(conn, conn->info_ident,
1030 L2CAP_INFO_REQ, sizeof(req), &req);
1031 }
1032}
1033
1034static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1035{
1036 u32 local_feat_mask = l2cap_feat_mask;
1037 if (!disable_ertm)
1038 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1039
1040 switch (mode) {
1041 case L2CAP_MODE_ERTM:
1042 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1043 case L2CAP_MODE_STREAMING:
1044 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1045 default:
1046 return 0x00;
1047 }
1048}
1049
1050static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1051{
1052 struct sock *sk = chan->sk;
1053 struct l2cap_disconn_req req;
1054
1055 if (!conn)
1056 return;
1057
1058 if (chan->mode == L2CAP_MODE_ERTM) {
1059 __clear_retrans_timer(chan);
1060 __clear_monitor_timer(chan);
1061 __clear_ack_timer(chan);
1062 }
1063
1064 req.dcid = cpu_to_le16(chan->dcid);
1065 req.scid = cpu_to_le16(chan->scid);
1066 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1067 L2CAP_DISCONN_REQ, sizeof(req), &req);
1068
1069 lock_sock(sk);
1070 __l2cap_state_change(chan, BT_DISCONN);
1071 __l2cap_chan_set_err(chan, err);
1072 release_sock(sk);
1073}
1074
1075/* ---- L2CAP connections ---- */
1076static void l2cap_conn_start(struct l2cap_conn *conn)
1077{
1078 struct l2cap_chan *chan, *tmp;
1079
1080 BT_DBG("conn %p", conn);
1081
1082 mutex_lock(&conn->chan_lock);
1083
1084 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1085 struct sock *sk = chan->sk;
1086
1087 l2cap_chan_lock(chan);
1088
1089 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1090 l2cap_chan_unlock(chan);
1091 continue;
1092 }
1093
1094 if (chan->state == BT_CONNECT) {
1095 if (!l2cap_chan_check_security(chan) ||
1096 !__l2cap_no_conn_pending(chan)) {
1097 l2cap_chan_unlock(chan);
1098 continue;
1099 }
1100
1101 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1102 && test_bit(CONF_STATE2_DEVICE,
1103 &chan->conf_state)) {
1104 l2cap_chan_close(chan, ECONNRESET);
1105 l2cap_chan_unlock(chan);
1106 continue;
1107 }
1108
1109 l2cap_send_conn_req(chan);
1110
1111 } else if (chan->state == BT_CONNECT2) {
1112 struct l2cap_conn_rsp rsp;
1113 char buf[128];
1114 rsp.scid = cpu_to_le16(chan->dcid);
1115 rsp.dcid = cpu_to_le16(chan->scid);
1116
1117 if (l2cap_chan_check_security(chan)) {
1118 lock_sock(sk);
1119 if (test_bit(BT_SK_DEFER_SETUP,
1120 &bt_sk(sk)->flags)) {
1121 struct sock *parent = bt_sk(sk)->parent;
1122 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1123 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1124 if (parent)
1125 parent->sk_data_ready(parent, 0);
1126
1127 } else {
1128 __l2cap_state_change(chan, BT_CONFIG);
1129 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1130 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1131 }
1132 release_sock(sk);
1133 } else {
1134 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1135 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1136 }
1137
1138 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1139 sizeof(rsp), &rsp);
1140
1141 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1142 rsp.result != L2CAP_CR_SUCCESS) {
1143 l2cap_chan_unlock(chan);
1144 continue;
1145 }
1146
1147 set_bit(CONF_REQ_SENT, &chan->conf_state);
1148 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1149 l2cap_build_conf_req(chan, buf), buf);
1150 chan->num_conf_req++;
1151 }
1152
1153 l2cap_chan_unlock(chan);
1154 }
1155
1156 mutex_unlock(&conn->chan_lock);
1157}
1158
1159/* Find socket with cid and source/destination bdaddr.
1160 * Returns closest match, locked.
1161 */
1162static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1163 bdaddr_t *src,
1164 bdaddr_t *dst)
1165{
1166 struct l2cap_chan *c, *c1 = NULL;
1167
1168 read_lock(&chan_list_lock);
1169
1170 list_for_each_entry(c, &chan_list, global_l) {
1171 struct sock *sk = c->sk;
1172
1173 if (state && c->state != state)
1174 continue;
1175
1176 if (c->scid == cid) {
1177 int src_match, dst_match;
1178 int src_any, dst_any;
1179
1180 /* Exact match. */
1181 src_match = !bacmp(&bt_sk(sk)->src, src);
1182 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1183 if (src_match && dst_match) {
1184 read_unlock(&chan_list_lock);
1185 return c;
1186 }
1187
1188 /* Closest match */
1189 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1190 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1191 if ((src_match && dst_any) || (src_any && dst_match) ||
1192 (src_any && dst_any))
1193 c1 = c;
1194 }
1195 }
1196
1197 read_unlock(&chan_list_lock);
1198
1199 return c1;
1200}
1201
1202static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1203{
1204 struct sock *parent, *sk;
1205 struct l2cap_chan *chan, *pchan;
1206
1207 BT_DBG("");
1208
1209 /* Check if we have socket listening on cid */
1210 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1211 conn->src, conn->dst);
1212 if (!pchan)
1213 return;
1214
1215 parent = pchan->sk;
1216
1217 lock_sock(parent);
1218
1219 /* Check for backlog size */
1220 if (sk_acceptq_is_full(parent)) {
1221 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1222 goto clean;
1223 }
1224
1225 chan = pchan->ops->new_connection(pchan->data);
1226 if (!chan)
1227 goto clean;
1228
1229 sk = chan->sk;
1230
1231 hci_conn_hold(conn->hcon);
1232
1233 bacpy(&bt_sk(sk)->src, conn->src);
1234 bacpy(&bt_sk(sk)->dst, conn->dst);
1235
1236 bt_accept_enqueue(parent, sk);
1237
1238 l2cap_chan_add(conn, chan);
1239
1240 __set_chan_timer(chan, sk->sk_sndtimeo);
1241
1242 __l2cap_state_change(chan, BT_CONNECTED);
1243 parent->sk_data_ready(parent, 0);
1244
1245clean:
1246 release_sock(parent);
1247}
1248
1249static void l2cap_conn_ready(struct l2cap_conn *conn)
1250{
1251 struct l2cap_chan *chan;
1252
1253 BT_DBG("conn %p", conn);
1254
1255 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1256 l2cap_le_conn_ready(conn);
1257
1258 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1259 smp_conn_security(conn, conn->hcon->pending_sec_level);
1260
1261 mutex_lock(&conn->chan_lock);
1262
1263 list_for_each_entry(chan, &conn->chan_l, list) {
1264
1265 l2cap_chan_lock(chan);
1266
1267 if (conn->hcon->type == LE_LINK) {
1268 if (smp_conn_security(conn, chan->sec_level))
1269 l2cap_chan_ready(chan);
1270
1271 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1272 struct sock *sk = chan->sk;
1273 __clear_chan_timer(chan);
1274 lock_sock(sk);
1275 __l2cap_state_change(chan, BT_CONNECTED);
1276 sk->sk_state_change(sk);
1277 release_sock(sk);
1278
1279 } else if (chan->state == BT_CONNECT)
1280 l2cap_do_start(chan);
1281
1282 l2cap_chan_unlock(chan);
1283 }
1284
1285 mutex_unlock(&conn->chan_lock);
1286}
1287
1288/* Notify sockets that we cannot guarantee reliability anymore */
1289static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1290{
1291 struct l2cap_chan *chan;
1292
1293 BT_DBG("conn %p", conn);
1294
1295 mutex_lock(&conn->chan_lock);
1296
1297 list_for_each_entry(chan, &conn->chan_l, list) {
1298 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1299 __l2cap_chan_set_err(chan, err);
1300 }
1301
1302 mutex_unlock(&conn->chan_lock);
1303}
1304
1305static void l2cap_info_timeout(struct work_struct *work)
1306{
1307 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1308 info_timer.work);
1309
1310 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1311 conn->info_ident = 0;
1312
1313 l2cap_conn_start(conn);
1314}
1315
1316static void l2cap_conn_del(struct hci_conn *hcon, int err)
1317{
1318 struct l2cap_conn *conn = hcon->l2cap_data;
1319 struct l2cap_chan *chan, *l;
1320
1321 if (!conn)
1322 return;
1323
1324 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1325
1326 kfree_skb(conn->rx_skb);
1327
1328 mutex_lock(&conn->chan_lock);
1329
1330 /* Kill channels */
1331 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1332 l2cap_chan_hold(chan);
1333 l2cap_chan_lock(chan);
1334
1335 l2cap_chan_del(chan, err);
1336
1337 l2cap_chan_unlock(chan);
1338
1339 chan->ops->close(chan->data);
1340 l2cap_chan_put(chan);
1341 }
1342
1343 mutex_unlock(&conn->chan_lock);
1344
1345 hci_chan_del(conn->hchan);
1346
1347 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1348 cancel_delayed_work_sync(&conn->info_timer);
1349
1350 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1351 cancel_delayed_work_sync(&conn->security_timer);
1352 smp_chan_destroy(conn);
1353 }
1354
1355 hcon->l2cap_data = NULL;
1356 kfree(conn);
1357}
1358
1359static void security_timeout(struct work_struct *work)
1360{
1361 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1362 security_timer.work);
1363
1364 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1365}
1366
1367static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1368{
1369 struct l2cap_conn *conn = hcon->l2cap_data;
1370 struct hci_chan *hchan;
1371
1372 if (conn || status)
1373 return conn;
1374
1375 hchan = hci_chan_create(hcon);
1376 if (!hchan)
1377 return NULL;
1378
1379 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1380 if (!conn) {
1381 hci_chan_del(hchan);
1382 return NULL;
1383 }
1384
1385 hcon->l2cap_data = conn;
1386 conn->hcon = hcon;
1387 conn->hchan = hchan;
1388
1389 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1390
1391 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1392 conn->mtu = hcon->hdev->le_mtu;
1393 else
1394 conn->mtu = hcon->hdev->acl_mtu;
1395
1396 conn->src = &hcon->hdev->bdaddr;
1397 conn->dst = &hcon->dst;
1398
1399 conn->feat_mask = 0;
1400
1401 spin_lock_init(&conn->lock);
1402 mutex_init(&conn->chan_lock);
1403
1404 INIT_LIST_HEAD(&conn->chan_l);
1405
1406 if (hcon->type == LE_LINK)
1407 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1408 else
1409 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1410
1411 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1412
1413 return conn;
1414}
1415
1416/* ---- Socket interface ---- */
1417
1418/* Find socket with psm and source / destination bdaddr.
1419 * Returns closest match.
1420 */
1421static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1422 bdaddr_t *src,
1423 bdaddr_t *dst)
1424{
1425 struct l2cap_chan *c, *c1 = NULL;
1426
1427 read_lock(&chan_list_lock);
1428
1429 list_for_each_entry(c, &chan_list, global_l) {
1430 struct sock *sk = c->sk;
1431
1432 if (state && c->state != state)
1433 continue;
1434
1435 if (c->psm == psm) {
1436 int src_match, dst_match;
1437 int src_any, dst_any;
1438
1439 /* Exact match. */
1440 src_match = !bacmp(&bt_sk(sk)->src, src);
1441 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1442 if (src_match && dst_match) {
1443 read_unlock(&chan_list_lock);
1444 return c;
1445 }
1446
1447 /* Closest match */
1448 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1449 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1450 if ((src_match && dst_any) || (src_any && dst_match) ||
1451 (src_any && dst_any))
1452 c1 = c;
1453 }
1454 }
1455
1456 read_unlock(&chan_list_lock);
1457
1458 return c1;
1459}
1460
1461int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1462 bdaddr_t *dst, u8 dst_type)
1463{
1464 struct sock *sk = chan->sk;
1465 bdaddr_t *src = &bt_sk(sk)->src;
1466 struct l2cap_conn *conn;
1467 struct hci_conn *hcon;
1468 struct hci_dev *hdev;
1469 __u8 auth_type;
1470 int err;
1471
1472 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1473 dst_type, __le16_to_cpu(chan->psm));
1474
1475 hdev = hci_get_route(dst, src);
1476 if (!hdev)
1477 return -EHOSTUNREACH;
1478
1479 hci_dev_lock(hdev);
1480
1481 l2cap_chan_lock(chan);
1482
1483 /* PSM must be odd and lsb of upper byte must be 0 */
1484 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1485 chan->chan_type != L2CAP_CHAN_RAW) {
1486 err = -EINVAL;
1487 goto done;
1488 }
1489
1490 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1491 err = -EINVAL;
1492 goto done;
1493 }
1494
1495 switch (chan->mode) {
1496 case L2CAP_MODE_BASIC:
1497 break;
1498 case L2CAP_MODE_ERTM:
1499 case L2CAP_MODE_STREAMING:
1500 if (!disable_ertm)
1501 break;
1502 /* fall through */
1503 default:
1504 err = -ENOTSUPP;
1505 goto done;
1506 }
1507
1508 lock_sock(sk);
1509
1510 switch (sk->sk_state) {
1511 case BT_CONNECT:
1512 case BT_CONNECT2:
1513 case BT_CONFIG:
1514 /* Already connecting */
1515 err = 0;
1516 release_sock(sk);
1517 goto done;
1518
1519 case BT_CONNECTED:
1520 /* Already connected */
1521 err = -EISCONN;
1522 release_sock(sk);
1523 goto done;
1524
1525 case BT_OPEN:
1526 case BT_BOUND:
1527 /* Can connect */
1528 break;
1529
1530 default:
1531 err = -EBADFD;
1532 release_sock(sk);
1533 goto done;
1534 }
1535
1536 /* Set destination address and psm */
1537 bacpy(&bt_sk(sk)->dst, dst);
1538
1539 release_sock(sk);
1540
1541 chan->psm = psm;
1542 chan->dcid = cid;
1543
1544 auth_type = l2cap_get_auth_type(chan);
1545
1546 if (chan->dcid == L2CAP_CID_LE_DATA)
1547 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1548 chan->sec_level, auth_type);
1549 else
1550 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1551 chan->sec_level, auth_type);
1552
1553 if (IS_ERR(hcon)) {
1554 err = PTR_ERR(hcon);
1555 goto done;
1556 }
1557
1558 conn = l2cap_conn_add(hcon, 0);
1559 if (!conn) {
1560 hci_conn_put(hcon);
1561 err = -ENOMEM;
1562 goto done;
1563 }
1564
1565 if (hcon->type == LE_LINK) {
1566 err = 0;
1567
1568 if (!list_empty(&conn->chan_l)) {
1569 err = -EBUSY;
1570 hci_conn_put(hcon);
1571 }
1572
1573 if (err)
1574 goto done;
1575 }
1576
1577 /* Update source addr of the socket */
1578 bacpy(src, conn->src);
1579
1580 l2cap_chan_unlock(chan);
1581 l2cap_chan_add(conn, chan);
1582 l2cap_chan_lock(chan);
1583
1584 l2cap_state_change(chan, BT_CONNECT);
1585 __set_chan_timer(chan, sk->sk_sndtimeo);
1586
1587 if (hcon->state == BT_CONNECTED) {
1588 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1589 __clear_chan_timer(chan);
1590 if (l2cap_chan_check_security(chan))
1591 l2cap_state_change(chan, BT_CONNECTED);
1592 } else
1593 l2cap_do_start(chan);
1594 }
1595
1596 err = 0;
1597
1598done:
1599 l2cap_chan_unlock(chan);
1600 hci_dev_unlock(hdev);
1601 hci_dev_put(hdev);
1602 return err;
1603}
1604
1605int __l2cap_wait_ack(struct sock *sk)
1606{
1607 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1608 DECLARE_WAITQUEUE(wait, current);
1609 int err = 0;
1610 int timeo = HZ/5;
1611
1612 add_wait_queue(sk_sleep(sk), &wait);
1613 set_current_state(TASK_INTERRUPTIBLE);
1614 while (chan->unacked_frames > 0 && chan->conn) {
1615 if (!timeo)
1616 timeo = HZ/5;
1617
1618 if (signal_pending(current)) {
1619 err = sock_intr_errno(timeo);
1620 break;
1621 }
1622
1623 release_sock(sk);
1624 timeo = schedule_timeout(timeo);
1625 lock_sock(sk);
1626 set_current_state(TASK_INTERRUPTIBLE);
1627
1628 err = sock_error(sk);
1629 if (err)
1630 break;
1631 }
1632 set_current_state(TASK_RUNNING);
1633 remove_wait_queue(sk_sleep(sk), &wait);
1634 return err;
1635}
1636
1637static void l2cap_monitor_timeout(struct work_struct *work)
1638{
1639 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1640 monitor_timer.work);
1641
1642 BT_DBG("chan %p", chan);
1643
1644 l2cap_chan_lock(chan);
1645
1646 if (chan->retry_count >= chan->remote_max_tx) {
1647 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1648 l2cap_chan_unlock(chan);
1649 l2cap_chan_put(chan);
1650 return;
1651 }
1652
1653 chan->retry_count++;
1654 __set_monitor_timer(chan);
1655
1656 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1657 l2cap_chan_unlock(chan);
1658 l2cap_chan_put(chan);
1659}
1660
1661static void l2cap_retrans_timeout(struct work_struct *work)
1662{
1663 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1664 retrans_timer.work);
1665
1666 BT_DBG("chan %p", chan);
1667
1668 l2cap_chan_lock(chan);
1669
1670 chan->retry_count = 1;
1671 __set_monitor_timer(chan);
1672
1673 set_bit(CONN_WAIT_F, &chan->conn_state);
1674
1675 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1676
1677 l2cap_chan_unlock(chan);
1678 l2cap_chan_put(chan);
1679}
1680
1681static int l2cap_streaming_send(struct l2cap_chan *chan,
1682 struct sk_buff_head *skbs)
1683{
1684 struct sk_buff *skb;
1685 struct l2cap_ctrl *control;
1686
1687 BT_DBG("chan %p, skbs %p", chan, skbs);
1688
1689 if (chan->state != BT_CONNECTED)
1690 return -ENOTCONN;
1691
1692 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1693
1694 while (!skb_queue_empty(&chan->tx_q)) {
1695
1696 skb = skb_dequeue(&chan->tx_q);
1697
1698 bt_cb(skb)->control.retries = 1;
1699 control = &bt_cb(skb)->control;
1700
1701 control->reqseq = 0;
1702 control->txseq = chan->next_tx_seq;
1703
1704 __pack_control(chan, control, skb);
1705
1706 if (chan->fcs == L2CAP_FCS_CRC16) {
1707 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1708 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1709 }
1710
1711 l2cap_do_send(chan, skb);
1712
1713 BT_DBG("Sent txseq %d", (int)control->txseq);
1714
1715 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1716 chan->frames_sent++;
1717 }
1718
1719 return 0;
1720}
1721
1722static int l2cap_ertm_send(struct l2cap_chan *chan)
1723{
1724 struct sk_buff *skb, *tx_skb;
1725 struct l2cap_ctrl *control;
1726 int sent = 0;
1727
1728 BT_DBG("chan %p", chan);
1729
1730 if (chan->state != BT_CONNECTED)
1731 return -ENOTCONN;
1732
1733 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1734 return 0;
1735
1736 while (chan->tx_send_head &&
1737 chan->unacked_frames < chan->remote_tx_win &&
1738 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1739
1740 skb = chan->tx_send_head;
1741
1742 bt_cb(skb)->control.retries = 1;
1743 control = &bt_cb(skb)->control;
1744
1745 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1746 control->final = 1;
1747
1748 control->reqseq = chan->buffer_seq;
1749 chan->last_acked_seq = chan->buffer_seq;
1750 control->txseq = chan->next_tx_seq;
1751
1752 __pack_control(chan, control, skb);
1753
1754 if (chan->fcs == L2CAP_FCS_CRC16) {
1755 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1756 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1757 }
1758
1759 /* Clone after data has been modified. Data is assumed to be
1760 read-only (for locking purposes) on cloned sk_buffs.
1761 */
1762 tx_skb = skb_clone(skb, GFP_KERNEL);
1763
1764 if (!tx_skb)
1765 break;
1766
1767 __set_retrans_timer(chan);
1768
1769 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1770 chan->unacked_frames++;
1771 chan->frames_sent++;
1772 sent++;
1773
1774 if (skb_queue_is_last(&chan->tx_q, skb))
1775 chan->tx_send_head = NULL;
1776 else
1777 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1778
1779 l2cap_do_send(chan, tx_skb);
1780 BT_DBG("Sent txseq %d", (int)control->txseq);
1781 }
1782
1783 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1784 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
1785
1786 return sent;
1787}
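
/* Added note (not part of the original file): the window check above
 * means that with, say, remote_tx_win = 10 at most ten I-frames can be
 * outstanding; the loop stops once unacked_frames reaches that value
 * and transmission resumes when acknowledgements free up the window.
 */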
1788
1789static void l2cap_ertm_resend(struct l2cap_chan *chan)
1790{
1791 struct l2cap_ctrl control;
1792 struct sk_buff *skb;
1793 struct sk_buff *tx_skb;
1794 u16 seq;
1795
1796 BT_DBG("chan %p", chan);
1797
1798 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1799 return;
1800
1801 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1802 seq = l2cap_seq_list_pop(&chan->retrans_list);
1803
1804 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1805 if (!skb) {
1806 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1807 seq);
1808 continue;
1809 }
1810
1811 bt_cb(skb)->control.retries++;
1812 control = bt_cb(skb)->control;
1813
1814 if (chan->max_tx != 0 &&
1815 bt_cb(skb)->control.retries > chan->max_tx) {
1816 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1817 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1818 l2cap_seq_list_clear(&chan->retrans_list);
1819 break;
1820 }
1821
1822 control.reqseq = chan->buffer_seq;
1823 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1824 control.final = 1;
1825 else
1826 control.final = 0;
1827
1828 if (skb_cloned(skb)) {
1829 /* Cloned sk_buffs are read-only, so we need a
1830 * writeable copy
1831 */
1832 tx_skb = skb_copy(skb, GFP_ATOMIC);
1833 } else {
1834 tx_skb = skb_clone(skb, GFP_ATOMIC);
1835 }
1836
1837 if (!tx_skb) {
1838 l2cap_seq_list_clear(&chan->retrans_list);
1839 break;
1840 }
1841
1842 /* Update skb contents */
1843 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1844 put_unaligned_le32(__pack_extended_control(&control),
1845 tx_skb->data + L2CAP_HDR_SIZE);
1846 } else {
1847 put_unaligned_le16(__pack_enhanced_control(&control),
1848 tx_skb->data + L2CAP_HDR_SIZE);
1849 }
1850
1851 if (chan->fcs == L2CAP_FCS_CRC16) {
1852 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1853 put_unaligned_le16(fcs, skb_put(tx_skb,
1854 L2CAP_FCS_SIZE));
1855 }
1856
1857 l2cap_do_send(chan, tx_skb);
1858
1859 BT_DBG("Resent txseq %d", control.txseq);
1860
1861 chan->last_acked_seq = chan->buffer_seq;
1862 }
1863}
1864
1865static void l2cap_retransmit(struct l2cap_chan *chan,
1866 struct l2cap_ctrl *control)
1867{
1868 BT_DBG("chan %p, control %p", chan, control);
1869
1870 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1871 l2cap_ertm_resend(chan);
1872}
1873
1874static void l2cap_retransmit_all(struct l2cap_chan *chan,
1875 struct l2cap_ctrl *control)
1876{
1877 struct sk_buff *skb;
1878
1879 BT_DBG("chan %p, control %p", chan, control);
1880
1881 if (control->poll)
1882 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1883
1884 l2cap_seq_list_clear(&chan->retrans_list);
1885
1886 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1887 return;
1888
1889 if (chan->unacked_frames) {
1890 skb_queue_walk(&chan->tx_q, skb) {
1891 if (bt_cb(skb)->control.txseq == control->reqseq ||
1892 skb == chan->tx_send_head)
1893 break;
1894 }
1895
1896 skb_queue_walk_from(&chan->tx_q, skb) {
1897 if (skb == chan->tx_send_head)
1898 break;
1899
1900 l2cap_seq_list_append(&chan->retrans_list,
1901 bt_cb(skb)->control.txseq);
1902 }
1903
1904 l2cap_ertm_resend(chan);
1905 }
1906}
1907
1908static void l2cap_send_ack(struct l2cap_chan *chan)
1909{
1910 struct l2cap_ctrl control;
1911 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1912 chan->last_acked_seq);
1913 int threshold;
1914
1915 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1916 chan, chan->last_acked_seq, chan->buffer_seq);
1917
1918 memset(&control, 0, sizeof(control));
1919 control.sframe = 1;
1920
1921 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1922 chan->rx_state == L2CAP_RX_STATE_RECV) {
1923 __clear_ack_timer(chan);
1924 control.super = L2CAP_SUPER_RNR;
1925 control.reqseq = chan->buffer_seq;
1926 l2cap_send_sframe(chan, &control);
1927 } else {
1928 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1929 l2cap_ertm_send(chan);
1930 /* If any i-frames were sent, they included an ack */
1931 if (chan->buffer_seq == chan->last_acked_seq)
1932 frames_to_ack = 0;
1933 }
1934
1935 /* Ack now if the tx window is 3/4ths full.
1936 * Calculate without mul or div
1937 */
1938 threshold = chan->tx_win;
1939 threshold += threshold << 1;
1940 threshold >>= 2;
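		/* Worked example (added note, not in the original source):
		 * with tx_win = 63 this computes 63 + (63 << 1) = 189 and
		 * 189 >> 2 = 47, i.e. (63 * 3) / 4 rounded down.
		 */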
1941
1942 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1943 threshold);
1944
1945 if (frames_to_ack >= threshold) {
1946 __clear_ack_timer(chan);
1947 control.super = L2CAP_SUPER_RR;
1948 control.reqseq = chan->buffer_seq;
1949 l2cap_send_sframe(chan, &control);
1950 frames_to_ack = 0;
1951 }
1952
1953 if (frames_to_ack)
1954 __set_ack_timer(chan);
1955 }
1956}
1957
1958static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1959 struct msghdr *msg, int len,
1960 int count, struct sk_buff *skb)
1961{
1962 struct l2cap_conn *conn = chan->conn;
1963 struct sk_buff **frag;
1964 int sent = 0;
1965
1966 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1967 return -EFAULT;
1968
1969 sent += count;
1970 len -= count;
1971
1972 /* Continuation fragments (no L2CAP header) */
1973 frag = &skb_shinfo(skb)->frag_list;
1974 while (len) {
1975 struct sk_buff *tmp;
1976
1977 count = min_t(unsigned int, conn->mtu, len);
1978
1979 tmp = chan->ops->alloc_skb(chan, count,
1980 msg->msg_flags & MSG_DONTWAIT);
1981 if (IS_ERR(tmp))
1982 return PTR_ERR(tmp);
1983
1984 *frag = tmp;
1985
1986 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1987 return -EFAULT;
1988
1989 (*frag)->priority = skb->priority;
1990
1991 sent += count;
1992 len -= count;
1993
1994 skb->len += (*frag)->len;
1995 skb->data_len += (*frag)->len;
1996
1997 frag = &(*frag)->next;
1998 }
1999
2000 return sent;
2001}
2002
2003static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2004 struct msghdr *msg, size_t len,
2005 u32 priority)
2006{
2007 struct l2cap_conn *conn = chan->conn;
2008 struct sk_buff *skb;
2009 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2010 struct l2cap_hdr *lh;
2011
2012 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
2013
2014 count = min_t(unsigned int, (conn->mtu - hlen), len);
2015
2016 skb = chan->ops->alloc_skb(chan, count + hlen,
2017 msg->msg_flags & MSG_DONTWAIT);
2018 if (IS_ERR(skb))
2019 return skb;
2020
2021 skb->priority = priority;
2022
2023 /* Create L2CAP header */
2024 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2025 lh->cid = cpu_to_le16(chan->dcid);
2026 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2027 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2028
2029 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2030 if (unlikely(err < 0)) {
2031 kfree_skb(skb);
2032 return ERR_PTR(err);
2033 }
2034 return skb;
2035}
2036
2037static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2038 struct msghdr *msg, size_t len,
2039 u32 priority)
2040{
2041 struct l2cap_conn *conn = chan->conn;
2042 struct sk_buff *skb;
2043 int err, count;
2044 struct l2cap_hdr *lh;
2045
2046 BT_DBG("chan %p len %d", chan, (int)len);
2047
2048 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2049
2050 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2051 msg->msg_flags & MSG_DONTWAIT);
2052 if (IS_ERR(skb))
2053 return skb;
2054
2055 skb->priority = priority;
2056
2057 /* Create L2CAP header */
2058 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2059 lh->cid = cpu_to_le16(chan->dcid);
2060 lh->len = cpu_to_le16(len);
2061
2062 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2063 if (unlikely(err < 0)) {
2064 kfree_skb(skb);
2065 return ERR_PTR(err);
2066 }
2067 return skb;
2068}
2069
2070static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2071 struct msghdr *msg, size_t len,
2072 u16 sdulen)
2073{
2074 struct l2cap_conn *conn = chan->conn;
2075 struct sk_buff *skb;
2076 int err, count, hlen;
2077 struct l2cap_hdr *lh;
2078
2079 BT_DBG("chan %p len %d", chan, (int)len);
2080
2081 if (!conn)
2082 return ERR_PTR(-ENOTCONN);
2083
2084 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2085 hlen = L2CAP_EXT_HDR_SIZE;
2086 else
2087 hlen = L2CAP_ENH_HDR_SIZE;
2088
2089 if (sdulen)
2090 hlen += L2CAP_SDULEN_SIZE;
2091
2092 if (chan->fcs == L2CAP_FCS_CRC16)
2093 hlen += L2CAP_FCS_SIZE;
2094
2095 count = min_t(unsigned int, (conn->mtu - hlen), len);
2096
2097 skb = chan->ops->alloc_skb(chan, count + hlen,
2098 msg->msg_flags & MSG_DONTWAIT);
2099 if (IS_ERR(skb))
2100 return skb;
2101
2102 /* Create L2CAP header */
2103 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2104 lh->cid = cpu_to_le16(chan->dcid);
2105 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2106
2107 /* Control header is populated later */
2108 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2109 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2110 else
2111 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2112
2113 if (sdulen)
2114 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2115
2116 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2117 if (unlikely(err < 0)) {
2118 kfree_skb(skb);
2119 return ERR_PTR(err);
2120 }
2121
2122 bt_cb(skb)->control.fcs = chan->fcs;
2123 bt_cb(skb)->control.retries = 0;
2124 return skb;
2125}
2126
2127static int l2cap_segment_sdu(struct l2cap_chan *chan,
2128 struct sk_buff_head *seg_queue,
2129 struct msghdr *msg, size_t len)
2130{
2131 struct sk_buff *skb;
2132 u16 sdu_len;
2133 size_t pdu_len;
2134 int err = 0;
2135 u8 sar;
2136
2137 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2138
2139 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2140 * so fragmented skbs are not used. The HCI layer's handling
2141 * of fragmented skbs is not compatible with ERTM's queueing.
2142 */
2143
2144 /* PDU size is derived from the HCI MTU */
2145 pdu_len = chan->conn->mtu;
2146
2147 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2148
2149 /* Adjust for largest possible L2CAP overhead. */
2150 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2151
2152 /* Remote device may have requested smaller PDUs */
2153 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2154
2155 if (len <= pdu_len) {
2156 sar = L2CAP_SAR_UNSEGMENTED;
2157 sdu_len = 0;
2158 pdu_len = len;
2159 } else {
2160 sar = L2CAP_SAR_START;
2161 sdu_len = len;
2162 pdu_len -= L2CAP_SDULEN_SIZE;
2163 }
2164
2165 while (len > 0) {
2166 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2167
2168 if (IS_ERR(skb)) {
2169 __skb_queue_purge(seg_queue);
2170 return PTR_ERR(skb);
2171 }
2172
2173 bt_cb(skb)->control.sar = sar;
2174 __skb_queue_tail(seg_queue, skb);
2175
2176 len -= pdu_len;
2177 if (sdu_len) {
2178 sdu_len = 0;
2179 pdu_len += L2CAP_SDULEN_SIZE;
2180 }
2181
2182 if (len <= pdu_len) {
2183 sar = L2CAP_SAR_END;
2184 pdu_len = len;
2185 } else {
2186 sar = L2CAP_SAR_CONTINUE;
2187 }
2188 }
2189
2190 return err;
2191}
2192
2193int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2194 u32 priority)
2195{
2196 struct sk_buff *skb;
2197 int err;
2198 struct sk_buff_head seg_queue;
2199
2200 /* Connectionless channel */
2201 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2202 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2203 if (IS_ERR(skb))
2204 return PTR_ERR(skb);
2205
2206 l2cap_do_send(chan, skb);
2207 return len;
2208 }
2209
2210 switch (chan->mode) {
2211 case L2CAP_MODE_BASIC:
2212 /* Check outgoing MTU */
2213 if (len > chan->omtu)
2214 return -EMSGSIZE;
2215
2216 /* Create a basic PDU */
2217 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2218 if (IS_ERR(skb))
2219 return PTR_ERR(skb);
2220
2221 l2cap_do_send(chan, skb);
2222 err = len;
2223 break;
2224
2225 case L2CAP_MODE_ERTM:
2226 case L2CAP_MODE_STREAMING:
2227 /* Check outgoing MTU */
2228 if (len > chan->omtu) {
2229 err = -EMSGSIZE;
2230 break;
2231 }
2232
2233 __skb_queue_head_init(&seg_queue);
2234
2235 /* Do segmentation before calling in to the state machine,
2236 * since it's possible to block while waiting for memory
2237 * allocation.
2238 */
2239 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2240
2241 /* The channel could have been closed while segmenting,
2242 * check that it is still connected.
2243 */
2244 if (chan->state != BT_CONNECTED) {
2245 __skb_queue_purge(&seg_queue);
2246 err = -ENOTCONN;
2247 }
2248
2249 if (err)
2250 break;
2251
2252 if (chan->mode == L2CAP_MODE_ERTM)
2253 			err = l2cap_tx(chan, NULL, &seg_queue,
2254 				       L2CAP_EV_DATA_REQUEST);
2255 else
2256 err = l2cap_streaming_send(chan, &seg_queue);
2257
2258 if (!err)
2259 err = len;
2260
2261 /* If the skbs were not queued for sending, they'll still be in
2262 * seg_queue and need to be purged.
2263 */
2264 __skb_queue_purge(&seg_queue);
2265 break;
2266
2267 default:
2268 		BT_DBG("bad mode 0x%2.2x", chan->mode);
2269 err = -EBADFD;
2270 }
2271
2272 return err;
2273}
2274
2275static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2276{
2277 struct l2cap_ctrl control;
2278 u16 seq;
2279
2280 BT_DBG("chan %p, txseq %d", chan, txseq);
2281
2282 memset(&control, 0, sizeof(control));
2283 control.sframe = 1;
2284 control.super = L2CAP_SUPER_SREJ;
2285
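 	/* Send an SREJ for every missing sequence number between the next
 	 * in-order frame we expected and the frame that just arrived,
 	 * skipping any that are already buffered in the SREJ queue, and
 	 * record each request in srej_list so the retransmissions can be
 	 * matched up later.
 	 */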
2286 for (seq = chan->expected_tx_seq; seq != txseq;
2287 seq = __next_seq(chan, seq)) {
2288 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2289 control.reqseq = seq;
2290 l2cap_send_sframe(chan, &control);
2291 l2cap_seq_list_append(&chan->srej_list, seq);
2292 }
2293 }
2294
2295 chan->expected_tx_seq = __next_seq(chan, txseq);
2296}
2297
2298static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2299{
2300 struct l2cap_ctrl control;
2301
2302 BT_DBG("chan %p", chan);
2303
2304 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2305 return;
2306
2307 memset(&control, 0, sizeof(control));
2308 control.sframe = 1;
2309 control.super = L2CAP_SUPER_SREJ;
2310 control.reqseq = chan->srej_list.tail;
2311 l2cap_send_sframe(chan, &control);
2312}
2313
2314static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2315{
2316 struct l2cap_ctrl control;
2317 u16 initial_head;
2318 u16 seq;
2319
2320 BT_DBG("chan %p, txseq %d", chan, txseq);
2321
2322 memset(&control, 0, sizeof(control));
2323 control.sframe = 1;
2324 control.super = L2CAP_SUPER_SREJ;
2325
2326 /* Capture initial list head to allow only one pass through the list. */
2327 initial_head = chan->srej_list.head;
2328
2329 do {
2330 seq = l2cap_seq_list_pop(&chan->srej_list);
2331 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2332 break;
2333
2334 control.reqseq = seq;
2335 l2cap_send_sframe(chan, &control);
2336 l2cap_seq_list_append(&chan->srej_list, seq);
2337 } while (chan->srej_list.head != initial_head);
2338}
2339
2340static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2341{
2342 struct sk_buff *acked_skb;
2343 u16 ackseq;
2344
2345 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2346
2347 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2348 return;
2349
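 	/* reqseq acknowledges every frame up to, but not including, that
 	 * sequence number: drop the acked frames from the transmit queue
 	 * and stop the retransmission timer once nothing remains unacked.
 	 */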
2350 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2351 chan->expected_ack_seq, chan->unacked_frames);
2352
2353 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2354 ackseq = __next_seq(chan, ackseq)) {
2355
2356 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2357 if (acked_skb) {
2358 skb_unlink(acked_skb, &chan->tx_q);
2359 kfree_skb(acked_skb);
2360 chan->unacked_frames--;
2361 }
2362 }
2363
2364 chan->expected_ack_seq = reqseq;
2365
2366 if (chan->unacked_frames == 0)
2367 __clear_retrans_timer(chan);
2368
2369 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2370}
2371
2372static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2373{
2374 BT_DBG("chan %p", chan);
2375
2376 chan->expected_tx_seq = chan->buffer_seq;
2377 l2cap_seq_list_clear(&chan->srej_list);
2378 skb_queue_purge(&chan->srej_q);
2379 chan->rx_state = L2CAP_RX_STATE_RECV;
2380}
2381
2382static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
2383 struct l2cap_ctrl *control,
2384 struct sk_buff_head *skbs, u8 event)
2385{
2386 int err = 0;
2387
2388 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2389 event);
2390
2391 switch (event) {
2392 case L2CAP_EV_DATA_REQUEST:
2393 if (chan->tx_send_head == NULL)
2394 chan->tx_send_head = skb_peek(skbs);
2395
2396 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2397 l2cap_ertm_send(chan);
2398 break;
2399 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2400 BT_DBG("Enter LOCAL_BUSY");
2401 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2402
2403 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2404 /* The SREJ_SENT state must be aborted if we are to
2405 * enter the LOCAL_BUSY state.
2406 */
2407 l2cap_abort_rx_srej_sent(chan);
2408 }
2409
2410 l2cap_send_ack(chan);
2411
2412 break;
2413 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2414 BT_DBG("Exit LOCAL_BUSY");
2415 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2416
2417 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2418 struct l2cap_ctrl local_control;
2419
2420 memset(&local_control, 0, sizeof(local_control));
2421 local_control.sframe = 1;
2422 local_control.super = L2CAP_SUPER_RR;
2423 local_control.poll = 1;
2424 local_control.reqseq = chan->buffer_seq;
2425 l2cap_send_sframe(chan, &local_control);
2426
2427 chan->retry_count = 1;
2428 __set_monitor_timer(chan);
2429 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2430 }
2431 break;
2432 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2433 l2cap_process_reqseq(chan, control->reqseq);
2434 break;
2435 case L2CAP_EV_EXPLICIT_POLL:
2436 l2cap_send_rr_or_rnr(chan, 1);
2437 chan->retry_count = 1;
2438 __set_monitor_timer(chan);
2439 __clear_ack_timer(chan);
2440 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2441 break;
2442 case L2CAP_EV_RETRANS_TO:
2443 l2cap_send_rr_or_rnr(chan, 1);
2444 chan->retry_count = 1;
2445 __set_monitor_timer(chan);
2446 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2447 break;
2448 case L2CAP_EV_RECV_FBIT:
2449 /* Nothing to process */
2450 break;
2451 default:
2452 break;
2453 }
2454
2455 return err;
2456}
2457
2458static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2459 struct l2cap_ctrl *control,
2460 struct sk_buff_head *skbs, u8 event)
2461{
2462 int err = 0;
2463
2464 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2465 event);
2466
2467 switch (event) {
2468 case L2CAP_EV_DATA_REQUEST:
2469 if (chan->tx_send_head == NULL)
2470 chan->tx_send_head = skb_peek(skbs);
2471 /* Queue data, but don't send. */
2472 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2473 break;
2474 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2475 BT_DBG("Enter LOCAL_BUSY");
2476 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2477
2478 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2479 /* The SREJ_SENT state must be aborted if we are to
2480 * enter the LOCAL_BUSY state.
2481 */
2482 l2cap_abort_rx_srej_sent(chan);
2483 }
2484
2485 l2cap_send_ack(chan);
2486
2487 break;
2488 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2489 BT_DBG("Exit LOCAL_BUSY");
2490 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2491
2492 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2493 struct l2cap_ctrl local_control;
2494 memset(&local_control, 0, sizeof(local_control));
2495 local_control.sframe = 1;
2496 local_control.super = L2CAP_SUPER_RR;
2497 local_control.poll = 1;
2498 local_control.reqseq = chan->buffer_seq;
2499 l2cap_send_sframe(chan, &local_control);
2500
2501 chan->retry_count = 1;
2502 __set_monitor_timer(chan);
2503 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2504 }
2505 break;
2506 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2507 l2cap_process_reqseq(chan, control->reqseq);
2508
2509 /* Fall through */
2510
2511 case L2CAP_EV_RECV_FBIT:
2512 if (control && control->final) {
2513 __clear_monitor_timer(chan);
2514 if (chan->unacked_frames > 0)
2515 __set_retrans_timer(chan);
2516 chan->retry_count = 0;
2517 chan->tx_state = L2CAP_TX_STATE_XMIT;
2518 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2519 }
2520 break;
2521 case L2CAP_EV_EXPLICIT_POLL:
2522 /* Ignore */
2523 break;
2524 case L2CAP_EV_MONITOR_TO:
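 		/* A max_tx of zero means the poll may be retransmitted
 		 * indefinitely; otherwise give up and disconnect once the
 		 * retry budget is spent.
 		 */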
2525 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2526 l2cap_send_rr_or_rnr(chan, 1);
2527 __set_monitor_timer(chan);
2528 chan->retry_count++;
2529 } else {
2530 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2531 }
2532 break;
2533 default:
2534 break;
2535 }
2536
2537 return err;
2538}
2539
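/* Top-level ERTM transmit state machine.  Events such as new outbound
 * data, explicit polls, retransmission/monitor timeouts and received
 * acknowledgements are dispatched to the handler for the current
 * tx_state: XMIT while sending is allowed, WAIT_F while a poll is
 * outstanding and we are waiting for a frame with the Final bit set.
 */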
2540static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2541 struct sk_buff_head *skbs, u8 event)
2542{
2543 int err = 0;
2544
2545 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2546 chan, control, skbs, event, chan->tx_state);
2547
2548 switch (chan->tx_state) {
2549 case L2CAP_TX_STATE_XMIT:
2550 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2551 break;
2552 case L2CAP_TX_STATE_WAIT_F:
2553 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2554 break;
2555 default:
2556 /* Ignore event */
2557 break;
2558 }
2559
2560 return err;
2561}
2562
2563static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2564 struct l2cap_ctrl *control)
2565{
2566 BT_DBG("chan %p, control %p", chan, control);
2567 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2568}
2569
2570static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2571 struct l2cap_ctrl *control)
2572{
2573 BT_DBG("chan %p, control %p", chan, control);
2574 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2575}
2576
2577/* Copy frame to all raw sockets on that connection */
2578static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2579{
2580 struct sk_buff *nskb;
2581 struct l2cap_chan *chan;
2582
2583 BT_DBG("conn %p", conn);
2584
2585 mutex_lock(&conn->chan_lock);
2586
2587 list_for_each_entry(chan, &conn->chan_l, list) {
2588 struct sock *sk = chan->sk;
2589 if (chan->chan_type != L2CAP_CHAN_RAW)
2590 continue;
2591
2592 /* Don't send frame to the socket it came from */
2593 if (skb->sk == sk)
2594 continue;
2595 nskb = skb_clone(skb, GFP_ATOMIC);
2596 if (!nskb)
2597 continue;
2598
2599 if (chan->ops->recv(chan->data, nskb))
2600 kfree_skb(nskb);
2601 }
2602
2603 mutex_unlock(&conn->chan_lock);
2604}
2605
2606/* ---- L2CAP signalling commands ---- */
2607static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2608 u8 code, u8 ident, u16 dlen, void *data)
2609{
2610 struct sk_buff *skb, **frag;
2611 struct l2cap_cmd_hdr *cmd;
2612 struct l2cap_hdr *lh;
2613 int len, count;
2614
2615 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2616 conn, code, ident, dlen);
2617
2618 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2619 count = min_t(unsigned int, conn->mtu, len);
2620
2621 skb = bt_skb_alloc(count, GFP_ATOMIC);
2622 if (!skb)
2623 return NULL;
2624
2625 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2626 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2627
2628 if (conn->hcon->type == LE_LINK)
2629 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2630 else
2631 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2632
2633 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2634 cmd->code = code;
2635 cmd->ident = ident;
2636 cmd->len = cpu_to_le16(dlen);
2637
2638 if (dlen) {
2639 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2640 memcpy(skb_put(skb, count), data, count);
2641 data += count;
2642 }
2643
2644 len -= skb->len;
2645
2646 /* Continuation fragments (no L2CAP header) */
2647 frag = &skb_shinfo(skb)->frag_list;
2648 while (len) {
2649 count = min_t(unsigned int, conn->mtu, len);
2650
2651 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2652 if (!*frag)
2653 goto fail;
2654
2655 memcpy(skb_put(*frag, count), data, count);
2656
2657 len -= count;
2658 data += count;
2659
2660 frag = &(*frag)->next;
2661 }
2662
2663 return skb;
2664
2665fail:
2666 kfree_skb(skb);
2667 return NULL;
2668}
2669
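/* Configuration options are encoded as a type/length/value sequence: a
 * one-byte type (with the high bit marking a hint that may be skipped if
 * it is not understood), a one-byte length and a variable-length value.
 * Values of 1, 2 or 4 bytes are returned by value; anything larger is
 * returned as a pointer into the option buffer.
 */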
2670static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2671{
2672 struct l2cap_conf_opt *opt = *ptr;
2673 int len;
2674
2675 len = L2CAP_CONF_OPT_SIZE + opt->len;
2676 *ptr += len;
2677
2678 *type = opt->type;
2679 *olen = opt->len;
2680
2681 switch (opt->len) {
2682 case 1:
2683 *val = *((u8 *) opt->val);
2684 break;
2685
2686 case 2:
2687 *val = get_unaligned_le16(opt->val);
2688 break;
2689
2690 case 4:
2691 *val = get_unaligned_le32(opt->val);
2692 break;
2693
2694 default:
2695 *val = (unsigned long) opt->val;
2696 break;
2697 }
2698
2699 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2700 return len;
2701}
2702
2703static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2704{
2705 struct l2cap_conf_opt *opt = *ptr;
2706
2707 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2708
2709 opt->type = type;
2710 opt->len = len;
2711
2712 switch (len) {
2713 case 1:
2714 *((u8 *) opt->val) = val;
2715 break;
2716
2717 case 2:
2718 put_unaligned_le16(val, opt->val);
2719 break;
2720
2721 case 4:
2722 put_unaligned_le32(val, opt->val);
2723 break;
2724
2725 default:
2726 memcpy(opt->val, (void *) val, len);
2727 break;
2728 }
2729
2730 *ptr += L2CAP_CONF_OPT_SIZE + len;
2731}
2732
2733static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2734{
2735 struct l2cap_conf_efs efs;
2736
2737 switch (chan->mode) {
2738 case L2CAP_MODE_ERTM:
2739 efs.id = chan->local_id;
2740 efs.stype = chan->local_stype;
2741 efs.msdu = cpu_to_le16(chan->local_msdu);
2742 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2743 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2744 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2745 break;
2746
2747 case L2CAP_MODE_STREAMING:
2748 efs.id = 1;
2749 efs.stype = L2CAP_SERV_BESTEFFORT;
2750 efs.msdu = cpu_to_le16(chan->local_msdu);
2751 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2752 efs.acc_lat = 0;
2753 efs.flush_to = 0;
2754 break;
2755
2756 default:
2757 return;
2758 }
2759
2760 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2761 (unsigned long) &efs);
2762}
2763
2764static void l2cap_ack_timeout(struct work_struct *work)
2765{
2766 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2767 ack_timer.work);
2768
2769 BT_DBG("chan %p", chan);
2770
2771 l2cap_chan_lock(chan);
2772
2773 l2cap_send_ack(chan);
2774
2775 l2cap_chan_unlock(chan);
2776
2777 l2cap_chan_put(chan);
2778}
2779
2780static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2781{
2782 int err;
2783
2784 chan->next_tx_seq = 0;
2785 chan->expected_tx_seq = 0;
2786 chan->expected_ack_seq = 0;
2787 chan->unacked_frames = 0;
2788 chan->buffer_seq = 0;
2789 chan->frames_sent = 0;
2790 chan->last_acked_seq = 0;
2791 chan->sdu = NULL;
2792 chan->sdu_last_frag = NULL;
2793 chan->sdu_len = 0;
2794
2795 skb_queue_head_init(&chan->tx_q);
2796
2797 if (chan->mode != L2CAP_MODE_ERTM)
2798 return 0;
2799
2800 chan->rx_state = L2CAP_RX_STATE_RECV;
2801 chan->tx_state = L2CAP_TX_STATE_XMIT;
2802
2803 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2804 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2805 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2806
2807 skb_queue_head_init(&chan->srej_q);
2808
2809 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2810 if (err < 0)
2811 return err;
2812
2813 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2814 if (err < 0)
2815 l2cap_seq_list_free(&chan->srej_list);
2816
2817 return err;
2818}
2819
2820static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2821{
2822 switch (mode) {
2823 case L2CAP_MODE_STREAMING:
2824 case L2CAP_MODE_ERTM:
2825 if (l2cap_mode_supported(mode, remote_feat_mask))
2826 return mode;
2827 /* fall through */
2828 default:
2829 return L2CAP_MODE_BASIC;
2830 }
2831}
2832
2833static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2834{
2835 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2836}
2837
2838static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2839{
2840 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2841}
2842
2843static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2844{
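 	/* Transmit windows larger than the 63-frame maximum of the
 	 * standard control field need the extended window size option,
 	 * which in turn requires the extended control field.
 	 */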
2845 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2846 __l2cap_ews_supported(chan)) {
2847 /* use extended control field */
2848 set_bit(FLAG_EXT_CTRL, &chan->flags);
2849 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2850 } else {
2851 chan->tx_win = min_t(u16, chan->tx_win,
2852 L2CAP_DEFAULT_TX_WINDOW);
2853 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2854 }
2855}
2856
2857static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2858{
2859 struct l2cap_conf_req *req = data;
2860 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2861 void *ptr = req->data;
2862 u16 size;
2863
2864 BT_DBG("chan %p", chan);
2865
2866 if (chan->num_conf_req || chan->num_conf_rsp)
2867 goto done;
2868
2869 switch (chan->mode) {
2870 case L2CAP_MODE_STREAMING:
2871 case L2CAP_MODE_ERTM:
2872 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2873 break;
2874
2875 if (__l2cap_efs_supported(chan))
2876 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2877
2878 /* fall through */
2879 default:
2880 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2881 break;
2882 }
2883
2884done:
2885 if (chan->imtu != L2CAP_DEFAULT_MTU)
2886 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2887
2888 switch (chan->mode) {
2889 case L2CAP_MODE_BASIC:
2890 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2891 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2892 break;
2893
2894 rfc.mode = L2CAP_MODE_BASIC;
2895 rfc.txwin_size = 0;
2896 rfc.max_transmit = 0;
2897 rfc.retrans_timeout = 0;
2898 rfc.monitor_timeout = 0;
2899 rfc.max_pdu_size = 0;
2900
2901 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2902 (unsigned long) &rfc);
2903 break;
2904
2905 case L2CAP_MODE_ERTM:
2906 rfc.mode = L2CAP_MODE_ERTM;
2907 rfc.max_transmit = chan->max_tx;
2908 rfc.retrans_timeout = 0;
2909 rfc.monitor_timeout = 0;
2910
2911 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2912 L2CAP_EXT_HDR_SIZE -
2913 L2CAP_SDULEN_SIZE -
2914 L2CAP_FCS_SIZE);
2915 rfc.max_pdu_size = cpu_to_le16(size);
2916
2917 l2cap_txwin_setup(chan);
2918
2919 rfc.txwin_size = min_t(u16, chan->tx_win,
2920 L2CAP_DEFAULT_TX_WINDOW);
2921
2922 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2923 (unsigned long) &rfc);
2924
2925 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2926 l2cap_add_opt_efs(&ptr, chan);
2927
2928 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2929 break;
2930
2931 if (chan->fcs == L2CAP_FCS_NONE ||
2932 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2933 chan->fcs = L2CAP_FCS_NONE;
2934 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2935 }
2936
2937 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2938 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2939 chan->tx_win);
2940 break;
2941
2942 case L2CAP_MODE_STREAMING:
2943 rfc.mode = L2CAP_MODE_STREAMING;
2944 rfc.txwin_size = 0;
2945 rfc.max_transmit = 0;
2946 rfc.retrans_timeout = 0;
2947 rfc.monitor_timeout = 0;
2948
2949 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2950 L2CAP_EXT_HDR_SIZE -
2951 L2CAP_SDULEN_SIZE -
2952 L2CAP_FCS_SIZE);
2953 rfc.max_pdu_size = cpu_to_le16(size);
2954
2955 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2956 (unsigned long) &rfc);
2957
2958 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2959 l2cap_add_opt_efs(&ptr, chan);
2960
2961 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2962 break;
2963
2964 if (chan->fcs == L2CAP_FCS_NONE ||
2965 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2966 chan->fcs = L2CAP_FCS_NONE;
2967 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2968 }
2969 break;
2970 }
2971
2972 req->dcid = cpu_to_le16(chan->dcid);
2973 req->flags = cpu_to_le16(0);
2974
2975 return ptr - data;
2976}
2977
2978static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2979{
2980 struct l2cap_conf_rsp *rsp = data;
2981 void *ptr = rsp->data;
2982 void *req = chan->conf_req;
2983 int len = chan->conf_len;
2984 int type, hint, olen;
2985 unsigned long val;
2986 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2987 struct l2cap_conf_efs efs;
2988 u8 remote_efs = 0;
2989 u16 mtu = L2CAP_DEFAULT_MTU;
2990 u16 result = L2CAP_CONF_SUCCESS;
2991 u16 size;
2992
2993 BT_DBG("chan %p", chan);
2994
2995 while (len >= L2CAP_CONF_OPT_SIZE) {
2996 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2997
2998 hint = type & L2CAP_CONF_HINT;
2999 type &= L2CAP_CONF_MASK;
3000
3001 switch (type) {
3002 case L2CAP_CONF_MTU:
3003 mtu = val;
3004 break;
3005
3006 case L2CAP_CONF_FLUSH_TO:
3007 chan->flush_to = val;
3008 break;
3009
3010 case L2CAP_CONF_QOS:
3011 break;
3012
3013 case L2CAP_CONF_RFC:
3014 if (olen == sizeof(rfc))
3015 memcpy(&rfc, (void *) val, olen);
3016 break;
3017
3018 case L2CAP_CONF_FCS:
3019 if (val == L2CAP_FCS_NONE)
3020 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
3021 break;
3022
3023 case L2CAP_CONF_EFS:
3024 remote_efs = 1;
3025 if (olen == sizeof(efs))
3026 memcpy(&efs, (void *) val, olen);
3027 break;
3028
3029 case L2CAP_CONF_EWS:
3030 if (!enable_hs)
3031 return -ECONNREFUSED;
3032
3033 set_bit(FLAG_EXT_CTRL, &chan->flags);
3034 set_bit(CONF_EWS_RECV, &chan->conf_state);
3035 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3036 chan->remote_tx_win = val;
3037 break;
3038
3039 default:
3040 if (hint)
3041 break;
3042
3043 result = L2CAP_CONF_UNKNOWN;
3044 *((u8 *) ptr++) = type;
3045 break;
3046 }
3047 }
3048
3049 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3050 goto done;
3051
3052 switch (chan->mode) {
3053 case L2CAP_MODE_STREAMING:
3054 case L2CAP_MODE_ERTM:
3055 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3056 chan->mode = l2cap_select_mode(rfc.mode,
3057 chan->conn->feat_mask);
3058 break;
3059 }
3060
3061 if (remote_efs) {
3062 if (__l2cap_efs_supported(chan))
3063 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3064 else
3065 return -ECONNREFUSED;
3066 }
3067
3068 if (chan->mode != rfc.mode)
3069 return -ECONNREFUSED;
3070
3071 break;
3072 }
3073
3074done:
3075 if (chan->mode != rfc.mode) {
3076 result = L2CAP_CONF_UNACCEPT;
3077 rfc.mode = chan->mode;
3078
3079 if (chan->num_conf_rsp == 1)
3080 return -ECONNREFUSED;
3081
3082 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3083 sizeof(rfc), (unsigned long) &rfc);
3084 }
3085
3086 if (result == L2CAP_CONF_SUCCESS) {
3087 /* Configure output options and let the other side know
3088 * which ones we don't like. */
3089
3090 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3091 result = L2CAP_CONF_UNACCEPT;
3092 else {
3093 chan->omtu = mtu;
3094 set_bit(CONF_MTU_DONE, &chan->conf_state);
3095 }
3096 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3097
3098 if (remote_efs) {
3099 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3100 efs.stype != L2CAP_SERV_NOTRAFIC &&
3101 efs.stype != chan->local_stype) {
3102
3103 result = L2CAP_CONF_UNACCEPT;
3104
3105 if (chan->num_conf_req >= 1)
3106 return -ECONNREFUSED;
3107
3108 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3109 sizeof(efs),
3110 (unsigned long) &efs);
3111 } else {
3112 /* Send PENDING Conf Rsp */
3113 result = L2CAP_CONF_PENDING;
3114 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3115 }
3116 }
3117
3118 switch (rfc.mode) {
3119 case L2CAP_MODE_BASIC:
3120 chan->fcs = L2CAP_FCS_NONE;
3121 set_bit(CONF_MODE_DONE, &chan->conf_state);
3122 break;
3123
3124 case L2CAP_MODE_ERTM:
3125 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3126 chan->remote_tx_win = rfc.txwin_size;
3127 else
3128 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3129
3130 chan->remote_max_tx = rfc.max_transmit;
3131
3132 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3133 chan->conn->mtu -
3134 L2CAP_EXT_HDR_SIZE -
3135 L2CAP_SDULEN_SIZE -
3136 L2CAP_FCS_SIZE);
3137 rfc.max_pdu_size = cpu_to_le16(size);
3138 chan->remote_mps = size;
3139
3140 rfc.retrans_timeout =
3141 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3142 rfc.monitor_timeout =
3143 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3144
3145 set_bit(CONF_MODE_DONE, &chan->conf_state);
3146
3147 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3148 sizeof(rfc), (unsigned long) &rfc);
3149
3150 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3151 chan->remote_id = efs.id;
3152 chan->remote_stype = efs.stype;
3153 chan->remote_msdu = le16_to_cpu(efs.msdu);
3154 chan->remote_flush_to =
3155 le32_to_cpu(efs.flush_to);
3156 chan->remote_acc_lat =
3157 le32_to_cpu(efs.acc_lat);
3158 chan->remote_sdu_itime =
3159 le32_to_cpu(efs.sdu_itime);
3160 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3161 sizeof(efs), (unsigned long) &efs);
3162 }
3163 break;
3164
3165 case L2CAP_MODE_STREAMING:
3166 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3167 chan->conn->mtu -
3168 L2CAP_EXT_HDR_SIZE -
3169 L2CAP_SDULEN_SIZE -
3170 L2CAP_FCS_SIZE);
3171 rfc.max_pdu_size = cpu_to_le16(size);
3172 chan->remote_mps = size;
3173
3174 set_bit(CONF_MODE_DONE, &chan->conf_state);
3175
3176 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3177 sizeof(rfc), (unsigned long) &rfc);
3178
3179 break;
3180
3181 default:
3182 result = L2CAP_CONF_UNACCEPT;
3183
3184 memset(&rfc, 0, sizeof(rfc));
3185 rfc.mode = chan->mode;
3186 }
3187
3188 if (result == L2CAP_CONF_SUCCESS)
3189 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3190 }
3191 rsp->scid = cpu_to_le16(chan->dcid);
3192 rsp->result = cpu_to_le16(result);
3193 rsp->flags = cpu_to_le16(0x0000);
3194
3195 return ptr - data;
3196}
3197
3198static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3199{
3200 struct l2cap_conf_req *req = data;
3201 void *ptr = req->data;
3202 int type, olen;
3203 unsigned long val;
3204 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3205 struct l2cap_conf_efs efs;
3206
3207 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3208
3209 while (len >= L2CAP_CONF_OPT_SIZE) {
3210 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3211
3212 switch (type) {
3213 case L2CAP_CONF_MTU:
3214 if (val < L2CAP_DEFAULT_MIN_MTU) {
3215 *result = L2CAP_CONF_UNACCEPT;
3216 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3217 } else
3218 chan->imtu = val;
3219 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3220 break;
3221
3222 case L2CAP_CONF_FLUSH_TO:
3223 chan->flush_to = val;
3224 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3225 2, chan->flush_to);
3226 break;
3227
3228 case L2CAP_CONF_RFC:
3229 if (olen == sizeof(rfc))
3230 memcpy(&rfc, (void *)val, olen);
3231
3232 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3233 rfc.mode != chan->mode)
3234 return -ECONNREFUSED;
3235
3236 chan->fcs = 0;
3237
3238 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3239 sizeof(rfc), (unsigned long) &rfc);
3240 break;
3241
3242 case L2CAP_CONF_EWS:
3243 chan->tx_win = min_t(u16, val,
3244 L2CAP_DEFAULT_EXT_WINDOW);
3245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3246 chan->tx_win);
3247 break;
3248
3249 case L2CAP_CONF_EFS:
3250 if (olen == sizeof(efs))
3251 memcpy(&efs, (void *)val, olen);
3252
3253 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3254 efs.stype != L2CAP_SERV_NOTRAFIC &&
3255 efs.stype != chan->local_stype)
3256 return -ECONNREFUSED;
3257
3258 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3259 sizeof(efs), (unsigned long) &efs);
3260 break;
3261 }
3262 }
3263
3264 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3265 return -ECONNREFUSED;
3266
3267 chan->mode = rfc.mode;
3268
3269 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3270 switch (rfc.mode) {
3271 case L2CAP_MODE_ERTM:
3272 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3273 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3274 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3275
3276 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3277 chan->local_msdu = le16_to_cpu(efs.msdu);
3278 chan->local_sdu_itime =
3279 le32_to_cpu(efs.sdu_itime);
3280 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3281 chan->local_flush_to =
3282 le32_to_cpu(efs.flush_to);
3283 }
3284 break;
3285
3286 case L2CAP_MODE_STREAMING:
3287 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3288 }
3289 }
3290
3291 req->dcid = cpu_to_le16(chan->dcid);
3292 req->flags = cpu_to_le16(0x0000);
3293
3294 return ptr - data;
3295}
3296
3297static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3298{
3299 struct l2cap_conf_rsp *rsp = data;
3300 void *ptr = rsp->data;
3301
3302 BT_DBG("chan %p", chan);
3303
3304 rsp->scid = cpu_to_le16(chan->dcid);
3305 rsp->result = cpu_to_le16(result);
3306 rsp->flags = cpu_to_le16(flags);
3307
3308 return ptr - data;
3309}
3310
3311void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3312{
3313 struct l2cap_conn_rsp rsp;
3314 struct l2cap_conn *conn = chan->conn;
3315 u8 buf[128];
3316
3317 rsp.scid = cpu_to_le16(chan->dcid);
3318 rsp.dcid = cpu_to_le16(chan->scid);
3319 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3320 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3321 l2cap_send_cmd(conn, chan->ident,
3322 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3323
3324 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3325 return;
3326
3327 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3328 l2cap_build_conf_req(chan, buf), buf);
3329 chan->num_conf_req++;
3330}
3331
3332static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3333{
3334 int type, olen;
3335 unsigned long val;
3336 struct l2cap_conf_rfc rfc;
3337
3338 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3339
3340 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3341 return;
3342
3343 while (len >= L2CAP_CONF_OPT_SIZE) {
3344 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3345
3346 switch (type) {
3347 case L2CAP_CONF_RFC:
3348 if (olen == sizeof(rfc))
3349 memcpy(&rfc, (void *)val, olen);
3350 goto done;
3351 }
3352 }
3353
3354 /* Use sane default values in case a misbehaving remote device
3355 * did not send an RFC option.
3356 */
3357 rfc.mode = chan->mode;
3358 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3359 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3360 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3361
3362 BT_ERR("Expected RFC option was not found, using defaults");
3363
3364done:
3365 switch (rfc.mode) {
3366 case L2CAP_MODE_ERTM:
3367 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3368 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3369 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3370 break;
3371 case L2CAP_MODE_STREAMING:
3372 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3373 }
3374}
3375
3376static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3377{
3378 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3379
3380 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3381 return 0;
3382
3383 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3384 cmd->ident == conn->info_ident) {
3385 cancel_delayed_work(&conn->info_timer);
3386
3387 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3388 conn->info_ident = 0;
3389
3390 l2cap_conn_start(conn);
3391 }
3392
3393 return 0;
3394}
3395
3396static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3397{
3398 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3399 struct l2cap_conn_rsp rsp;
3400 struct l2cap_chan *chan = NULL, *pchan;
3401 struct sock *parent, *sk = NULL;
3402 int result, status = L2CAP_CS_NO_INFO;
3403
3404 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3405 __le16 psm = req->psm;
3406
3407 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3408
3409 /* Check if we have socket listening on psm */
3410 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3411 if (!pchan) {
3412 result = L2CAP_CR_BAD_PSM;
3413 goto sendresp;
3414 }
3415
3416 parent = pchan->sk;
3417
3418 mutex_lock(&conn->chan_lock);
3419 lock_sock(parent);
3420
3421 /* Check if the ACL is secure enough (if not SDP) */
3422 if (psm != cpu_to_le16(0x0001) &&
3423 !hci_conn_check_link_mode(conn->hcon)) {
3424 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3425 result = L2CAP_CR_SEC_BLOCK;
3426 goto response;
3427 }
3428
3429 result = L2CAP_CR_NO_MEM;
3430
3431 /* Check for backlog size */
3432 if (sk_acceptq_is_full(parent)) {
3433 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3434 goto response;
3435 }
3436
3437 chan = pchan->ops->new_connection(pchan->data);
3438 if (!chan)
3439 goto response;
3440
3441 sk = chan->sk;
3442
3443 /* Check if we already have channel with that dcid */
3444 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3445 sock_set_flag(sk, SOCK_ZAPPED);
3446 chan->ops->close(chan->data);
3447 goto response;
3448 }
3449
3450 hci_conn_hold(conn->hcon);
3451
3452 bacpy(&bt_sk(sk)->src, conn->src);
3453 bacpy(&bt_sk(sk)->dst, conn->dst);
3454 chan->psm = psm;
3455 chan->dcid = scid;
3456
3457 bt_accept_enqueue(parent, sk);
3458
3459 __l2cap_chan_add(conn, chan);
3460
3461 dcid = chan->scid;
3462
3463 __set_chan_timer(chan, sk->sk_sndtimeo);
3464
3465 chan->ident = cmd->ident;
3466
3467 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3468 if (l2cap_chan_check_security(chan)) {
3469 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3470 __l2cap_state_change(chan, BT_CONNECT2);
3471 result = L2CAP_CR_PEND;
3472 status = L2CAP_CS_AUTHOR_PEND;
3473 parent->sk_data_ready(parent, 0);
3474 } else {
3475 __l2cap_state_change(chan, BT_CONFIG);
3476 result = L2CAP_CR_SUCCESS;
3477 status = L2CAP_CS_NO_INFO;
3478 }
3479 } else {
3480 __l2cap_state_change(chan, BT_CONNECT2);
3481 result = L2CAP_CR_PEND;
3482 status = L2CAP_CS_AUTHEN_PEND;
3483 }
3484 } else {
3485 __l2cap_state_change(chan, BT_CONNECT2);
3486 result = L2CAP_CR_PEND;
3487 status = L2CAP_CS_NO_INFO;
3488 }
3489
3490response:
3491 release_sock(parent);
3492 mutex_unlock(&conn->chan_lock);
3493
3494sendresp:
3495 rsp.scid = cpu_to_le16(scid);
3496 rsp.dcid = cpu_to_le16(dcid);
3497 rsp.result = cpu_to_le16(result);
3498 rsp.status = cpu_to_le16(status);
3499 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3500
3501 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3502 struct l2cap_info_req info;
3503 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3504
3505 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3506 conn->info_ident = l2cap_get_ident(conn);
3507
3508 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3509
3510 l2cap_send_cmd(conn, conn->info_ident,
3511 L2CAP_INFO_REQ, sizeof(info), &info);
3512 }
3513
3514 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3515 result == L2CAP_CR_SUCCESS) {
3516 u8 buf[128];
3517 set_bit(CONF_REQ_SENT, &chan->conf_state);
3518 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3519 l2cap_build_conf_req(chan, buf), buf);
3520 chan->num_conf_req++;
3521 }
3522
3523 return 0;
3524}
3525
3526static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3527{
3528 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3529 u16 scid, dcid, result, status;
3530 struct l2cap_chan *chan;
3531 u8 req[128];
3532 int err;
3533
3534 scid = __le16_to_cpu(rsp->scid);
3535 dcid = __le16_to_cpu(rsp->dcid);
3536 result = __le16_to_cpu(rsp->result);
3537 status = __le16_to_cpu(rsp->status);
3538
3539 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3540 dcid, scid, result, status);
3541
3542 mutex_lock(&conn->chan_lock);
3543
3544 if (scid) {
3545 chan = __l2cap_get_chan_by_scid(conn, scid);
3546 if (!chan) {
3547 err = -EFAULT;
3548 goto unlock;
3549 }
3550 } else {
3551 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3552 if (!chan) {
3553 err = -EFAULT;
3554 goto unlock;
3555 }
3556 }
3557
3558 err = 0;
3559
3560 l2cap_chan_lock(chan);
3561
3562 switch (result) {
3563 case L2CAP_CR_SUCCESS:
3564 l2cap_state_change(chan, BT_CONFIG);
3565 chan->ident = 0;
3566 chan->dcid = dcid;
3567 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3568
3569 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3570 break;
3571
3572 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3573 l2cap_build_conf_req(chan, req), req);
3574 chan->num_conf_req++;
3575 break;
3576
3577 case L2CAP_CR_PEND:
3578 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3579 break;
3580
3581 default:
3582 l2cap_chan_del(chan, ECONNREFUSED);
3583 break;
3584 }
3585
3586 l2cap_chan_unlock(chan);
3587
3588unlock:
3589 mutex_unlock(&conn->chan_lock);
3590
3591 return err;
3592}
3593
3594static inline void set_default_fcs(struct l2cap_chan *chan)
3595{
3596 /* FCS is enabled only in ERTM or streaming mode, if one or both
3597 * sides request it.
3598 */
3599 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3600 chan->fcs = L2CAP_FCS_NONE;
3601 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3602 chan->fcs = L2CAP_FCS_CRC16;
3603}
3604
3605static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3606{
3607 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3608 u16 dcid, flags;
3609 u8 rsp[64];
3610 struct l2cap_chan *chan;
3611 int len, err = 0;
3612
3613 dcid = __le16_to_cpu(req->dcid);
3614 flags = __le16_to_cpu(req->flags);
3615
3616 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3617
3618 chan = l2cap_get_chan_by_scid(conn, dcid);
3619 if (!chan)
3620 return -ENOENT;
3621
3622 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3623 struct l2cap_cmd_rej_cid rej;
3624
3625 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3626 rej.scid = cpu_to_le16(chan->scid);
3627 rej.dcid = cpu_to_le16(chan->dcid);
3628
3629 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3630 sizeof(rej), &rej);
3631 goto unlock;
3632 }
3633
3634 /* Reject if config buffer is too small. */
3635 len = cmd_len - sizeof(*req);
3636 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3637 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3638 l2cap_build_conf_rsp(chan, rsp,
3639 L2CAP_CONF_REJECT, flags), rsp);
3640 goto unlock;
3641 }
3642
3643 /* Store config. */
3644 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3645 chan->conf_len += len;
3646
3647 if (flags & 0x0001) {
3648 /* Incomplete config. Send empty response. */
3649 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3650 l2cap_build_conf_rsp(chan, rsp,
3651 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3652 goto unlock;
3653 }
3654
3655 /* Complete config. */
3656 len = l2cap_parse_conf_req(chan, rsp);
3657 if (len < 0) {
3658 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3659 goto unlock;
3660 }
3661
3662 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3663 chan->num_conf_rsp++;
3664
3665 /* Reset config buffer. */
3666 chan->conf_len = 0;
3667
3668 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3669 goto unlock;
3670
3671 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3672 set_default_fcs(chan);
3673
3674 l2cap_state_change(chan, BT_CONNECTED);
3675
3676 if (chan->mode == L2CAP_MODE_ERTM ||
3677 chan->mode == L2CAP_MODE_STREAMING)
3678 err = l2cap_ertm_init(chan);
3679
3680 if (err < 0)
3681 l2cap_send_disconn_req(chan->conn, chan, -err);
3682 else
3683 l2cap_chan_ready(chan);
3684
3685 goto unlock;
3686 }
3687
3688 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3689 u8 buf[64];
3690 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3691 l2cap_build_conf_req(chan, buf), buf);
3692 chan->num_conf_req++;
3693 }
3694
3695 	/* Got a Conf Rsp PENDING from the remote side and assume we sent
3696 	   a Conf Rsp PENDING in the code above. */
3697 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3698 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3699
3700 /* check compatibility */
3701
3702 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3703 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3704
3705 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3706 l2cap_build_conf_rsp(chan, rsp,
3707 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3708 }
3709
3710unlock:
3711 l2cap_chan_unlock(chan);
3712 return err;
3713}
3714
3715static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3716{
3717 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3718 u16 scid, flags, result;
3719 struct l2cap_chan *chan;
3720 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3721 int err = 0;
3722
3723 scid = __le16_to_cpu(rsp->scid);
3724 flags = __le16_to_cpu(rsp->flags);
3725 result = __le16_to_cpu(rsp->result);
3726
3727 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3728 result, len);
3729
3730 chan = l2cap_get_chan_by_scid(conn, scid);
3731 if (!chan)
3732 return 0;
3733
3734 switch (result) {
3735 case L2CAP_CONF_SUCCESS:
3736 l2cap_conf_rfc_get(chan, rsp->data, len);
3737 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3738 break;
3739
3740 case L2CAP_CONF_PENDING:
3741 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3742
3743 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3744 char buf[64];
3745
3746 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3747 buf, &result);
3748 if (len < 0) {
3749 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3750 goto done;
3751 }
3752
3753 /* check compatibility */
3754
3755 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3756 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3757
3758 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3759 l2cap_build_conf_rsp(chan, buf,
3760 L2CAP_CONF_SUCCESS, 0x0000), buf);
3761 }
3762 goto done;
3763
3764 case L2CAP_CONF_UNACCEPT:
3765 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3766 char req[64];
3767
3768 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3769 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3770 goto done;
3771 }
3772
3773 /* throw out any old stored conf requests */
3774 result = L2CAP_CONF_SUCCESS;
3775 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3776 req, &result);
3777 if (len < 0) {
3778 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3779 goto done;
3780 }
3781
3782 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3783 L2CAP_CONF_REQ, len, req);
3784 chan->num_conf_req++;
3785 if (result != L2CAP_CONF_SUCCESS)
3786 goto done;
3787 break;
3788 }
3789
3790 default:
3791 l2cap_chan_set_err(chan, ECONNRESET);
3792
3793 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3794 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3795 goto done;
3796 }
3797
3798 if (flags & 0x01)
3799 goto done;
3800
3801 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3802
3803 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3804 set_default_fcs(chan);
3805
3806 l2cap_state_change(chan, BT_CONNECTED);
3807 if (chan->mode == L2CAP_MODE_ERTM ||
3808 chan->mode == L2CAP_MODE_STREAMING)
3809 err = l2cap_ertm_init(chan);
3810
3811 if (err < 0)
3812 l2cap_send_disconn_req(chan->conn, chan, -err);
3813 else
3814 l2cap_chan_ready(chan);
3815 }
3816
3817done:
3818 l2cap_chan_unlock(chan);
3819 return err;
3820}
3821
3822static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3823{
3824 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3825 struct l2cap_disconn_rsp rsp;
3826 u16 dcid, scid;
3827 struct l2cap_chan *chan;
3828 struct sock *sk;
3829
3830 scid = __le16_to_cpu(req->scid);
3831 dcid = __le16_to_cpu(req->dcid);
3832
3833 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3834
3835 mutex_lock(&conn->chan_lock);
3836
3837 chan = __l2cap_get_chan_by_scid(conn, dcid);
3838 if (!chan) {
3839 mutex_unlock(&conn->chan_lock);
3840 return 0;
3841 }
3842
3843 l2cap_chan_lock(chan);
3844
3845 sk = chan->sk;
3846
3847 rsp.dcid = cpu_to_le16(chan->scid);
3848 rsp.scid = cpu_to_le16(chan->dcid);
3849 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3850
3851 lock_sock(sk);
3852 sk->sk_shutdown = SHUTDOWN_MASK;
3853 release_sock(sk);
3854
3855 l2cap_chan_hold(chan);
3856 l2cap_chan_del(chan, ECONNRESET);
3857
3858 l2cap_chan_unlock(chan);
3859
3860 chan->ops->close(chan->data);
3861 l2cap_chan_put(chan);
3862
3863 mutex_unlock(&conn->chan_lock);
3864
3865 return 0;
3866}
3867
3868static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3869{
3870 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3871 u16 dcid, scid;
3872 struct l2cap_chan *chan;
3873
3874 scid = __le16_to_cpu(rsp->scid);
3875 dcid = __le16_to_cpu(rsp->dcid);
3876
3877 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3878
3879 mutex_lock(&conn->chan_lock);
3880
3881 chan = __l2cap_get_chan_by_scid(conn, scid);
3882 if (!chan) {
3883 mutex_unlock(&conn->chan_lock);
3884 return 0;
3885 }
3886
3887 l2cap_chan_lock(chan);
3888
3889 l2cap_chan_hold(chan);
3890 l2cap_chan_del(chan, 0);
3891
3892 l2cap_chan_unlock(chan);
3893
3894 chan->ops->close(chan->data);
3895 l2cap_chan_put(chan);
3896
3897 mutex_unlock(&conn->chan_lock);
3898
3899 return 0;
3900}
3901
3902static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3903{
3904 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3905 u16 type;
3906
3907 type = __le16_to_cpu(req->type);
3908
3909 BT_DBG("type 0x%4.4x", type);
3910
3911 if (type == L2CAP_IT_FEAT_MASK) {
3912 u8 buf[8];
3913 u32 feat_mask = l2cap_feat_mask;
3914 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3915 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3916 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3917 if (!disable_ertm)
3918 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3919 | L2CAP_FEAT_FCS;
3920 if (enable_hs)
3921 feat_mask |= L2CAP_FEAT_EXT_FLOW
3922 | L2CAP_FEAT_EXT_WINDOW;
3923
3924 put_unaligned_le32(feat_mask, rsp->data);
3925 l2cap_send_cmd(conn, cmd->ident,
3926 L2CAP_INFO_RSP, sizeof(buf), buf);
3927 } else if (type == L2CAP_IT_FIXED_CHAN) {
3928 u8 buf[12];
3929 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3930
3931 if (enable_hs)
3932 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3933 else
3934 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3935
3936 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3937 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3938 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3939 l2cap_send_cmd(conn, cmd->ident,
3940 L2CAP_INFO_RSP, sizeof(buf), buf);
3941 } else {
3942 struct l2cap_info_rsp rsp;
3943 rsp.type = cpu_to_le16(type);
3944 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3945 l2cap_send_cmd(conn, cmd->ident,
3946 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3947 }
3948
3949 return 0;
3950}
3951
3952static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3953{
3954 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3955 u16 type, result;
3956
3957 type = __le16_to_cpu(rsp->type);
3958 result = __le16_to_cpu(rsp->result);
3959
3960 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3961
3962 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3963 if (cmd->ident != conn->info_ident ||
3964 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3965 return 0;
3966
3967 cancel_delayed_work(&conn->info_timer);
3968
3969 if (result != L2CAP_IR_SUCCESS) {
3970 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3971 conn->info_ident = 0;
3972
3973 l2cap_conn_start(conn);
3974
3975 return 0;
3976 }
3977
3978 switch (type) {
3979 case L2CAP_IT_FEAT_MASK:
3980 conn->feat_mask = get_unaligned_le32(rsp->data);
3981
3982 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3983 struct l2cap_info_req req;
3984 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3985
3986 conn->info_ident = l2cap_get_ident(conn);
3987
3988 l2cap_send_cmd(conn, conn->info_ident,
3989 L2CAP_INFO_REQ, sizeof(req), &req);
3990 } else {
3991 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3992 conn->info_ident = 0;
3993
3994 l2cap_conn_start(conn);
3995 }
3996 break;
3997
3998 case L2CAP_IT_FIXED_CHAN:
3999 conn->fixed_chan_mask = rsp->data[0];
4000 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4001 conn->info_ident = 0;
4002
4003 l2cap_conn_start(conn);
4004 break;
4005 }
4006
4007 return 0;
4008}
4009
4010static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4011 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4012 void *data)
4013{
4014 struct l2cap_create_chan_req *req = data;
4015 struct l2cap_create_chan_rsp rsp;
4016 u16 psm, scid;
4017
4018 if (cmd_len != sizeof(*req))
4019 return -EPROTO;
4020
4021 if (!enable_hs)
4022 return -EINVAL;
4023
4024 psm = le16_to_cpu(req->psm);
4025 scid = le16_to_cpu(req->scid);
4026
4027 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
4028
4029 /* Placeholder: Always reject */
4030 rsp.dcid = 0;
4031 rsp.scid = cpu_to_le16(scid);
4032 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4033 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4034
4035 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4036 sizeof(rsp), &rsp);
4037
4038 return 0;
4039}
4040
4041static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
4042 struct l2cap_cmd_hdr *cmd, void *data)
4043{
4044 BT_DBG("conn %p", conn);
4045
4046 return l2cap_connect_rsp(conn, cmd, data);
4047}
4048
4049static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4050 u16 icid, u16 result)
4051{
4052 struct l2cap_move_chan_rsp rsp;
4053
4054 BT_DBG("icid %d, result %d", icid, result);
4055
4056 rsp.icid = cpu_to_le16(icid);
4057 rsp.result = cpu_to_le16(result);
4058
4059 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4060}
4061
4062static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4063 struct l2cap_chan *chan, u16 icid, u16 result)
4064{
4065 struct l2cap_move_chan_cfm cfm;
4066 u8 ident;
4067
4068 BT_DBG("icid %d, result %d", icid, result);
4069
4070 ident = l2cap_get_ident(conn);
4071 if (chan)
4072 chan->ident = ident;
4073
4074 cfm.icid = cpu_to_le16(icid);
4075 cfm.result = cpu_to_le16(result);
4076
4077 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4078}
4079
4080static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4081 u16 icid)
4082{
4083 struct l2cap_move_chan_cfm_rsp rsp;
4084
4085 BT_DBG("icid %d", icid);
4086
4087 rsp.icid = cpu_to_le16(icid);
4088 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4089}
4090
4091static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4092 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4093{
4094 struct l2cap_move_chan_req *req = data;
4095 u16 icid = 0;
4096 u16 result = L2CAP_MR_NOT_ALLOWED;
4097
4098 if (cmd_len != sizeof(*req))
4099 return -EPROTO;
4100
4101 icid = le16_to_cpu(req->icid);
4102
4103 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4104
4105 if (!enable_hs)
4106 return -EINVAL;
4107
4108 /* Placeholder: Always refuse */
4109 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4110
4111 return 0;
4112}
4113
4114static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4115 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4116{
4117 struct l2cap_move_chan_rsp *rsp = data;
4118 u16 icid, result;
4119
4120 if (cmd_len != sizeof(*rsp))
4121 return -EPROTO;
4122
4123 icid = le16_to_cpu(rsp->icid);
4124 result = le16_to_cpu(rsp->result);
4125
4126 BT_DBG("icid %d, result %d", icid, result);
4127
4128 /* Placeholder: Always unconfirmed */
4129 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4130
4131 return 0;
4132}
4133
4134static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4135 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4136{
4137 struct l2cap_move_chan_cfm *cfm = data;
4138 u16 icid, result;
4139
4140 if (cmd_len != sizeof(*cfm))
4141 return -EPROTO;
4142
4143 icid = le16_to_cpu(cfm->icid);
4144 result = le16_to_cpu(cfm->result);
4145
4146 BT_DBG("icid %d, result %d", icid, result);
4147
4148 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4149
4150 return 0;
4151}
4152
4153static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4154 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4155{
4156 struct l2cap_move_chan_cfm_rsp *rsp = data;
4157 u16 icid;
4158
4159 if (cmd_len != sizeof(*rsp))
4160 return -EPROTO;
4161
4162 icid = le16_to_cpu(rsp->icid);
4163
4164 BT_DBG("icid %d", icid);
4165
4166 return 0;
4167}
4168
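/* Validate requested LE connection parameters.  The interval bounds are
 * in units of 1.25 ms and the supervision timeout in units of 10 ms, so
 * the "max >= to_multiplier * 8" test rejects a maximum interval that
 * would equal or exceed the timeout, and the latency check ensures the
 * timeout still covers at least latency + 1 connection events at the
 * maximum interval.
 */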
4169static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4170 u16 to_multiplier)
4171{
4172 u16 max_latency;
4173
4174 if (min > max || min < 6 || max > 3200)
4175 return -EINVAL;
4176
4177 if (to_multiplier < 10 || to_multiplier > 3200)
4178 return -EINVAL;
4179
4180 if (max >= to_multiplier * 8)
4181 return -EINVAL;
4182
4183 max_latency = (to_multiplier * 8 / max) - 1;
4184 if (latency > 499 || latency > max_latency)
4185 return -EINVAL;
4186
4187 return 0;
4188}
4189
4190static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4191 struct l2cap_cmd_hdr *cmd, u8 *data)
4192{
4193 struct hci_conn *hcon = conn->hcon;
4194 struct l2cap_conn_param_update_req *req;
4195 struct l2cap_conn_param_update_rsp rsp;
4196 u16 min, max, latency, to_multiplier, cmd_len;
4197 int err;
4198
4199 if (!(hcon->link_mode & HCI_LM_MASTER))
4200 return -EINVAL;
4201
4202 cmd_len = __le16_to_cpu(cmd->len);
4203 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4204 return -EPROTO;
4205
4206 req = (struct l2cap_conn_param_update_req *) data;
4207 min = __le16_to_cpu(req->min);
4208 max = __le16_to_cpu(req->max);
4209 latency = __le16_to_cpu(req->latency);
4210 to_multiplier = __le16_to_cpu(req->to_multiplier);
4211
4212 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4213 min, max, latency, to_multiplier);
4214
4215 memset(&rsp, 0, sizeof(rsp));
4216
4217 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4218 if (err)
4219 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4220 else
4221 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4222
4223 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4224 sizeof(rsp), &rsp);
4225
4226 if (!err)
4227 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4228
4229 return 0;
4230}
4231
4232static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4233 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4234{
4235 int err = 0;
4236
4237 switch (cmd->code) {
4238 case L2CAP_COMMAND_REJ:
4239 l2cap_command_rej(conn, cmd, data);
4240 break;
4241
4242 case L2CAP_CONN_REQ:
4243 err = l2cap_connect_req(conn, cmd, data);
4244 break;
4245
4246 case L2CAP_CONN_RSP:
4247 err = l2cap_connect_rsp(conn, cmd, data);
4248 break;
4249
4250 case L2CAP_CONF_REQ:
4251 err = l2cap_config_req(conn, cmd, cmd_len, data);
4252 break;
4253
4254 case L2CAP_CONF_RSP:
4255 err = l2cap_config_rsp(conn, cmd, data);
4256 break;
4257
4258 case L2CAP_DISCONN_REQ:
4259 err = l2cap_disconnect_req(conn, cmd, data);
4260 break;
4261
4262 case L2CAP_DISCONN_RSP:
4263 err = l2cap_disconnect_rsp(conn, cmd, data);
4264 break;
4265
4266 case L2CAP_ECHO_REQ:
4267 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4268 break;
4269
4270 case L2CAP_ECHO_RSP:
4271 break;
4272
4273 case L2CAP_INFO_REQ:
4274 err = l2cap_information_req(conn, cmd, data);
4275 break;
4276
4277 case L2CAP_INFO_RSP:
4278 err = l2cap_information_rsp(conn, cmd, data);
4279 break;
4280
4281 case L2CAP_CREATE_CHAN_REQ:
4282 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4283 break;
4284
4285 case L2CAP_CREATE_CHAN_RSP:
4286 err = l2cap_create_channel_rsp(conn, cmd, data);
4287 break;
4288
4289 case L2CAP_MOVE_CHAN_REQ:
4290 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4291 break;
4292
4293 case L2CAP_MOVE_CHAN_RSP:
4294 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4295 break;
4296
4297 case L2CAP_MOVE_CHAN_CFM:
4298 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4299 break;
4300
4301 case L2CAP_MOVE_CHAN_CFM_RSP:
4302 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4303 break;
4304
4305 default:
4306 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4307 err = -EINVAL;
4308 break;
4309 }
4310
4311 return err;
4312}
4313
4314static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4315 struct l2cap_cmd_hdr *cmd, u8 *data)
4316{
4317 switch (cmd->code) {
4318 case L2CAP_COMMAND_REJ:
4319 return 0;
4320
4321 case L2CAP_CONN_PARAM_UPDATE_REQ:
4322 return l2cap_conn_param_update_req(conn, cmd, data);
4323
4324 case L2CAP_CONN_PARAM_UPDATE_RSP:
4325 return 0;
4326
4327 default:
4328 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4329 return -EINVAL;
4330 }
4331}
4332
4333static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4334 struct sk_buff *skb)
4335{
4336 u8 *data = skb->data;
4337 int len = skb->len;
4338 struct l2cap_cmd_hdr cmd;
4339 int err;
4340
4341 l2cap_raw_recv(conn, skb);
4342
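	/* Editorial note: a signaling C-frame may pack several commands back
	 * to back, each preceded by a 4-byte header (code, ident, 16-bit LE
	 * length).  The loop below walks them in order; for example, an
	 * Information Request followed by an Echo Request would simply be
	 * two iterations.  Parsing stops on a truncated command or on a zero
	 * ident, which is never valid for a request.
	 */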
4343 while (len >= L2CAP_CMD_HDR_SIZE) {
4344 u16 cmd_len;
4345 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4346 data += L2CAP_CMD_HDR_SIZE;
4347 len -= L2CAP_CMD_HDR_SIZE;
4348
4349 cmd_len = le16_to_cpu(cmd.len);
4350
4351 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
4352
4353 if (cmd_len > len || !cmd.ident) {
4354 BT_DBG("corrupted command");
4355 break;
4356 }
4357
4358 if (conn->hcon->type == LE_LINK)
4359 err = l2cap_le_sig_cmd(conn, &cmd, data);
4360 else
4361 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4362
4363 if (err) {
4364 struct l2cap_cmd_rej_unk rej;
4365
4366			BT_ERR("Command 0x%2.2x failed: %d", cmd.code, err);
4367
4368 /* FIXME: Map err to a valid reason */
4369 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4370 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4371 }
4372
4373 data += cmd_len;
4374 len -= cmd_len;
4375 }
4376
4377 kfree_skb(skb);
4378}
4379
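/* Verify the Frame Check Sequence of an ERTM/streaming PDU.
 *
 * Editorial sketch of the PDU layout assumed here (FCS enabled):
 *
 *	| len | cid | control | [SDU len] | payload | FCS |
 *	|<------------ CRC-16 coverage ------------>|
 *
 * At this point in the receive path the basic header and the control field
 * have already been pulled, so skb->data points at the information payload;
 * that is why the checksum is recomputed starting at skb->data - hdr_size,
 * and why skb_trim() only drops the trailing 2-byte FCS from skb->len (the
 * received FCS can still be read just past the trimmed length).
 */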
4380static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4381{
4382 u16 our_fcs, rcv_fcs;
4383 int hdr_size;
4384
4385 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4386 hdr_size = L2CAP_EXT_HDR_SIZE;
4387 else
4388 hdr_size = L2CAP_ENH_HDR_SIZE;
4389
4390 if (chan->fcs == L2CAP_FCS_CRC16) {
4391 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4392 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4393 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4394
4395 if (our_fcs != rcv_fcs)
4396 return -EBADMSG;
4397 }
4398 return 0;
4399}
4400
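/* Send the pending F-bit to the peer: if we are locally busy this is an RNR
 * with F=1, otherwise any queued I-frames are transmitted first and, if none
 * of them carried the F-bit, a final RR is sent instead.
 */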
4401static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4402{
4403 struct l2cap_ctrl control;
4404
4405 BT_DBG("chan %p", chan);
4406
4407 memset(&control, 0, sizeof(control));
4408 control.sframe = 1;
4409 control.final = 1;
4410 control.reqseq = chan->buffer_seq;
4411 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4412
4413 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4414 control.super = L2CAP_SUPER_RNR;
4415 l2cap_send_sframe(chan, &control);
4416 }
4417
4418 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4419 chan->unacked_frames > 0)
4420 __set_retrans_timer(chan);
4421
4422 /* Send pending iframes */
4423 l2cap_ertm_send(chan);
4424
4425 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4426 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4427 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4428 * send it now.
4429 */
4430 control.super = L2CAP_SUPER_RR;
4431 l2cap_send_sframe(chan, &control);
4432 }
4433}
4434
4435static void append_skb_frag(struct sk_buff *skb,
4436 struct sk_buff *new_frag, struct sk_buff **last_frag)
4437{
4438 /* skb->len reflects data in skb as well as all fragments
4439 * skb->data_len reflects only data in fragments
4440 */
4441 if (!skb_has_frag_list(skb))
4442 skb_shinfo(skb)->frag_list = new_frag;
4443
4444 new_frag->next = NULL;
4445
4446 (*last_frag)->next = new_frag;
4447 *last_frag = new_frag;
4448
4449 skb->len += new_frag->len;
4450 skb->data_len += new_frag->len;
4451 skb->truesize += new_frag->truesize;
4452}
4453
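/* Reassemble a segmented SDU from its I-frame payloads.
 *
 * Editorial worked example: a 300-byte SDU sent with an MPS of 128 might
 * arrive as a SAR_START frame (2-byte SDU length field of 300 plus the
 * first 126 data bytes), a SAR_CONTINUE with 128 bytes and a SAR_END with
 * the last 46 bytes; chan->ops->recv() is only called once the accumulated
 * length matches chan->sdu_len.  Unsegmented SDUs are passed straight
 * through.
 */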
4454static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4455 struct l2cap_ctrl *control)
4456{
4457 int err = -EINVAL;
4458
4459 switch (control->sar) {
4460 case L2CAP_SAR_UNSEGMENTED:
4461 if (chan->sdu)
4462 break;
4463
4464 err = chan->ops->recv(chan->data, skb);
4465 break;
4466
4467 case L2CAP_SAR_START:
4468 if (chan->sdu)
4469 break;
4470
4471 chan->sdu_len = get_unaligned_le16(skb->data);
4472 skb_pull(skb, L2CAP_SDULEN_SIZE);
4473
4474 if (chan->sdu_len > chan->imtu) {
4475 err = -EMSGSIZE;
4476 break;
4477 }
4478
4479 if (skb->len >= chan->sdu_len)
4480 break;
4481
4482 chan->sdu = skb;
4483 chan->sdu_last_frag = skb;
4484
4485 skb = NULL;
4486 err = 0;
4487 break;
4488
4489 case L2CAP_SAR_CONTINUE:
4490 if (!chan->sdu)
4491 break;
4492
4493 append_skb_frag(chan->sdu, skb,
4494 &chan->sdu_last_frag);
4495 skb = NULL;
4496
4497 if (chan->sdu->len >= chan->sdu_len)
4498 break;
4499
4500 err = 0;
4501 break;
4502
4503 case L2CAP_SAR_END:
4504 if (!chan->sdu)
4505 break;
4506
4507 append_skb_frag(chan->sdu, skb,
4508 &chan->sdu_last_frag);
4509 skb = NULL;
4510
4511 if (chan->sdu->len != chan->sdu_len)
4512 break;
4513
4514 err = chan->ops->recv(chan->data, chan->sdu);
4515
4516 if (!err) {
4517 /* Reassembly complete */
4518 chan->sdu = NULL;
4519 chan->sdu_last_frag = NULL;
4520 chan->sdu_len = 0;
4521 }
4522 break;
4523 }
4524
4525 if (err) {
4526 kfree_skb(skb);
4527 kfree_skb(chan->sdu);
4528 chan->sdu = NULL;
4529 chan->sdu_last_frag = NULL;
4530 chan->sdu_len = 0;
4531 }
4532
4533 return err;
4534}
4535
4536void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4537{
4538 u8 event;
4539
4540 if (chan->mode != L2CAP_MODE_ERTM)
4541 return;
4542
4543 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4544	l2cap_tx(chan, NULL, NULL, event);
4545}
4546
4547static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4548{
4549 int err = 0;
4550 /* Pass sequential frames to l2cap_reassemble_sdu()
4551 * until a gap is encountered.
4552 */
4553
4554 BT_DBG("chan %p", chan);
4555
4556 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4557 struct sk_buff *skb;
4558 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4559 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4560
4561 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4562
4563 if (!skb)
4564 break;
4565
4566 skb_unlink(skb, &chan->srej_q);
4567 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4568 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4569 if (err)
4570 break;
4571 }
4572
4573 if (skb_queue_empty(&chan->srej_q)) {
4574 chan->rx_state = L2CAP_RX_STATE_RECV;
4575 l2cap_send_ack(chan);
4576 }
4577
4578 return err;
4579}
4580
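/* Handle a Selective Reject from the peer: retransmit only the I-frame with
 * sequence number reqseq (as opposed to REJ, handled further below, which
 * retransmits everything from reqseq onwards).  A reqseq equal to the next,
 * not yet sent, tx_seq, or a frame that has already hit chan->max_tx
 * retransmissions, tears the connection down; a reqseq no longer held in
 * the tx queue is simply ignored.
 */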
4581static void l2cap_handle_srej(struct l2cap_chan *chan,
4582 struct l2cap_ctrl *control)
4583{
4584 struct sk_buff *skb;
4585
4586 BT_DBG("chan %p, control %p", chan, control);
4587
4588 if (control->reqseq == chan->next_tx_seq) {
4589 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4590 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4591 return;
4592 }
4593
4594 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4595
4596 if (skb == NULL) {
4597 BT_DBG("Seq %d not available for retransmission",
4598 control->reqseq);
4599 return;
4600 }
4601
4602 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4603 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4604 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4605 return;
4606 }
4607
4608 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4609
4610 if (control->poll) {
4611 l2cap_pass_to_tx(chan, control);
4612
4613 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4614 l2cap_retransmit(chan, control);
4615 l2cap_ertm_send(chan);
4616
4617 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4618 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4619 chan->srej_save_reqseq = control->reqseq;
4620 }
4621 } else {
4622 l2cap_pass_to_tx_fbit(chan, control);
4623
4624 if (control->final) {
4625 if (chan->srej_save_reqseq != control->reqseq ||
4626 !test_and_clear_bit(CONN_SREJ_ACT,
4627 &chan->conn_state))
4628 l2cap_retransmit(chan, control);
4629 } else {
4630 l2cap_retransmit(chan, control);
4631 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4632 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4633 chan->srej_save_reqseq = control->reqseq;
4634 }
4635 }
4636 }
4637}
4638
4639static void l2cap_handle_rej(struct l2cap_chan *chan,
4640 struct l2cap_ctrl *control)
4641{
4642 struct sk_buff *skb;
4643
4644 BT_DBG("chan %p, control %p", chan, control);
4645
4646 if (control->reqseq == chan->next_tx_seq) {
4647 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4648 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4649 return;
4650 }
4651
4652 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4653
4654 if (chan->max_tx && skb &&
4655 bt_cb(skb)->control.retries >= chan->max_tx) {
4656 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4657 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4658 return;
4659 }
4660
4661 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4662
4663 l2cap_pass_to_tx(chan, control);
4664
4665 if (control->final) {
4666 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4667 l2cap_retransmit_all(chan, control);
4668 } else {
4669 l2cap_retransmit_all(chan, control);
4670 l2cap_ertm_send(chan);
4671 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4672 set_bit(CONN_REJ_ACT, &chan->conn_state);
4673 }
4674}
4675
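/* Classify a received txseq relative to the receive window.
 *
 * Editorial worked example of the offset arithmetic: with 6-bit (enhanced
 * control field) sequence numbers, offsets are taken modulo 64.  If
 * last_acked_seq = 60 and tx_win = 10, txseq 2 has offset
 * (2 - 60) mod 64 = 6 and falls inside the window, while txseq 8 has
 * offset 12 and falls outside it.
 */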
4676static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4677{
4678 BT_DBG("chan %p, txseq %d", chan, txseq);
4679
4680 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4681 chan->expected_tx_seq);
4682
4683 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4684 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4685 chan->tx_win) {
4686 /* See notes below regarding "double poll" and
4687 * invalid packets.
4688 */
4689 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4690 BT_DBG("Invalid/Ignore - after SREJ");
4691 return L2CAP_TXSEQ_INVALID_IGNORE;
4692 } else {
4693 BT_DBG("Invalid - in window after SREJ sent");
4694 return L2CAP_TXSEQ_INVALID;
4695 }
4696 }
4697
4698 if (chan->srej_list.head == txseq) {
4699 BT_DBG("Expected SREJ");
4700 return L2CAP_TXSEQ_EXPECTED_SREJ;
4701 }
4702
4703 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4704 BT_DBG("Duplicate SREJ - txseq already stored");
4705 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4706 }
4707
4708 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4709 BT_DBG("Unexpected SREJ - not requested");
4710 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4711 }
4712 }
4713
4714 if (chan->expected_tx_seq == txseq) {
4715 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4716 chan->tx_win) {
4717 BT_DBG("Invalid - txseq outside tx window");
4718 return L2CAP_TXSEQ_INVALID;
4719 } else {
4720 BT_DBG("Expected");
4721 return L2CAP_TXSEQ_EXPECTED;
4722 }
4723 }
4724
4725 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4726 __seq_offset(chan, chan->expected_tx_seq,
4727 chan->last_acked_seq)){
4728 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4729 return L2CAP_TXSEQ_DUPLICATE;
4730 }
4731
4732 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4733 /* A source of invalid packets is a "double poll" condition,
4734 * where delays cause us to send multiple poll packets. If
4735 * the remote stack receives and processes both polls,
4736 * sequence numbers can wrap around in such a way that a
4737 * resent frame has a sequence number that looks like new data
4738 * with a sequence gap. This would trigger an erroneous SREJ
4739 * request.
4740 *
4741 * Fortunately, this is impossible with a tx window that's
4742 * less than half of the maximum sequence number, which allows
4743 * invalid frames to be safely ignored.
4744 *
4745 * With tx window sizes greater than half of the tx window
4746 * maximum, the frame is invalid and cannot be ignored. This
4747 * causes a disconnect.
4748 */
4749
4750 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4751 BT_DBG("Invalid/Ignore - txseq outside tx window");
4752 return L2CAP_TXSEQ_INVALID_IGNORE;
4753 } else {
4754 BT_DBG("Invalid - txseq outside tx window");
4755 return L2CAP_TXSEQ_INVALID;
4756 }
4757 } else {
4758 BT_DBG("Unexpected - txseq indicates missing frames");
4759 return L2CAP_TXSEQ_UNEXPECTED;
4760 }
4761}
4762
4763static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4764 struct l2cap_ctrl *control,
4765 struct sk_buff *skb, u8 event)
4766{
4767 int err = 0;
4768 bool skb_in_use = 0;
4769
4770 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4771 event);
4772
4773 switch (event) {
4774 case L2CAP_EV_RECV_IFRAME:
4775 switch (l2cap_classify_txseq(chan, control->txseq)) {
4776 case L2CAP_TXSEQ_EXPECTED:
4777 l2cap_pass_to_tx(chan, control);
4778
4779 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4780 BT_DBG("Busy, discarding expected seq %d",
4781 control->txseq);
4782 break;
4783 }
4784
4785 chan->expected_tx_seq = __next_seq(chan,
4786 control->txseq);
4787
4788 chan->buffer_seq = chan->expected_tx_seq;
4789 skb_in_use = 1;
4790
4791 err = l2cap_reassemble_sdu(chan, skb, control);
4792 if (err)
4793 break;
4794
4795 if (control->final) {
4796 if (!test_and_clear_bit(CONN_REJ_ACT,
4797 &chan->conn_state)) {
4798 control->final = 0;
4799 l2cap_retransmit_all(chan, control);
4800 l2cap_ertm_send(chan);
4801 }
4802 }
4803
4804 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4805 l2cap_send_ack(chan);
4806 break;
4807 case L2CAP_TXSEQ_UNEXPECTED:
4808 l2cap_pass_to_tx(chan, control);
4809
4810			/* Can't issue SREJ frames in the local busy state.
4811			 * Drop this frame; it will be seen as missing
4812			 * when local busy is exited.
4813			 */
4814 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4815 BT_DBG("Busy, discarding unexpected seq %d",
4816 control->txseq);
4817 break;
4818 }
4819
4820 /* There was a gap in the sequence, so an SREJ
4821 * must be sent for each missing frame. The
4822 * current frame is stored for later use.
4823 */
4824 skb_queue_tail(&chan->srej_q, skb);
4825 skb_in_use = 1;
4826 BT_DBG("Queued %p (queue len %d)", skb,
4827 skb_queue_len(&chan->srej_q));
4828
4829 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4830 l2cap_seq_list_clear(&chan->srej_list);
4831 l2cap_send_srej(chan, control->txseq);
4832
4833 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4834 break;
4835 case L2CAP_TXSEQ_DUPLICATE:
4836 l2cap_pass_to_tx(chan, control);
4837 break;
4838 case L2CAP_TXSEQ_INVALID_IGNORE:
4839 break;
4840 case L2CAP_TXSEQ_INVALID:
4841 default:
4842 l2cap_send_disconn_req(chan->conn, chan,
4843 ECONNRESET);
4844 break;
4845 }
4846 break;
4847 case L2CAP_EV_RECV_RR:
4848 l2cap_pass_to_tx(chan, control);
4849 if (control->final) {
4850 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4851
4852 if (!test_and_clear_bit(CONN_REJ_ACT,
4853 &chan->conn_state)) {
4854 control->final = 0;
4855 l2cap_retransmit_all(chan, control);
4856 }
4857
4858 l2cap_ertm_send(chan);
4859 } else if (control->poll) {
4860 l2cap_send_i_or_rr_or_rnr(chan);
4861 } else {
4862 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4863 &chan->conn_state) &&
4864 chan->unacked_frames)
4865 __set_retrans_timer(chan);
4866
4867 l2cap_ertm_send(chan);
4868 }
4869 break;
4870 case L2CAP_EV_RECV_RNR:
4871 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4872 l2cap_pass_to_tx(chan, control);
4873 if (control && control->poll) {
4874 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4875 l2cap_send_rr_or_rnr(chan, 0);
4876 }
4877 __clear_retrans_timer(chan);
4878 l2cap_seq_list_clear(&chan->retrans_list);
4879 break;
4880 case L2CAP_EV_RECV_REJ:
4881 l2cap_handle_rej(chan, control);
4882 break;
4883 case L2CAP_EV_RECV_SREJ:
4884 l2cap_handle_srej(chan, control);
4885 break;
4886 default:
4887 break;
4888 }
4889
4890 if (skb && !skb_in_use) {
4891 BT_DBG("Freeing %p", skb);
4892 kfree_skb(skb);
4893 }
4894
4895 return err;
4896}
4897
4898static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4899 struct l2cap_ctrl *control,
4900 struct sk_buff *skb, u8 event)
4901{
4902 int err = 0;
4903 u16 txseq = control->txseq;
4904 bool skb_in_use = 0;
4905
4906 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4907 event);
4908
4909 switch (event) {
4910 case L2CAP_EV_RECV_IFRAME:
4911 switch (l2cap_classify_txseq(chan, txseq)) {
4912 case L2CAP_TXSEQ_EXPECTED:
4913 /* Keep frame for reassembly later */
4914 l2cap_pass_to_tx(chan, control);
4915 skb_queue_tail(&chan->srej_q, skb);
4916 skb_in_use = 1;
4917 BT_DBG("Queued %p (queue len %d)", skb,
4918 skb_queue_len(&chan->srej_q));
4919
4920 chan->expected_tx_seq = __next_seq(chan, txseq);
4921 break;
4922 case L2CAP_TXSEQ_EXPECTED_SREJ:
4923 l2cap_seq_list_pop(&chan->srej_list);
4924
4925 l2cap_pass_to_tx(chan, control);
4926 skb_queue_tail(&chan->srej_q, skb);
4927 skb_in_use = 1;
4928 BT_DBG("Queued %p (queue len %d)", skb,
4929 skb_queue_len(&chan->srej_q));
4930
4931 err = l2cap_rx_queued_iframes(chan);
4932 if (err)
4933 break;
4934
4935 break;
4936 case L2CAP_TXSEQ_UNEXPECTED:
4937 /* Got a frame that can't be reassembled yet.
4938 * Save it for later, and send SREJs to cover
4939 * the missing frames.
4940 */
4941 skb_queue_tail(&chan->srej_q, skb);
4942 skb_in_use = 1;
4943 BT_DBG("Queued %p (queue len %d)", skb,
4944 skb_queue_len(&chan->srej_q));
4945
4946 l2cap_pass_to_tx(chan, control);
4947 l2cap_send_srej(chan, control->txseq);
4948 break;
4949 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4950 /* This frame was requested with an SREJ, but
4951 * some expected retransmitted frames are
4952 * missing. Request retransmission of missing
4953 * SREJ'd frames.
4954 */
4955 skb_queue_tail(&chan->srej_q, skb);
4956 skb_in_use = 1;
4957 BT_DBG("Queued %p (queue len %d)", skb,
4958 skb_queue_len(&chan->srej_q));
4959
4960 l2cap_pass_to_tx(chan, control);
4961 l2cap_send_srej_list(chan, control->txseq);
4962 break;
4963 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4964 /* We've already queued this frame. Drop this copy. */
4965 l2cap_pass_to_tx(chan, control);
4966 break;
4967 case L2CAP_TXSEQ_DUPLICATE:
4968 /* Expecting a later sequence number, so this frame
4969 * was already received. Ignore it completely.
4970 */
4971 break;
4972 case L2CAP_TXSEQ_INVALID_IGNORE:
4973 break;
4974 case L2CAP_TXSEQ_INVALID:
4975 default:
4976 l2cap_send_disconn_req(chan->conn, chan,
4977 ECONNRESET);
4978 break;
4979 }
4980 break;
4981 case L2CAP_EV_RECV_RR:
4982 l2cap_pass_to_tx(chan, control);
4983 if (control->final) {
4984 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4985
4986 if (!test_and_clear_bit(CONN_REJ_ACT,
4987 &chan->conn_state)) {
4988 control->final = 0;
4989 l2cap_retransmit_all(chan, control);
4990 }
4991
4992 l2cap_ertm_send(chan);
4993 } else if (control->poll) {
4994 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4995 &chan->conn_state) &&
4996 chan->unacked_frames) {
4997 __set_retrans_timer(chan);
4998 }
4999
5000 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5001 l2cap_send_srej_tail(chan);
5002 } else {
5003 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5004 &chan->conn_state) &&
5005 chan->unacked_frames)
5006 __set_retrans_timer(chan);
5007
5008 l2cap_send_ack(chan);
5009 }
5010 break;
5011 case L2CAP_EV_RECV_RNR:
5012 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5013 l2cap_pass_to_tx(chan, control);
5014 if (control->poll) {
5015 l2cap_send_srej_tail(chan);
5016 } else {
5017 struct l2cap_ctrl rr_control;
5018 memset(&rr_control, 0, sizeof(rr_control));
5019 rr_control.sframe = 1;
5020 rr_control.super = L2CAP_SUPER_RR;
5021 rr_control.reqseq = chan->buffer_seq;
5022 l2cap_send_sframe(chan, &rr_control);
5023 }
5024
5025 break;
5026 case L2CAP_EV_RECV_REJ:
5027 l2cap_handle_rej(chan, control);
5028 break;
5029 case L2CAP_EV_RECV_SREJ:
5030 l2cap_handle_srej(chan, control);
5031 break;
5032 }
5033
5034 if (skb && !skb_in_use) {
5035 BT_DBG("Freeing %p", skb);
5036 kfree_skb(skb);
5037 }
5038
5039 return err;
5040}
5041
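/* Editorial worked example for the check below (6-bit sequence space): if
 * next_tx_seq = 10 and expected_ack_seq = 6 there are 4 unacked frames
 * (6, 7, 8, 9).  A reqseq of 8 gives offset (10 - 8) = 2 <= 4 and is
 * accepted, while a reqseq of 12 gives offset (10 - 12) mod 64 = 62 and is
 * rejected.
 */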
5042static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5043{
5044 /* Make sure reqseq is for a packet that has been sent but not acked */
5045 u16 unacked;
5046
5047 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5048 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5049}
5050
5051static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5052 struct sk_buff *skb, u8 event)
5053{
5054 int err = 0;
5055
5056 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5057 control, skb, event, chan->rx_state);
5058
5059 if (__valid_reqseq(chan, control->reqseq)) {
5060 switch (chan->rx_state) {
5061 case L2CAP_RX_STATE_RECV:
5062 err = l2cap_rx_state_recv(chan, control, skb, event);
5063 break;
5064 case L2CAP_RX_STATE_SREJ_SENT:
5065 err = l2cap_rx_state_srej_sent(chan, control, skb,
5066 event);
5067 break;
5068 default:
5069 /* shut it down */
5070 break;
5071 }
5072 } else {
5073		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
5074 control->reqseq, chan->next_tx_seq,
5075 chan->expected_ack_seq);
5076 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5077 }
5078
5079 return err;
5080}
5081
5082static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5083 struct sk_buff *skb)
5084{
5085 int err = 0;
5086
5087 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5088 chan->rx_state);
5089
5090 if (l2cap_classify_txseq(chan, control->txseq) ==
5091 L2CAP_TXSEQ_EXPECTED) {
5092 l2cap_pass_to_tx(chan, control);
5093
5094 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5095 __next_seq(chan, chan->buffer_seq));
5096
5097 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5098
5099 l2cap_reassemble_sdu(chan, skb, control);
5100 } else {
5101 if (chan->sdu) {
5102 kfree_skb(chan->sdu);
5103 chan->sdu = NULL;
5104 }
5105 chan->sdu_last_frag = NULL;
5106 chan->sdu_len = 0;
5107
5108 if (skb) {
5109 BT_DBG("Freeing %p", skb);
5110 kfree_skb(skb);
5111 }
5112 }
5113
5114 chan->last_acked_seq = control->txseq;
5115 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5116
5117 return err;
5118}
5119
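/* Receive path for ERTM and streaming mode PDUs.
 *
 * Editorial summary of the validation below: the FCS is checked first, then
 * the payload length (with the 2-byte SDU length field of a start fragment
 * and the FCS subtracted) must not exceed the negotiated MPS, S-frames must
 * carry no payload at all, and an F-bit is only accepted while the transmit
 * side is in WAIT_F.  Anything else is dropped or triggers a disconnect.
 */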
5120static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5121{
5122 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5123 u16 len;
5124 u8 event;
5125
5126 __unpack_control(chan, skb);
5127
5128 len = skb->len;
5129
5130	/*
5131	 * A corrupted I-frame can simply be dropped here.  The receive
5132	 * state machine will see it as missing and request retransmission
5133	 * through the normal recovery procedure.
5134	 */
5135 if (l2cap_check_fcs(chan, skb))
5136 goto drop;
5137
5138 if (!control->sframe && control->sar == L2CAP_SAR_START)
5139 len -= L2CAP_SDULEN_SIZE;
5140
5141 if (chan->fcs == L2CAP_FCS_CRC16)
5142 len -= L2CAP_FCS_SIZE;
5143
5144 if (len > chan->mps) {
5145 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5146 goto drop;
5147 }
5148
5149 if (!control->sframe) {
5150 int err;
5151
5152 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5153 control->sar, control->reqseq, control->final,
5154 control->txseq);
5155
5156 /* Validate F-bit - F=0 always valid, F=1 only
5157 * valid in TX WAIT_F
5158 */
5159 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5160 goto drop;
5161
5162 if (chan->mode != L2CAP_MODE_STREAMING) {
5163 event = L2CAP_EV_RECV_IFRAME;
5164 err = l2cap_rx(chan, control, skb, event);
5165 } else {
5166 err = l2cap_stream_rx(chan, control, skb);
5167 }
5168
5169 if (err)
5170 l2cap_send_disconn_req(chan->conn, chan,
5171 ECONNRESET);
5172 } else {
5173 const u8 rx_func_to_event[4] = {
5174 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5175 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5176 };
5177
5178 /* Only I-frames are expected in streaming mode */
5179 if (chan->mode == L2CAP_MODE_STREAMING)
5180 goto drop;
5181
5182 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5183 control->reqseq, control->final, control->poll,
5184 control->super);
5185
5186 if (len != 0) {
5187			BT_ERR("Trailing bytes: %d in sframe", len);
5188 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5189 goto drop;
5190 }
5191
5192 /* Validate F and P bits */
5193 if (control->final && (control->poll ||
5194 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5195 goto drop;
5196
5197 event = rx_func_to_event[control->super];
5198 if (l2cap_rx(chan, control, skb, event))
5199 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5200 }
5201
5202 return 0;
5203
5204drop:
5205 kfree_skb(skb);
5206 return 0;
5207}
5208
5209static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
5210{
5211 struct l2cap_chan *chan;
5212
5213 chan = l2cap_get_chan_by_scid(conn, cid);
5214 if (!chan) {
5215 BT_DBG("unknown cid 0x%4.4x", cid);
5216 /* Drop packet and return */
5217 kfree_skb(skb);
5218 return 0;
5219 }
5220
5221 BT_DBG("chan %p, len %d", chan, skb->len);
5222
5223 if (chan->state != BT_CONNECTED)
5224 goto drop;
5225
5226 switch (chan->mode) {
5227 case L2CAP_MODE_BASIC:
5228		/* If the socket recv buffer overflows we drop data here,
5229		 * which is *bad* because L2CAP is supposed to be reliable.
5230		 * But we don't have any other choice: basic mode provides
5231		 * no flow control mechanism. */
5232
5233 if (chan->imtu < skb->len)
5234 goto drop;
5235
5236 if (!chan->ops->recv(chan->data, skb))
5237 goto done;
5238 break;
5239
5240 case L2CAP_MODE_ERTM:
5241 case L2CAP_MODE_STREAMING:
5242 l2cap_data_rcv(chan, skb);
5243 goto done;
5244
5245 default:
5246 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5247 break;
5248 }
5249
5250drop:
5251 kfree_skb(skb);
5252
5253done:
5254 l2cap_chan_unlock(chan);
5255
5256 return 0;
5257}
5258
5259static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5260{
5261 struct l2cap_chan *chan;
5262
5263 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5264 if (!chan)
5265 goto drop;
5266
5267 BT_DBG("chan %p, len %d", chan, skb->len);
5268
5269 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5270 goto drop;
5271
5272 if (chan->imtu < skb->len)
5273 goto drop;
5274
5275 if (!chan->ops->recv(chan->data, skb))
5276 return 0;
5277
5278drop:
5279 kfree_skb(skb);
5280
5281 return 0;
5282}
5283
5284static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5285 struct sk_buff *skb)
5286{
5287 struct l2cap_chan *chan;
5288
5289 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5290 if (!chan)
5291 goto drop;
5292
5293 BT_DBG("chan %p, len %d", chan, skb->len);
5294
5295 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5296 goto drop;
5297
5298 if (chan->imtu < skb->len)
5299 goto drop;
5300
5301 if (!chan->ops->recv(chan->data, skb))
5302 return 0;
5303
5304drop:
5305 kfree_skb(skb);
5306
5307 return 0;
5308}
5309
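/* Dispatch a complete L2CAP frame based on the Basic header.
 *
 * Editorial note: the 4-byte Basic header (16-bit length, 16-bit CID, both
 * little endian) is stripped and its length field checked against the skb
 * before dispatch; fixed CIDs are routed to the signaling, connectionless
 * (PSM-prefixed), ATT and SMP handlers, and everything else is treated as a
 * connection-oriented data channel looked up by CID.
 */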
5310static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5311{
5312 struct l2cap_hdr *lh = (void *) skb->data;
5313 u16 cid, len;
5314 __le16 psm;
5315
5316 skb_pull(skb, L2CAP_HDR_SIZE);
5317 cid = __le16_to_cpu(lh->cid);
5318 len = __le16_to_cpu(lh->len);
5319
5320 if (len != skb->len) {
5321 kfree_skb(skb);
5322 return;
5323 }
5324
5325 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5326
5327 switch (cid) {
5328 case L2CAP_CID_LE_SIGNALING:
5329 case L2CAP_CID_SIGNALING:
5330 l2cap_sig_channel(conn, skb);
5331 break;
5332
5333 case L2CAP_CID_CONN_LESS:
5334 psm = get_unaligned((__le16 *) skb->data);
5335 skb_pull(skb, 2);
5336 l2cap_conless_channel(conn, psm, skb);
5337 break;
5338
5339 case L2CAP_CID_LE_DATA:
5340 l2cap_att_channel(conn, cid, skb);
5341 break;
5342
5343 case L2CAP_CID_SMP:
5344 if (smp_sig_channel(conn, skb))
5345 l2cap_conn_del(conn->hcon, EACCES);
5346 break;
5347
5348 default:
5349 l2cap_data_channel(conn, cid, skb);
5350 break;
5351 }
5352}
5353
5354/* ---- L2CAP interface with lower layer (HCI) ---- */
5355
5356int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5357{
5358 int exact = 0, lm1 = 0, lm2 = 0;
5359 struct l2cap_chan *c;
5360
5361 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5362
5363 /* Find listening sockets and check their link_mode */
5364 read_lock(&chan_list_lock);
5365 list_for_each_entry(c, &chan_list, global_l) {
5366 struct sock *sk = c->sk;
5367
5368 if (c->state != BT_LISTEN)
5369 continue;
5370
5371 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5372 lm1 |= HCI_LM_ACCEPT;
5373 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5374 lm1 |= HCI_LM_MASTER;
5375 exact++;
5376 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5377 lm2 |= HCI_LM_ACCEPT;
5378 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5379 lm2 |= HCI_LM_MASTER;
5380 }
5381 }
5382 read_unlock(&chan_list_lock);
5383
5384 return exact ? lm1 : lm2;
5385}
5386
5387int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5388{
5389 struct l2cap_conn *conn;
5390
5391 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5392
5393 if (!status) {
5394 conn = l2cap_conn_add(hcon, status);
5395 if (conn)
5396 l2cap_conn_ready(conn);
5397 } else
5398 l2cap_conn_del(hcon, bt_to_errno(status));
5399
5400 return 0;
5401}
5402
5403int l2cap_disconn_ind(struct hci_conn *hcon)
5404{
5405 struct l2cap_conn *conn = hcon->l2cap_data;
5406
5407 BT_DBG("hcon %p", hcon);
5408
5409 if (!conn)
5410 return HCI_ERROR_REMOTE_USER_TERM;
5411 return conn->disc_reason;
5412}
5413
5414int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5415{
5416 BT_DBG("hcon %p reason %d", hcon, reason);
5417
5418 l2cap_conn_del(hcon, bt_to_errno(reason));
5419 return 0;
5420}
5421
5422static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5423{
5424 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5425 return;
5426
5427 if (encrypt == 0x00) {
5428 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5429 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5430 } else if (chan->sec_level == BT_SECURITY_HIGH)
5431 l2cap_chan_close(chan, ECONNREFUSED);
5432 } else {
5433 if (chan->sec_level == BT_SECURITY_MEDIUM)
5434 __clear_chan_timer(chan);
5435 }
5436}
5437
5438int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5439{
5440 struct l2cap_conn *conn = hcon->l2cap_data;
5441 struct l2cap_chan *chan;
5442
5443 if (!conn)
5444 return 0;
5445
5446 BT_DBG("conn %p", conn);
5447
5448 if (hcon->type == LE_LINK) {
5449 if (!status && encrypt)
5450 smp_distribute_keys(conn, 0);
5451 cancel_delayed_work(&conn->security_timer);
5452 }
5453
5454 mutex_lock(&conn->chan_lock);
5455
5456 list_for_each_entry(chan, &conn->chan_l, list) {
5457 l2cap_chan_lock(chan);
5458
5459 BT_DBG("chan->scid %d", chan->scid);
5460
5461 if (chan->scid == L2CAP_CID_LE_DATA) {
5462 if (!status && encrypt) {
5463 chan->sec_level = hcon->sec_level;
5464 l2cap_chan_ready(chan);
5465 }
5466
5467 l2cap_chan_unlock(chan);
5468 continue;
5469 }
5470
5471 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5472 l2cap_chan_unlock(chan);
5473 continue;
5474 }
5475
5476 if (!status && (chan->state == BT_CONNECTED ||
5477 chan->state == BT_CONFIG)) {
5478 struct sock *sk = chan->sk;
5479
5480 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5481 sk->sk_state_change(sk);
5482
5483 l2cap_check_encryption(chan, encrypt);
5484 l2cap_chan_unlock(chan);
5485 continue;
5486 }
5487
5488 if (chan->state == BT_CONNECT) {
5489 if (!status) {
5490 l2cap_send_conn_req(chan);
5491 } else {
5492 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5493 }
5494 } else if (chan->state == BT_CONNECT2) {
5495 struct sock *sk = chan->sk;
5496 struct l2cap_conn_rsp rsp;
5497 __u16 res, stat;
5498
5499 lock_sock(sk);
5500
5501 if (!status) {
5502 if (test_bit(BT_SK_DEFER_SETUP,
5503 &bt_sk(sk)->flags)) {
5504 struct sock *parent = bt_sk(sk)->parent;
5505 res = L2CAP_CR_PEND;
5506 stat = L2CAP_CS_AUTHOR_PEND;
5507 if (parent)
5508 parent->sk_data_ready(parent, 0);
5509 } else {
5510 __l2cap_state_change(chan, BT_CONFIG);
5511 res = L2CAP_CR_SUCCESS;
5512 stat = L2CAP_CS_NO_INFO;
5513 }
5514 } else {
5515 __l2cap_state_change(chan, BT_DISCONN);
5516 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5517 res = L2CAP_CR_SEC_BLOCK;
5518 stat = L2CAP_CS_NO_INFO;
5519 }
5520
5521 release_sock(sk);
5522
5523 rsp.scid = cpu_to_le16(chan->dcid);
5524 rsp.dcid = cpu_to_le16(chan->scid);
5525 rsp.result = cpu_to_le16(res);
5526 rsp.status = cpu_to_le16(stat);
5527 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5528 sizeof(rsp), &rsp);
5529 }
5530
5531 l2cap_chan_unlock(chan);
5532 }
5533
5534 mutex_unlock(&conn->chan_lock);
5535
5536 return 0;
5537}
5538
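/* Reassemble L2CAP frames from HCI ACL fragments.
 *
 * Editorial worked example: a PDU whose Basic header announces 1000 bytes
 * of payload (1004 bytes in total) arriving over a link with, say, a
 * 672-byte ACL MTU is delivered as a start fragment (no ACL_CONT flag) of
 * 672 bytes followed by one continuation of 332 bytes; rx_len drops from
 * 332 to 0 and the completed frame in rx_skb is handed to
 * l2cap_recv_frame().
 */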
5539int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5540{
5541 struct l2cap_conn *conn = hcon->l2cap_data;
5542
5543 if (!conn)
5544 conn = l2cap_conn_add(hcon, 0);
5545
5546 if (!conn)
5547 goto drop;
5548
5549 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5550
5551 if (!(flags & ACL_CONT)) {
5552 struct l2cap_hdr *hdr;
5553 int len;
5554
5555 if (conn->rx_len) {
5556 BT_ERR("Unexpected start frame (len %d)", skb->len);
5557 kfree_skb(conn->rx_skb);
5558 conn->rx_skb = NULL;
5559 conn->rx_len = 0;
5560 l2cap_conn_unreliable(conn, ECOMM);
5561 }
5562
5563		/* A start fragment always begins with the Basic L2CAP header */
5564 if (skb->len < L2CAP_HDR_SIZE) {
5565 BT_ERR("Frame is too short (len %d)", skb->len);
5566 l2cap_conn_unreliable(conn, ECOMM);
5567 goto drop;
5568 }
5569
5570 hdr = (struct l2cap_hdr *) skb->data;
5571 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5572
5573 if (len == skb->len) {
5574 /* Complete frame received */
5575 l2cap_recv_frame(conn, skb);
5576 return 0;
5577 }
5578
5579 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5580
5581 if (skb->len > len) {
5582 BT_ERR("Frame is too long (len %d, expected len %d)",
5583 skb->len, len);
5584 l2cap_conn_unreliable(conn, ECOMM);
5585 goto drop;
5586 }
5587
5588 /* Allocate skb for the complete frame (with header) */
5589 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5590 if (!conn->rx_skb)
5591 goto drop;
5592
5593 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5594 skb->len);
5595 conn->rx_len = len - skb->len;
5596 } else {
5597 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5598
5599 if (!conn->rx_len) {
5600 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5601 l2cap_conn_unreliable(conn, ECOMM);
5602 goto drop;
5603 }
5604
5605 if (skb->len > conn->rx_len) {
5606 BT_ERR("Fragment is too long (len %d, expected %d)",
5607 skb->len, conn->rx_len);
5608 kfree_skb(conn->rx_skb);
5609 conn->rx_skb = NULL;
5610 conn->rx_len = 0;
5611 l2cap_conn_unreliable(conn, ECOMM);
5612 goto drop;
5613 }
5614
5615 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5616 skb->len);
5617 conn->rx_len -= skb->len;
5618
5619 if (!conn->rx_len) {
5620 /* Complete frame received */
5621 l2cap_recv_frame(conn, conn->rx_skb);
5622 conn->rx_skb = NULL;
5623 }
5624 }
5625
5626drop:
5627 kfree_skb(skb);
5628 return 0;
5629}
5630
5631static int l2cap_debugfs_show(struct seq_file *f, void *p)
5632{
5633 struct l2cap_chan *c;
5634
5635 read_lock(&chan_list_lock);
5636
5637 list_for_each_entry(c, &chan_list, global_l) {
5638 struct sock *sk = c->sk;
5639
5640 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5641 batostr(&bt_sk(sk)->src),
5642 batostr(&bt_sk(sk)->dst),
5643 c->state, __le16_to_cpu(c->psm),
5644 c->scid, c->dcid, c->imtu, c->omtu,
5645 c->sec_level, c->mode);
5646 }
5647
5648 read_unlock(&chan_list_lock);
5649
5650 return 0;
5651}
5652
5653static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5654{
5655 return single_open(file, l2cap_debugfs_show, inode->i_private);
5656}
5657
5658static const struct file_operations l2cap_debugfs_fops = {
5659 .open = l2cap_debugfs_open,
5660 .read = seq_read,
5661 .llseek = seq_lseek,
5662 .release = single_release,
5663};
5664
5665static struct dentry *l2cap_debugfs;
5666
5667int __init l2cap_init(void)
5668{
5669 int err;
5670
5671 err = l2cap_init_sockets();
5672 if (err < 0)
5673 return err;
5674
5675 if (bt_debugfs) {
5676 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5677 bt_debugfs, NULL, &l2cap_debugfs_fops);
5678 if (!l2cap_debugfs)
5679 BT_ERR("Failed to create L2CAP debug file");
5680 }
5681
5682 return 0;
5683}
5684
5685void l2cap_exit(void)
5686{
5687 debugfs_remove(l2cap_debugfs);
5688 l2cap_cleanup_sockets();
5689}
5690
5691module_param(disable_ertm, bool, 0644);
5692MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");