Bluetooth: Do not purge queue in Basic Mode
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
51 #include <net/sock.h>
52
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
78
79 /* ---- L2CAP channels ---- */
80
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 {
83 struct l2cap_chan *c;
84
85 list_for_each_entry(c, &conn->chan_l, list) {
86 if (c->dcid == cid)
87 return c;
88 }
89 return NULL;
90 }
91
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 {
94 struct l2cap_chan *c;
95
96 list_for_each_entry(c, &conn->chan_l, list) {
97 if (c->scid == cid)
98 return c;
99 }
100 return NULL;
101 }
102
/* Find channel with given SCID.
 *
 * Takes conn->chan_lock around the lookup and returns the channel
 * already locked via l2cap_chan_lock(), or NULL when not found.
 * The caller must l2cap_chan_unlock() the returned channel.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
117
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 {
120 struct l2cap_chan *c;
121
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
124 return c;
125 }
126 return NULL;
127 }
128
/* Find a channel in the global channel list bound to source port @psm
 * on local address @src.  Caller must hold chan_list_lock.
 * Returns NULL when no channel matches.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
			return c;
	}
	return NULL;
}
139
/* Bind @chan to a PSM for local address @src.
 *
 * A non-zero @psm is used as requested after verifying it is not
 * already bound on @src.  With @psm == 0, the first free PSM in the
 * 0x1001..0x10ff range is picked (odd values only, stepping by 2).
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken,
 * or -EINVAL if no free PSM could be found.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
172
173 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
174 {
175 write_lock(&chan_list_lock);
176
177 chan->scid = scid;
178
179 write_unlock(&chan_list_lock);
180
181 return 0;
182 }
183
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
185 {
186 u16 cid = L2CAP_CID_DYN_START;
187
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
190 return cid;
191 }
192
193 return 0;
194 }
195
/* Move @chan to @state and notify the channel owner through the
 * state_change callback.  Lockless variant; see l2cap_state_change()
 * for the wrapper that takes the socket lock.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
204
/* Socket-locked wrapper around __l2cap_state_change(). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
213
/* Record @err on the owning socket.  Lockless variant; see
 * l2cap_chan_set_err() for the socket-locked wrapper.
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
220
/* Socket-locked wrapper around __l2cap_chan_set_err(). */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
229
/* Arm the ERTM retransmission timer, but only when the monitor timer
 * is not already running (the monitor timer takes precedence — see
 * __set_monitor_timer(), which clears the retrans timer) and a
 * retransmission timeout has actually been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
238
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first — the two never run concurrently.  A zero
 * monitor_timeout means the timer is not used.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
247
248 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
249 u16 seq)
250 {
251 struct sk_buff *skb;
252
253 skb_queue_walk(head, skb) {
254 if (bt_cb(skb)->control.txseq == seq)
255 return skb;
256 }
257
258 return NULL;
259 }
260
261 /* ---- L2CAP sequence number lists ---- */
262
263 /* For ERTM, ordered lists of sequence numbers must be tracked for
264 * SREJ requests that are received and for frames that are to be
265 * retransmitted. These seq_list functions implement a singly-linked
266 * list in an array, where membership in the list can also be checked
267 * in constant time. Items can also be added to the tail of the list
268 * and removed from the head in constant time, without further memory
269 * allocs or frees.
270 */
271
/* Initialize @seq_list to hold up to @size sequence numbers.
 *
 * Returns 0 on success or -ENOMEM if the backing array could not be
 * allocated.  The list starts out empty (head/tail CLEAR, every slot
 * CLEAR).  Pair with l2cap_seq_list_free().
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* mask is alloc_size - 1, valid because alloc_size is a power of 2 */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
294
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
299
/* Test whether @seq is currently on @seq_list.  A slot holds either
 * the next sequence number in the chain, L2CAP_SEQ_LIST_TAIL, or
 * L2CAP_SEQ_LIST_CLEAR when the number is not a member.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
306
/* Remove @seq from @seq_list, returning @seq on success or
 * L2CAP_SEQ_LIST_CLEAR when @seq is not a member (or the list is
 * empty).  Removing the head is O(1); removing an interior number
 * walks the singly-linked chain to find its predecessor.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element empties the list entirely */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
340
/* Pop and return the head of @seq_list, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
346
347 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
348 {
349 u16 i;
350
351 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
352 return;
353
354 for (i = 0; i <= seq_list->mask; i++)
355 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
356
357 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
358 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
359 }
360
/* Append @seq to the tail of @seq_list in constant time.  A sequence
 * number that is already a member is silently ignored, so duplicates
 * never appear on the list.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
378
/* Delayed-work handler for the channel timer.
 *
 * Picks a close reason from the channel state, closes the channel
 * under conn->chan_lock + channel lock (in that order), then invokes
 * the owner's close callback outside the channel lock.  Drops the
 * reference the timer held on the channel.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* close callback runs without the channel lock held */
	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
408
/* Allocate and initialize a new channel, adding it to the global
 * channel list.  The channel starts in BT_OPEN with a single
 * reference held by the caller.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
436
/* Remove @chan from the global channel list and drop the caller's
 * reference (the one taken in l2cap_chan_create()).
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
445
/* Apply default configuration values to a freshly created channel,
 * before any negotiation with the remote side.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
456
/* Attach @chan to @conn: assign CIDs and default MTU according to the
 * channel type and link type, set best-effort local QoS defaults, take
 * a channel reference, and link the channel into the connection's
 * channel list.  Caller must hold conn->chan_lock (see
 * l2cap_chan_add() for the locked wrapper).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			__le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort service defaults for local QoS parameters */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
505
/* Wrapper around __l2cap_chan_add() that takes conn->chan_lock. */
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
512
513 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
514 {
515 struct sock *sk = chan->sk;
516 struct l2cap_conn *conn = chan->conn;
517 struct sock *parent = bt_sk(sk)->parent;
518
519 __clear_chan_timer(chan);
520
521 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
522
523 if (conn) {
524 /* Delete from channel list */
525 list_del(&chan->list);
526
527 l2cap_chan_put(chan);
528
529 chan->conn = NULL;
530 hci_conn_put(conn->hcon);
531 }
532
533 lock_sock(sk);
534
535 __l2cap_state_change(chan, BT_CLOSED);
536 sock_set_flag(sk, SOCK_ZAPPED);
537
538 if (err)
539 __l2cap_chan_set_err(chan, err);
540
541 if (parent) {
542 bt_accept_unlink(sk);
543 parent->sk_data_ready(parent, 0);
544 } else
545 sk->sk_state_change(sk);
546
547 release_sock(sk);
548
549 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
550 return;
551
552 switch(chan->mode) {
553 case L2CAP_MODE_BASIC:
554 break;
555
556 case L2CAP_MODE_ERTM:
557 __clear_retrans_timer(chan);
558 __clear_monitor_timer(chan);
559 __clear_ack_timer(chan);
560
561 skb_queue_purge(&chan->srej_q);
562
563 l2cap_seq_list_free(&chan->srej_list);
564 l2cap_seq_list_free(&chan->retrans_list);
565
566 /* fall through */
567
568 case L2CAP_MODE_STREAMING:
569 skb_queue_purge(&chan->tx_q);
570 break;
571 }
572
573 return;
574 }
575
/* Close every connection pending on listening socket @parent that has
 * not yet been accepted, invoking each channel's close callback after
 * the channel lock is released.
 */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		l2cap_chan_lock(chan);
		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
	}
}
594
/* Close @chan with errno @reason, driving the appropriate shutdown for
 * the current channel state:
 *
 *  - BT_LISTEN: tear down pending (unaccepted) children and zap.
 *  - BT_CONNECTED/BT_CONFIG on a connection-oriented ACL channel:
 *    send a Disconnect Request and arm the channel timer; otherwise
 *    delete the channel immediately.
 *  - BT_CONNECT2 on a connection-oriented ACL channel: reject the
 *    pending Connect Request (security block if setup was deferred,
 *    bad PSM otherwise), then delete.
 *  - BT_CONNECT/BT_DISCONN: delete the channel.
 *  - Anything else: just mark the socket zapped.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
					state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		lock_sock(sk);
		l2cap_chan_cleanup_listen(sk);

		__l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		lock_sock(sk);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;
	}
}
658
/* Map the channel type and security level to the HCI authentication
 * requirement used when securing the link: raw channels use dedicated
 * bonding, PSM 0x0001 (SDP) needs no bonding (and is demoted from
 * BT_SECURITY_LOW to BT_SECURITY_SDP), everything else uses general
 * bonding.  MITM protection is requested at BT_SECURITY_HIGH.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == cpu_to_le16(0x0001)) {
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
689
690 /* Service level security */
/* Service level security: ask the HCI layer to enforce the channel's
 * security level with the auth type derived from the channel.  Returns
 * the result of hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
700
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel range under conn->lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
722
/* Build and transmit a signalling command on @conn.  Commands are sent
 * at maximum HCI priority, non-flushable when the controller supports
 * it.  Allocation failure in l2cap_build_cmd() silently drops the
 * command.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
743
/* Hand a fully built L2CAP frame to the HCI layer on @chan's link.
 * The frame is marked non-flushable unless the channel is flagged
 * flushable or the controller lacks no-flush support.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
							skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
					lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
761
/* Decode a 16-bit enhanced control field into @control.  The frame
 * type bit selects S-frame (supervisory) vs I-frame (information)
 * decoding; fields that do not exist for the frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
785
/* Decode a 32-bit extended control field into @control — the extended
 * counterpart of __unpack_enhanced_control(), used when FLAG_EXT_CTRL
 * is negotiated.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
809
/* Decode the control field at the head of @skb into bt_cb(skb)->control
 * and consume it from the skb, choosing extended or enhanced format by
 * the channel's FLAG_EXT_CTRL setting.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
823
824 static u32 __pack_extended_control(struct l2cap_ctrl *control)
825 {
826 u32 packed;
827
828 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
829 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
830
831 if (control->sframe) {
832 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
833 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
834 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
835 } else {
836 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
837 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
838 }
839
840 return packed;
841 }
842
843 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
844 {
845 u16 packed;
846
847 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
848 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
849
850 if (control->sframe) {
851 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
852 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
853 packed |= L2CAP_CTRL_FRAME_TYPE;
854 } else {
855 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
856 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
857 }
858
859 return packed;
860 }
861
/* Write the packed control field for @control into @skb just past the
 * basic L2CAP header, choosing extended or enhanced format by the
 * channel's FLAG_EXT_CTRL setting.  The skb must already reserve the
 * control field bytes.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
874
/* Build a complete S-frame PDU for @chan carrying the already-packed
 * control field @control: basic header, control field (enhanced or
 * extended width), and an FCS trailer when CRC16 is in use.  Returns
 * the skb at maximum priority, or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	/* PDU length excludes the basic header itself */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything built so far (header + control) */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
912
/* Transmit the supervisory frame described by @control on @chan,
 * updating the related ERTM bookkeeping first: a pending F-bit is
 * consumed on non-poll frames, RR/RNR toggle the remote-busy-reported
 * flag, and any non-SREJ frame acknowledges up to control->reqseq and
 * cancels the ack timer.  Non-S-frames are ignored; PDU allocation
 * failure drops the frame silently.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
950
/* Send an RR or RNR supervisory frame acknowledging up to the current
 * buffer_seq: RNR when we are locally busy, RR otherwise.  @poll sets
 * the P-bit on the outgoing frame.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
969
/* True when no Connect Request is outstanding on @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
974
/* Send an L2CAP Connect Request for @chan, remembering the command
 * identifier on the channel and flagging the request as pending.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
989
/* Complete channel setup: clear all configuration state (including
 * CONF_NOT_COMPLETE, set at creation), stop the channel timer, move to
 * BT_CONNECTED, and wake the owning socket (and its listening parent,
 * if any).
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sock *parent;

	lock_sock(sk);

	parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	__l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);

	release_sock(sk);
}
1013
/* Kick off connection establishment for @chan.
 *
 * LE links skip negotiation entirely and go straight to ready.  On
 * BR/EDR, if the remote feature mask is already known (or being
 * fetched), send a Connect Request once security passes and no request
 * is pending; otherwise first issue an Information Request for the
 * feature mask and arm the info timer.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
1043
1044 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1045 {
1046 u32 local_feat_mask = l2cap_feat_mask;
1047 if (!disable_ertm)
1048 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1049
1050 switch (mode) {
1051 case L2CAP_MODE_ERTM:
1052 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1053 case L2CAP_MODE_STREAMING:
1054 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1055 default:
1056 return 0x00;
1057 }
1058 }
1059
/* Send a Disconnect Request for @chan on @conn, stopping the ERTM
 * timers first, then move the channel to BT_DISCONN with @err recorded
 * on the socket.  A NULL @conn is a no-op.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1084
1085 /* ---- L2CAP connections ---- */
/* Advance every connection-oriented channel on @conn now that the
 * link-level prerequisites (e.g. feature mask exchange) are done.
 *
 * Channels in BT_CONNECT get a Connect Request sent once security
 * passes — unless their required mode is unsupported and the channel
 * is a state-2 device, in which case they are closed.  Channels in
 * BT_CONNECT2 (incoming, awaiting our response) get a Connect Response
 * whose result depends on security and the defer-setup flag, followed
 * by our first Configure Request on success.
 *
 * Each channel is handled under its own lock, all within
 * conn->chan_lock; list_for_each_entry_safe allows
 * l2cap_chan_close() to unlink the current entry.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize first;
					 * answer "pending" and wake parent.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1168
1169 /* Find socket with cid and source/destination bdaddr.
1170 * Returns closest match, locked.
1171 */
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match, locked.
 *
 * An exact src/dst match wins immediately; otherwise the best
 * wildcard (BDADDR_ANY) candidate seen during the walk is returned.
 * @state of 0 matches any channel state.
 *
 * NOTE(review): despite the comment above, no per-channel lock is
 * taken here — only chan_list_lock during the walk.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1211
/* Accept an incoming LE link on the fixed LE data CID.
 *
 * Looks up a listening channel that matches the connection's address
 * pair, creates a child channel from it and moves the child straight to
 * BT_CONNECTED (the LE fixed channel needs no connect/config exchange).
 * Bails out silently if nobody is listening or the backlog is full.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the underlying HCI link referenced while the child exists */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	__l2cap_state_change(chan, BT_CONNECTED);
	/* Wake the listener so accept() can pick up the child socket */
	parent->sk_data_ready(parent, 0);

clean:
	release_sock(parent);
}
1258
/* The underlying HCI link is fully established: advance every channel
 * on this connection into the next phase of its setup.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand off to the LE accept path */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start SMP at the pending security level */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels are usable as soon
			 * as the link is up; no L2CAP handshake needed.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1297
1298 /* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only channels that demanded reliable delivery receive
		 * the error; best-effort channels keep going.
		 */
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			__l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1314
/* Information request timed out: stop waiting for the remote feature
 * mask and proceed with connection setup using what we have.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	/* Mark the exchange finished so it won't be re-issued */
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1325
/* Tear down the L2CAP state of an HCI link: close every channel with
 * the given error, cancel pending timers and free the conn object.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the channel outlives its removal from
		 * the list until ops->close() has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1368
/* SMP security procedure timed out: drop the whole connection */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1376
1377 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1378 {
1379 struct l2cap_conn *conn = hcon->l2cap_data;
1380 struct hci_chan *hchan;
1381
1382 if (conn || status)
1383 return conn;
1384
1385 hchan = hci_chan_create(hcon);
1386 if (!hchan)
1387 return NULL;
1388
1389 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1390 if (!conn) {
1391 hci_chan_del(hchan);
1392 return NULL;
1393 }
1394
1395 hcon->l2cap_data = conn;
1396 conn->hcon = hcon;
1397 conn->hchan = hchan;
1398
1399 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1400
1401 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1402 conn->mtu = hcon->hdev->le_mtu;
1403 else
1404 conn->mtu = hcon->hdev->acl_mtu;
1405
1406 conn->src = &hcon->hdev->bdaddr;
1407 conn->dst = &hcon->dst;
1408
1409 conn->feat_mask = 0;
1410
1411 spin_lock_init(&conn->lock);
1412 mutex_init(&conn->chan_lock);
1413
1414 INIT_LIST_HEAD(&conn->chan_l);
1415
1416 if (hcon->type == LE_LINK)
1417 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1418 else
1419 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1420
1421 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1422
1423 return conn;
1424 }
1425
1426 /* ---- Socket interface ---- */
1427
1428 /* Find socket with psm and source / destination bdaddr.
1429 * Returns closest match.
1430 */
1431 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1432 bdaddr_t *src,
1433 bdaddr_t *dst)
1434 {
1435 struct l2cap_chan *c, *c1 = NULL;
1436
1437 read_lock(&chan_list_lock);
1438
1439 list_for_each_entry(c, &chan_list, global_l) {
1440 struct sock *sk = c->sk;
1441
1442 if (state && c->state != state)
1443 continue;
1444
1445 if (c->psm == psm) {
1446 int src_match, dst_match;
1447 int src_any, dst_any;
1448
1449 /* Exact match. */
1450 src_match = !bacmp(&bt_sk(sk)->src, src);
1451 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1452 if (src_match && dst_match) {
1453 read_unlock(&chan_list_lock);
1454 return c;
1455 }
1456
1457 /* Closest match */
1458 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1459 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1460 if ((src_match && dst_any) || (src_any && dst_match) ||
1461 (src_any && dst_any))
1462 c1 = c;
1463 }
1464 }
1465
1466 read_unlock(&chan_list_lock);
1467
1468 return c1;
1469 }
1470
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 *
 * Validates the PSM/CID and channel mode, creates (or reuses) the HCI
 * link, attaches the channel to its l2cap_conn and either starts the
 * L2CAP connect procedure immediately (link already up) or leaves it to
 * l2cap_conn_ready(). Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/Streaming are only allowed unless disabled by module param */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		release_sock(sk);
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		release_sock(sk);
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		release_sock(sk);
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else uses ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel may ride an LE link here */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, which nests outside the
	 * channel lock — drop and re-take around it.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1614
/* Sleep until the peer has acked every outstanding ERTM I-frame, a
 * signal arrives, or the socket reports an error. Called with the
 * socket locked; the lock is dropped around each sleep so the RX path
 * can process incoming acks. Returns 0 or a negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		/* Must re-arm before re-checking the wait condition */
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1646
1647 static void l2cap_monitor_timeout(struct work_struct *work)
1648 {
1649 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1650 monitor_timer.work);
1651
1652 BT_DBG("chan %p", chan);
1653
1654 l2cap_chan_lock(chan);
1655
1656 if (!chan->conn) {
1657 l2cap_chan_unlock(chan);
1658 l2cap_chan_put(chan);
1659 return;
1660 }
1661
1662 l2cap_tx(chan, 0, 0, L2CAP_EV_MONITOR_TO);
1663
1664 l2cap_chan_unlock(chan);
1665 l2cap_chan_put(chan);
1666 }
1667
1668 static void l2cap_retrans_timeout(struct work_struct *work)
1669 {
1670 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1671 retrans_timer.work);
1672
1673 BT_DBG("chan %p", chan);
1674
1675 l2cap_chan_lock(chan);
1676
1677 if (!chan->conn) {
1678 l2cap_chan_unlock(chan);
1679 l2cap_chan_put(chan);
1680 return;
1681 }
1682
1683 l2cap_tx(chan, 0, 0, L2CAP_EV_RETRANS_TO);
1684 l2cap_chan_unlock(chan);
1685 l2cap_chan_put(chan);
1686 }
1687
/* Transmit a queue of segmented SDU fragments in Streaming Mode.
 *
 * Streaming Mode has no retransmission: each frame is numbered,
 * optionally FCS-protected, and sent exactly once. Drains both the
 * passed-in queue and anything already pending on chan->tx_q.
 */
static int l2cap_streaming_send(struct l2cap_chan *chan,
				struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* reqseq is meaningless in Streaming Mode */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}

	return 0;
}
1728
/* Send as many queued ERTM I-frames as the remote tx window allows.
 *
 * Frames stay on chan->tx_q after transmission (a clone goes on the
 * wire) so they can be retransmitted until acked. Returns the number of
 * frames sent, or -ENOTCONN if the channel is down.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer asked us to hold off (RNR received) */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acks everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; the original stays queued
		 * for possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
	       (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1795
1796 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1797 {
1798 struct l2cap_ctrl control;
1799 struct sk_buff *skb;
1800 struct sk_buff *tx_skb;
1801 u16 seq;
1802
1803 BT_DBG("chan %p", chan);
1804
1805 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1806 return;
1807
1808 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1809 seq = l2cap_seq_list_pop(&chan->retrans_list);
1810
1811 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1812 if (!skb) {
1813 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1814 seq);
1815 continue;
1816 }
1817
1818 bt_cb(skb)->control.retries++;
1819 control = bt_cb(skb)->control;
1820
1821 if (chan->max_tx != 0 &&
1822 bt_cb(skb)->control.retries > chan->max_tx) {
1823 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1824 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1825 l2cap_seq_list_clear(&chan->retrans_list);
1826 break;
1827 }
1828
1829 control.reqseq = chan->buffer_seq;
1830 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1831 control.final = 1;
1832 else
1833 control.final = 0;
1834
1835 if (skb_cloned(skb)) {
1836 /* Cloned sk_buffs are read-only, so we need a
1837 * writeable copy
1838 */
1839 tx_skb = skb_copy(skb, GFP_ATOMIC);
1840 } else {
1841 tx_skb = skb_clone(skb, GFP_ATOMIC);
1842 }
1843
1844 if (!tx_skb) {
1845 l2cap_seq_list_clear(&chan->retrans_list);
1846 break;
1847 }
1848
1849 /* Update skb contents */
1850 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1851 put_unaligned_le32(__pack_extended_control(&control),
1852 tx_skb->data + L2CAP_HDR_SIZE);
1853 } else {
1854 put_unaligned_le16(__pack_enhanced_control(&control),
1855 tx_skb->data + L2CAP_HDR_SIZE);
1856 }
1857
1858 if (chan->fcs == L2CAP_FCS_CRC16) {
1859 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1860 put_unaligned_le16(fcs, skb_put(tx_skb,
1861 L2CAP_FCS_SIZE));
1862 }
1863
1864 l2cap_do_send(chan, tx_skb);
1865
1866 BT_DBG("Resent txseq %d", control.txseq);
1867
1868 chan->last_acked_seq = chan->buffer_seq;
1869 }
1870 }
1871
/* Retransmit the single frame named by control->reqseq (SREJ path) */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1880
/* Retransmit every unacked frame from control->reqseq up to (but not
 * including) the current send pointer (REJ / poll-response path).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll demands an F-bit in our response */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to resend: the one carrying reqseq,
		 * or the send pointer if reqseq was already passed.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1914
/* Acknowledge received I-frames, preferring to piggy-back the ack on
 * outgoing data. Sends RNR when locally busy, an explicit RR once the
 * unacked window is 3/4 full, or re-arms the ack timer otherwise.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Tell the peer to stop sending: Receiver Not Ready */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Defer remaining acks to the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1964
/* Copy @len bytes of user data from @msg into @skb, chaining extra
 * fragments (sized to the HCI MTU) on skb's frag_list when the data
 * does not fit in the first buffer. @count is the room available in
 * @skb itself. Returns bytes copied or a negative errno; on error the
 * caller frees @skb, which also frees any fragments already linked.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so an -EFAULT below still leaves
		 * the fragment reachable from skb for freeing.
		 */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2009
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload. Returns the skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);

	/* First fragment holds the header plus whatever payload fits */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2043
2044 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2045 struct msghdr *msg, size_t len,
2046 u32 priority)
2047 {
2048 struct l2cap_conn *conn = chan->conn;
2049 struct sk_buff *skb;
2050 int err, count;
2051 struct l2cap_hdr *lh;
2052
2053 BT_DBG("chan %p len %d", chan, (int)len);
2054
2055 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2056
2057 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2058 msg->msg_flags & MSG_DONTWAIT);
2059 if (IS_ERR(skb))
2060 return skb;
2061
2062 skb->priority = priority;
2063
2064 /* Create L2CAP header */
2065 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2066 lh->cid = cpu_to_le16(chan->dcid);
2067 lh->len = cpu_to_le16(len);
2068
2069 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2070 if (unlikely(err < 0)) {
2071 kfree_skb(skb);
2072 return ERR_PTR(err);
2073 }
2074 return skb;
2075 }
2076
/* Build an ERTM/Streaming I-frame PDU. Reserves room for the enhanced
 * or extended control field (filled in at send time), an optional SDU
 * length field (@sdulen non-zero for the first segment of a segmented
 * SDU) and the FCS trailer. Returns the skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on the negotiated control field format */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* PDU length covers control/SDULEN/FCS overhead beyond the header */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2133
/* Segment a user SDU from @msg into a queue of I-frame PDUs.
 *
 * Chooses a PDU size that fits a single HCI fragment and the remote
 * MPS, tags each PDU with the appropriate SAR value (unsegmented, or
 * start/continue/end), and appends them to @seg_queue. On PDU creation
 * failure, purges what was queued and returns the error.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* The start PDU also carries the 2-byte SDU length */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first segment carries the SDU length;
			 * later segments get that room back for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2199
/* Send user data on @chan, dispatching on channel type and mode.
 *
 * Connectionless and Basic Mode data is built into a single PDU and
 * sent immediately. ERTM/Streaming data is first segmented, then handed
 * to the mode's transmit path. Returns bytes accepted (len) on success
 * or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			err = l2cap_tx(chan, 0, &seg_queue,
				       L2CAP_EV_DATA_REQUEST);
		else
			err = l2cap_streaming_send(chan, &seg_queue);

		if (!err)
			err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2281
/* Send an SREJ S-frame for every missing frame between the expected
 * sequence number and @txseq, recording each requested sequence on the
 * srej_list. Frames already buffered in srej_q are skipped.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Everything up to and including txseq is now accounted for */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2304
2305 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2306 {
2307 struct l2cap_ctrl control;
2308
2309 BT_DBG("chan %p", chan);
2310
2311 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2312 return;
2313
2314 memset(&control, 0, sizeof(control));
2315 control.sframe = 1;
2316 control.super = L2CAP_SUPER_SREJ;
2317 control.reqseq = chan->srej_list.tail;
2318 l2cap_send_sframe(chan, &control);
2319 }
2320
/* Re-send SREJs for every outstanding missing frame up to @txseq.
 * Each popped sequence is re-appended so the list survives the pass;
 * the saved initial head bounds the loop to a single traversal.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2346
/* Process an incoming acknowledgement: drop every frame the peer has
 * acked (all sequence numbers up to, but not including, @reqseq) from
 * the unacked tx queue and stop the retransmission timer when empty.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %d", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %d, unacked_frames %d",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* No more unacked frames — nothing left to retransmit */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
}
2378
/* Abandon an in-progress selective-reject recovery: forget which frames
 * were requested, drop the out-of-order frames already buffered, and
 * fall back to the plain RECV receive state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2388
/* ERTM transmit state machine: handle @event while in the XMIT state.
 * New data is sent immediately; busy transitions, polls and timeouts
 * may move the channel into the WAIT_F state.
 */
static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff_head *skbs, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Point the send cursor at the new data if idle */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Announce the busy condition (RNR) to the peer */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy; poll (RR with
			 * P-bit) and await the F-bit response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the peer for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2464
2465 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2466 struct l2cap_ctrl *control,
2467 struct sk_buff_head *skbs, u8 event)
2468 {
2469 int err = 0;
2470
2471 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2472 event);
2473
2474 switch (event) {
2475 case L2CAP_EV_DATA_REQUEST:
2476 if (chan->tx_send_head == NULL)
2477 chan->tx_send_head = skb_peek(skbs);
2478 /* Queue data, but don't send. */
2479 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2480 break;
2481 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2482 BT_DBG("Enter LOCAL_BUSY");
2483 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2484
2485 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2486 /* The SREJ_SENT state must be aborted if we are to
2487 * enter the LOCAL_BUSY state.
2488 */
2489 l2cap_abort_rx_srej_sent(chan);
2490 }
2491
2492 l2cap_send_ack(chan);
2493
2494 break;
2495 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2496 BT_DBG("Exit LOCAL_BUSY");
2497 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2498
2499 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2500 struct l2cap_ctrl local_control;
2501 memset(&local_control, 0, sizeof(local_control));
2502 local_control.sframe = 1;
2503 local_control.super = L2CAP_SUPER_RR;
2504 local_control.poll = 1;
2505 local_control.reqseq = chan->buffer_seq;
2506 l2cap_send_sframe(chan, &local_control);
2507
2508 chan->retry_count = 1;
2509 __set_monitor_timer(chan);
2510 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2511 }
2512 break;
2513 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2514 l2cap_process_reqseq(chan, control->reqseq);
2515
2516 /* Fall through */
2517
2518 case L2CAP_EV_RECV_FBIT:
2519 if (control && control->final) {
2520 __clear_monitor_timer(chan);
2521 if (chan->unacked_frames > 0)
2522 __set_retrans_timer(chan);
2523 chan->retry_count = 0;
2524 chan->tx_state = L2CAP_TX_STATE_XMIT;
2525 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2526 }
2527 break;
2528 case L2CAP_EV_EXPLICIT_POLL:
2529 /* Ignore */
2530 break;
2531 case L2CAP_EV_MONITOR_TO:
2532 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2533 l2cap_send_rr_or_rnr(chan, 1);
2534 __set_monitor_timer(chan);
2535 chan->retry_count++;
2536 } else {
2537 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2538 }
2539 break;
2540 default:
2541 break;
2542 }
2543
2544 return err;
2545 }
2546
2547 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2548 struct sk_buff_head *skbs, u8 event)
2549 {
2550 int err = 0;
2551
2552 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2553 chan, control, skbs, event, chan->tx_state);
2554
2555 switch (chan->tx_state) {
2556 case L2CAP_TX_STATE_XMIT:
2557 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2558 break;
2559 case L2CAP_TX_STATE_WAIT_F:
2560 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2561 break;
2562 default:
2563 /* Ignore event */
2564 break;
2565 }
2566
2567 return err;
2568 }
2569
2570 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2571 struct l2cap_ctrl *control)
2572 {
2573 BT_DBG("chan %p, control %p", chan, control);
2574 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2575 }
2576
2577 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2578 struct l2cap_ctrl *control)
2579 {
2580 BT_DBG("chan %p, control %p", chan, control);
2581 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_FBIT);
2582 }
2583
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* chan_lock protects the connection's channel list */
	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Each raw channel gets its own clone; on allocation
		 * failure just skip this channel.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* recv() takes ownership on success (returns 0);
		 * free the clone if the channel rejected it.
		 */
		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2612
2613 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command PDU: L2CAP header + command header
 * + dlen bytes of payload.  If the PDU exceeds the connection MTU, the
 * remainder is chained as continuation fragments (frag_list) without
 * additional headers.  Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	/* First fragment carries both headers; cap it at the MTU */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling CID */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room left in the first fragment */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb also frees any fragments already chained */
	kfree_skb(skb);
	return NULL;
}
2676
/* Decode one configuration option at *ptr, advancing *ptr past it.
 * Small (1/2/4 byte) values are returned by value in *val; for any
 * other length *val holds a pointer to the raw option payload.
 * Returns the total size consumed (header + payload).
 *
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * against the remaining buffer here — callers must bound the walk
 * (they loop while len >= L2CAP_CONF_OPT_SIZE); verify this is
 * sufficient for all option layouts.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Larger options (RFC, EFS, ...) are returned by pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2709
2710 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2711 {
2712 struct l2cap_conf_opt *opt = *ptr;
2713
2714 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2715
2716 opt->type = type;
2717 opt->len = len;
2718
2719 switch (len) {
2720 case 1:
2721 *((u8 *) opt->val) = val;
2722 break;
2723
2724 case 2:
2725 put_unaligned_le16(val, opt->val);
2726 break;
2727
2728 case 4:
2729 put_unaligned_le32(val, opt->val);
2730 break;
2731
2732 default:
2733 memcpy(opt->val, (void *) val, len);
2734 break;
2735 }
2736
2737 *ptr += L2CAP_CONF_OPT_SIZE + len;
2738 }
2739
/* Append an Extended Flow Specification option for the channel's mode.
 * ERTM advertises the channel's local service parameters; streaming
 * uses best-effort defaults.  Other modes add no EFS option.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Best-effort service with no latency/flush constraints */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
							(unsigned long) &efs);
}
2770
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR (without polling) to ack
 * them.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since we last sent an acknowledgement */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drops the reference taken when the timer was armed —
	 * NOTE(review): presumably paired with the hold in the timer
	 * setup helper; confirm against __set_ack_timer.
	 */
	l2cap_chan_put(chan);
}
2790
/* Reset per-channel transfer state for a newly configured channel.
 * For non-ERTM modes only the sequence counters and tx queue are
 * initialized.  For ERTM, additionally set up the rx/tx state machines,
 * the retransmission/monitor/ack timers, the SREJ queue, and the
 * srej/retrans sequence lists.  Returns 0 or a negative errno from
 * sequence-list allocation.
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the srej_list allocation on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2830
2831 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2832 {
2833 switch (mode) {
2834 case L2CAP_MODE_STREAMING:
2835 case L2CAP_MODE_ERTM:
2836 if (l2cap_mode_supported(mode, remote_feat_mask))
2837 return mode;
2838 /* fall through */
2839 default:
2840 return L2CAP_MODE_BASIC;
2841 }
2842 }
2843
/* Extended window size usable: high-speed enabled and peer advertises
 * the extended-window feature bit.
 */
static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}
2848
/* Extended flow specification usable: high-speed enabled and peer
 * advertises the extended-flow feature bit.
 */
static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
2853
2854 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2855 {
2856 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2857 __l2cap_ews_supported(chan)) {
2858 /* use extended control field */
2859 set_bit(FLAG_EXT_CTRL, &chan->flags);
2860 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2861 } else {
2862 chan->tx_win = min_t(u16, chan->tx_win,
2863 L2CAP_DEFAULT_TX_WINDOW);
2864 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2865 }
2866 }
2867
/* Build an outgoing Configure Request for the channel into data.
 * On the first request, the channel mode may first be downgraded via
 * l2cap_select_mode() if the peer lacks ERTM/streaming support.  Adds
 * MTU, RFC, and (mode-dependent) EFS/FCS/EWS options.  Returns the
 * number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC if the peer knows
		 * about retransmission/streaming modes at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		/* Timeouts are dictated by the receiving side's response */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* PDU size limited by the ACL MTU minus worst-case
		 * extended-header, SDU-length and FCS overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Full (unclamped) window goes in the EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
								chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2989
/* Parse the accumulated Configure Request (chan->conf_req/conf_len)
 * from the peer and build the Configure Response into data.  Applies
 * acceptable options to the channel, rejects/counters unacceptable
 * ones, and may return -ECONNREFUSED when negotiation cannot converge.
 * On success returns the number of response bytes written.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect every option the peer sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; others must be understood */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* List unknown non-hint option types in the rsp */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode renegotiation is only allowed early in the exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 device: the mode is non-negotiable */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after one failed mode counter-proposal */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* EFS service types must match unless one side
			 * carries no traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS (if received) supersedes the RFC window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits our ACL
			 * MTU with worst-case framing overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			/* We (the receiver) dictate the timeouts */
			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
						le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
						le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
3209
/* Parse a Configure Response from the peer and build the follow-up
 * Configure Request into data, echoing adjusted option values.  Updates
 * channel parameters (imtu, flush_to, mode, timeouts, mps, EFS) from
 * the accepted values; returns -ECONNREFUSED when the response conflicts
 * with a non-negotiable local setting, otherwise the number of request
 * bytes written.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Refuse (and re-propose) MTUs below the minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State 2 device cannot accept a different mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
						L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must match unless one side
			 * carries no traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* A basic-mode channel cannot be upgraded by the peer's reply */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
						le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
3308
3309 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3310 {
3311 struct l2cap_conf_rsp *rsp = data;
3312 void *ptr = rsp->data;
3313
3314 BT_DBG("chan %p", chan);
3315
3316 rsp->scid = cpu_to_le16(chan->dcid);
3317 rsp->result = cpu_to_le16(result);
3318 rsp->flags = cpu_to_le16(flags);
3319
3320 return ptr - data;
3321 }
3322
/* Complete a deferred connection setup: send the pending success
 * Connect Response, then kick off configuration with the first
 * Configure Request (exactly once, guarded by CONF_REQ_SENT).
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	/* chan->ident was saved from the original Connect Request */
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3343
/* Extract the RFC option from an accepted Configure Response and apply
 * its timeouts/PDU size to the channel.  Falls back to sane defaults
 * when a misbehaving remote omitted the RFC option.  No-op for basic
 * mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3387
/* Handle an incoming Command Reject.  A "not understood" rejection of
 * our outstanding Information Request means the peer predates the
 * feature-mask exchange: treat features as resolved and resume channel
 * establishment.  Always returns 0.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* Only react if the reject matches our pending info request */
	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3407
/* Handle an incoming Connect Request: find a listening channel for the
 * PSM, apply security/backlog/duplicate checks, create the new child
 * channel, and reply with success, pending, or a rejection.  May also
 * trigger the feature-mask Information exchange and the first Configure
 * Request.  Always returns 0.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must accept before we proceed */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to complete */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask unknown yet; answer pending for now */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature-mask exchange if not done yet */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3537
/* Handle an incoming Connect Response: locate our channel by scid (or
 * by the pending command ident when scid is 0), then either move to
 * configuration (success), mark the connection pending, or tear the
 * channel down on failure.  Returns 0, or -EFAULT if no matching
 * channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No scid yet (e.g. pending response): match by ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the first Configure Request exactly once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal: tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3605
3606 static inline void set_default_fcs(struct l2cap_chan *chan)
3607 {
3608 /* FCS is enabled only in ERTM or streaming mode, if one or both
3609 * sides request it.
3610 */
3611 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3612 chan->fcs = L2CAP_FCS_NONE;
3613 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3614 chan->fcs = L2CAP_FCS_CRC16;
3615 }
3616
/* Handle an incoming L2CAP Configure Request on a BR/EDR link.
 *
 * Option data may arrive fragmented (continuation flag set); fragments
 * are accumulated in chan->conf_req until the final one arrives, then
 * parsed and answered with a Configure Response.  Once both directions
 * are configured (CONF_INPUT_DONE and CONF_OUTPUT_DONE) the channel is
 * moved to BT_CONNECTED.  Returns 0 on success or a negative errno.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked; released at "unlock" below. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal while the channel is being set up. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	/* Continuation flag (bit 0): more option fragments will follow. */
	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Our own outgoing configuration must also have been accepted
	 * before the channel can be used.
	 */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Remote side finished first: send our Configure Request now if
	 * we have not done so already.
	 */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, 0x0000), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3726
/* Handle an incoming L2CAP Configure Response.
 *
 * Depending on the result code, either accepts the peer's settings,
 * tracks a PENDING negotiation, retries with adjusted options
 * (UNACCEPT), or tears the channel down.  When both sides are fully
 * configured the channel transitions to BT_CONNECTED.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked; released at "done" below. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we also replied PENDING, finish our side of the
		 * negotiation now and send the final success response.
		 */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
							L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many unaccepted responses, give up
		 * and disconnect like any other failure result.
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* Continuation flag set: more response fragments follow. */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3833
/* Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response, shut the socket down, and tear down the channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The request's destination CID is our source CID. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Echo the CID pair back, swapped into the peer's perspective. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold an extra reference so the channel outlives
	 * l2cap_chan_del() until the close callback below has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	/* Close callback runs without the channel lock held. */
	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3879
/* Handle an incoming Disconnect Response: the peer has acknowledged our
 * disconnect request, so tear down the channel (error code 0 = clean).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold an extra reference so the channel outlives
	 * l2cap_chan_del() until the close callback below has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	/* Close callback runs without the channel lock held. */
	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3913
/* Handle an incoming Information Request.  Supported queries are the
 * feature mask and the fixed channel map; anything else is answered
 * with "not supported".  Always returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 8 bytes: type + result + 32-bit feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 12 bytes: type + result + 8-byte fixed channel map. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE: mutates the module-global l2cap_fixed_chan table
		 * to reflect the current enable_hs setting.
		 */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		/* Unknown info type: reply "not supported". */
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3963
/* Handle an incoming Information Response.
 *
 * After receiving the feature mask, a fixed-channel query may be
 * chained; once all information is collected (or a query fails), the
 * REQ_DONE flag is set and any pending channel connects are started
 * via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Query failed: give up on info gathering and proceed anyway. */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Peer supports fixed channels: chain a second query
		 * before declaring the info exchange done.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4021
4022 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4023 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4024 void *data)
4025 {
4026 struct l2cap_create_chan_req *req = data;
4027 struct l2cap_create_chan_rsp rsp;
4028 u16 psm, scid;
4029
4030 if (cmd_len != sizeof(*req))
4031 return -EPROTO;
4032
4033 if (!enable_hs)
4034 return -EINVAL;
4035
4036 psm = le16_to_cpu(req->psm);
4037 scid = le16_to_cpu(req->scid);
4038
4039 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
4040
4041 /* Placeholder: Always reject */
4042 rsp.dcid = 0;
4043 rsp.scid = cpu_to_le16(scid);
4044 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4045 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4046
4047 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4048 sizeof(rsp), &rsp);
4049
4050 return 0;
4051 }
4052
/* Handle an AMP Create Channel Response.  The response has the same
 * layout and semantics as a Connect Response, so reuse that handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4060
4061 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4062 u16 icid, u16 result)
4063 {
4064 struct l2cap_move_chan_rsp rsp;
4065
4066 BT_DBG("icid %d, result %d", icid, result);
4067
4068 rsp.icid = cpu_to_le16(icid);
4069 rsp.result = cpu_to_le16(result);
4070
4071 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4072 }
4073
4074 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4075 struct l2cap_chan *chan, u16 icid, u16 result)
4076 {
4077 struct l2cap_move_chan_cfm cfm;
4078 u8 ident;
4079
4080 BT_DBG("icid %d, result %d", icid, result);
4081
4082 ident = l2cap_get_ident(conn);
4083 if (chan)
4084 chan->ident = ident;
4085
4086 cfm.icid = cpu_to_le16(icid);
4087 cfm.result = cpu_to_le16(result);
4088
4089 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4090 }
4091
/* Send a Move Channel Confirm Response, acknowledging the confirm
 * identified by @ident for @icid.
 */
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
					u16 icid)
{
	struct l2cap_move_chan_cfm_rsp rsp;

	BT_DBG("icid %d", icid);

	rsp.icid = cpu_to_le16(icid);
	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}
4102
4103 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4104 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4105 {
4106 struct l2cap_move_chan_req *req = data;
4107 u16 icid = 0;
4108 u16 result = L2CAP_MR_NOT_ALLOWED;
4109
4110 if (cmd_len != sizeof(*req))
4111 return -EPROTO;
4112
4113 icid = le16_to_cpu(req->icid);
4114
4115 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4116
4117 if (!enable_hs)
4118 return -EINVAL;
4119
4120 /* Placeholder: Always refuse */
4121 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4122
4123 return 0;
4124 }
4125
4126 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4127 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4128 {
4129 struct l2cap_move_chan_rsp *rsp = data;
4130 u16 icid, result;
4131
4132 if (cmd_len != sizeof(*rsp))
4133 return -EPROTO;
4134
4135 icid = le16_to_cpu(rsp->icid);
4136 result = le16_to_cpu(rsp->result);
4137
4138 BT_DBG("icid %d, result %d", icid, result);
4139
4140 /* Placeholder: Always unconfirmed */
4141 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4142
4143 return 0;
4144 }
4145
4146 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4147 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4148 {
4149 struct l2cap_move_chan_cfm *cfm = data;
4150 u16 icid, result;
4151
4152 if (cmd_len != sizeof(*cfm))
4153 return -EPROTO;
4154
4155 icid = le16_to_cpu(cfm->icid);
4156 result = le16_to_cpu(cfm->result);
4157
4158 BT_DBG("icid %d, result %d", icid, result);
4159
4160 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4161
4162 return 0;
4163 }
4164
/* Handle an AMP Move Channel Confirm Response.  Nothing to act on yet
 * beyond validating the length; returns 0 or -EPROTO.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid %d", icid);

	return 0;
}
4180
4181 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4182 u16 to_multiplier)
4183 {
4184 u16 max_latency;
4185
4186 if (min > max || min < 6 || max > 3200)
4187 return -EINVAL;
4188
4189 if (to_multiplier < 10 || to_multiplier > 3200)
4190 return -EINVAL;
4191
4192 if (max >= to_multiplier * 8)
4193 return -EINVAL;
4194
4195 max_latency = (to_multiplier * 8 / max) - 1;
4196 if (latency > 499 || latency > max_latency)
4197 return -EINVAL;
4198
4199 return 0;
4200 }
4201
/* Handle an LE Connection Parameter Update Request.
 *
 * Only the master may be asked to update parameters.  The request is
 * validated, an accept/reject response is sent, and accepted values
 * are pushed down to the controller via hci_le_conn_update().
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the new parameters only after they were accepted. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4243
/* Dispatch one BR/EDR signaling command to its handler.  Echo requests
 * are answered inline; unknown opcodes return -EINVAL, which causes
 * the caller to send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		/* Return value deliberately ignored: a bad reject is
		 * not itself rejected.
		 */
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4325
4326 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4327 struct l2cap_cmd_hdr *cmd, u8 *data)
4328 {
4329 switch (cmd->code) {
4330 case L2CAP_COMMAND_REJ:
4331 return 0;
4332
4333 case L2CAP_CONN_PARAM_UPDATE_REQ:
4334 return l2cap_conn_param_update_req(conn, cmd, data);
4335
4336 case L2CAP_CONN_PARAM_UPDATE_RSP:
4337 return 0;
4338
4339 default:
4340 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4341 return -EINVAL;
4342 }
4343 }
4344
/* Process a signaling-channel PDU: deliver it raw to listeners, then
 * walk the (possibly multiple) commands it contains and dispatch each
 * to the LE or BR/EDR handler.  Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A claimed length beyond the buffer, or a zero ident,
		 * means the PDU is corrupt; stop parsing entirely.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		/* Any handler error is reported to the peer as a
		 * Command Reject.
		 */
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4391
/* Verify (and strip) the trailing CRC16 FCS of a received frame.
 * Returns 0 when the FCS matches or is disabled, -EBADMSG otherwise.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off first; the two FCS bytes are still
		 * present in the buffer just past the new skb->len, so
		 * they can be read below.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* CRC covers the L2CAP header preceding skb->data as
		 * well as the remaining payload.
		 */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4412
/* Send the peer an acknowledgment carrying the F-bit: an RNR when we
 * are locally busy, otherwise pending I-frames, or a plain RR if no
 * frame went out with the F-bit set.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Restart the retransmission timer if unacked frames remain now
	 * that the remote is no longer busy.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
			chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4446
/* Append @new_frag to @skb's fragment list, updating the length
 * accounting on the head skb and advancing *@last_frag to the new
 * tail.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4465
/* Reassemble an SDU from one or more I-frames according to the SAR
 * bits in @control, delivering complete SDUs via chan->ops->recv().
 *
 * Ownership: when a frame is consumed (queued into the partial SDU or
 * handed to recv), the local @skb pointer is set to NULL so that the
 * error path below does not double-free it.  On error both the frame
 * and any partial SDU are dropped.  Returns 0 on success or a negative
 * errno (-EINVAL for SAR protocol violations, -EMSGSIZE for oversize).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress. */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment carries the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already contain the whole
		 * SDU (err stays -EINVAL).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overflow the SDU
		 * (err stays -EINVAL).
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must make the length match exactly. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the current frame (if still owned here) and any
		 * partial SDU.  kfree_skb() tolerates NULL.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4547
4548 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4549 {
4550 u8 event;
4551
4552 if (chan->mode != L2CAP_MODE_ERTM)
4553 return;
4554
4555 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4556 l2cap_tx(chan, 0, 0, event);
4557 }
4558
/* Drain the SREJ queue: deliver sequential frames starting at
 * buffer_seq until a gap is hit or we go locally busy.  When the queue
 * empties, leave the SREJ_SENT state and acknowledge the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
				chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: wait for the missing frame to arrive. */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4592
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, honoring poll/final bits and the SREJ_ACT bookkeeping that
 * prevents a second retransmission of the same frame.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for the next unsent sequence number is a protocol
	 * violation.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of 0 means unlimited retransmissions. */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll bit set: our retransmission must carry the
		 * F-bit in response.
		 */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this final
			 * answers the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4650
/* Handle a received REJ S-frame: retransmit all unacknowledged frames
 * starting at reqseq, using REJ_ACT to avoid retransmitting twice for
 * the same reject.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the next unsent sequence number is a protocol
	 * violation.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of 0 means unlimited retransmissions. */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this final doesn't answer a REJ
		 * we already acted upon.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4687
/* Classify a received I-frame's tx sequence number relative to the
 * receive window and any outstanding SREJ state.  The returned
 * L2CAP_TXSEQ_* code tells the rx state machine whether the frame is
 * expected, a duplicate, unexpected (gap), or invalid.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
								chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
				chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4774
4775 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4776 struct l2cap_ctrl *control,
4777 struct sk_buff *skb, u8 event)
4778 {
4779 int err = 0;
4780 bool skb_in_use = 0;
4781
4782 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4783 event);
4784
4785 switch (event) {
4786 case L2CAP_EV_RECV_IFRAME:
4787 switch (l2cap_classify_txseq(chan, control->txseq)) {
4788 case L2CAP_TXSEQ_EXPECTED:
4789 l2cap_pass_to_tx(chan, control);
4790
4791 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4792 BT_DBG("Busy, discarding expected seq %d",
4793 control->txseq);
4794 break;
4795 }
4796
4797 chan->expected_tx_seq = __next_seq(chan,
4798 control->txseq);
4799
4800 chan->buffer_seq = chan->expected_tx_seq;
4801 skb_in_use = 1;
4802
4803 err = l2cap_reassemble_sdu(chan, skb, control);
4804 if (err)
4805 break;
4806
4807 if (control->final) {
4808 if (!test_and_clear_bit(CONN_REJ_ACT,
4809 &chan->conn_state)) {
4810 control->final = 0;
4811 l2cap_retransmit_all(chan, control);
4812 l2cap_ertm_send(chan);
4813 }
4814 }
4815
4816 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4817 l2cap_send_ack(chan);
4818 break;
4819 case L2CAP_TXSEQ_UNEXPECTED:
4820 l2cap_pass_to_tx(chan, control);
4821
4822 /* Can't issue SREJ frames in the local busy state.
4823 * Drop this frame, it will be seen as missing
4824 * when local busy is exited.
4825 */
4826 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4827 BT_DBG("Busy, discarding unexpected seq %d",
4828 control->txseq);
4829 break;
4830 }
4831
4832 /* There was a gap in the sequence, so an SREJ
4833 * must be sent for each missing frame. The
4834 * current frame is stored for later use.
4835 */
4836 skb_queue_tail(&chan->srej_q, skb);
4837 skb_in_use = 1;
4838 BT_DBG("Queued %p (queue len %d)", skb,
4839 skb_queue_len(&chan->srej_q));
4840
4841 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4842 l2cap_seq_list_clear(&chan->srej_list);
4843 l2cap_send_srej(chan, control->txseq);
4844
4845 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4846 break;
4847 case L2CAP_TXSEQ_DUPLICATE:
4848 l2cap_pass_to_tx(chan, control);
4849 break;
4850 case L2CAP_TXSEQ_INVALID_IGNORE:
4851 break;
4852 case L2CAP_TXSEQ_INVALID:
4853 default:
4854 l2cap_send_disconn_req(chan->conn, chan,
4855 ECONNRESET);
4856 break;
4857 }
4858 break;
4859 case L2CAP_EV_RECV_RR:
4860 l2cap_pass_to_tx(chan, control);
4861 if (control->final) {
4862 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4863
4864 if (!test_and_clear_bit(CONN_REJ_ACT,
4865 &chan->conn_state)) {
4866 control->final = 0;
4867 l2cap_retransmit_all(chan, control);
4868 }
4869
4870 l2cap_ertm_send(chan);
4871 } else if (control->poll) {
4872 l2cap_send_i_or_rr_or_rnr(chan);
4873 } else {
4874 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4875 &chan->conn_state) &&
4876 chan->unacked_frames)
4877 __set_retrans_timer(chan);
4878
4879 l2cap_ertm_send(chan);
4880 }
4881 break;
4882 case L2CAP_EV_RECV_RNR:
4883 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4884 l2cap_pass_to_tx(chan, control);
4885 if (control && control->poll) {
4886 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4887 l2cap_send_rr_or_rnr(chan, 0);
4888 }
4889 __clear_retrans_timer(chan);
4890 l2cap_seq_list_clear(&chan->retrans_list);
4891 break;
4892 case L2CAP_EV_RECV_REJ:
4893 l2cap_handle_rej(chan, control);
4894 break;
4895 case L2CAP_EV_RECV_SREJ:
4896 l2cap_handle_srej(chan, control);
4897 break;
4898 default:
4899 break;
4900 }
4901
4902 if (skb && !skb_in_use) {
4903 BT_DBG("Freeing %p", skb);
4904 kfree_skb(skb);
4905 }
4906
4907 return err;
4908 }
4909
4910 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4911 struct l2cap_ctrl *control,
4912 struct sk_buff *skb, u8 event)
4913 {
4914 int err = 0;
4915 u16 txseq = control->txseq;
4916 bool skb_in_use = 0;
4917
4918 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4919 event);
4920
4921 switch (event) {
4922 case L2CAP_EV_RECV_IFRAME:
4923 switch (l2cap_classify_txseq(chan, txseq)) {
4924 case L2CAP_TXSEQ_EXPECTED:
4925 /* Keep frame for reassembly later */
4926 l2cap_pass_to_tx(chan, control);
4927 skb_queue_tail(&chan->srej_q, skb);
4928 skb_in_use = 1;
4929 BT_DBG("Queued %p (queue len %d)", skb,
4930 skb_queue_len(&chan->srej_q));
4931
4932 chan->expected_tx_seq = __next_seq(chan, txseq);
4933 break;
4934 case L2CAP_TXSEQ_EXPECTED_SREJ:
4935 l2cap_seq_list_pop(&chan->srej_list);
4936
4937 l2cap_pass_to_tx(chan, control);
4938 skb_queue_tail(&chan->srej_q, skb);
4939 skb_in_use = 1;
4940 BT_DBG("Queued %p (queue len %d)", skb,
4941 skb_queue_len(&chan->srej_q));
4942
4943 err = l2cap_rx_queued_iframes(chan);
4944 if (err)
4945 break;
4946
4947 break;
4948 case L2CAP_TXSEQ_UNEXPECTED:
4949 /* Got a frame that can't be reassembled yet.
4950 * Save it for later, and send SREJs to cover
4951 * the missing frames.
4952 */
4953 skb_queue_tail(&chan->srej_q, skb);
4954 skb_in_use = 1;
4955 BT_DBG("Queued %p (queue len %d)", skb,
4956 skb_queue_len(&chan->srej_q));
4957
4958 l2cap_pass_to_tx(chan, control);
4959 l2cap_send_srej(chan, control->txseq);
4960 break;
4961 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4962 /* This frame was requested with an SREJ, but
4963 * some expected retransmitted frames are
4964 * missing. Request retransmission of missing
4965 * SREJ'd frames.
4966 */
4967 skb_queue_tail(&chan->srej_q, skb);
4968 skb_in_use = 1;
4969 BT_DBG("Queued %p (queue len %d)", skb,
4970 skb_queue_len(&chan->srej_q));
4971
4972 l2cap_pass_to_tx(chan, control);
4973 l2cap_send_srej_list(chan, control->txseq);
4974 break;
4975 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4976 /* We've already queued this frame. Drop this copy. */
4977 l2cap_pass_to_tx(chan, control);
4978 break;
4979 case L2CAP_TXSEQ_DUPLICATE:
4980 /* Expecting a later sequence number, so this frame
4981 * was already received. Ignore it completely.
4982 */
4983 break;
4984 case L2CAP_TXSEQ_INVALID_IGNORE:
4985 break;
4986 case L2CAP_TXSEQ_INVALID:
4987 default:
4988 l2cap_send_disconn_req(chan->conn, chan,
4989 ECONNRESET);
4990 break;
4991 }
4992 break;
4993 case L2CAP_EV_RECV_RR:
4994 l2cap_pass_to_tx(chan, control);
4995 if (control->final) {
4996 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4997
4998 if (!test_and_clear_bit(CONN_REJ_ACT,
4999 &chan->conn_state)) {
5000 control->final = 0;
5001 l2cap_retransmit_all(chan, control);
5002 }
5003
5004 l2cap_ertm_send(chan);
5005 } else if (control->poll) {
5006 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5007 &chan->conn_state) &&
5008 chan->unacked_frames) {
5009 __set_retrans_timer(chan);
5010 }
5011
5012 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5013 l2cap_send_srej_tail(chan);
5014 } else {
5015 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5016 &chan->conn_state) &&
5017 chan->unacked_frames)
5018 __set_retrans_timer(chan);
5019
5020 l2cap_send_ack(chan);
5021 }
5022 break;
5023 case L2CAP_EV_RECV_RNR:
5024 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5025 l2cap_pass_to_tx(chan, control);
5026 if (control->poll) {
5027 l2cap_send_srej_tail(chan);
5028 } else {
5029 struct l2cap_ctrl rr_control;
5030 memset(&rr_control, 0, sizeof(rr_control));
5031 rr_control.sframe = 1;
5032 rr_control.super = L2CAP_SUPER_RR;
5033 rr_control.reqseq = chan->buffer_seq;
5034 l2cap_send_sframe(chan, &rr_control);
5035 }
5036
5037 break;
5038 case L2CAP_EV_RECV_REJ:
5039 l2cap_handle_rej(chan, control);
5040 break;
5041 case L2CAP_EV_RECV_SREJ:
5042 l2cap_handle_srej(chan, control);
5043 break;
5044 }
5045
5046 if (skb && !skb_in_use) {
5047 BT_DBG("Freeing %p", skb);
5048 kfree_skb(skb);
5049 }
5050
5051 return err;
5052 }
5053
5054 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5055 {
5056 /* Make sure reqseq is for a packet that has been sent but not acked */
5057 u16 unacked;
5058
5059 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5060 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5061 }
5062
5063 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5064 struct sk_buff *skb, u8 event)
5065 {
5066 int err = 0;
5067
5068 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5069 control, skb, event, chan->rx_state);
5070
5071 if (__valid_reqseq(chan, control->reqseq)) {
5072 switch (chan->rx_state) {
5073 case L2CAP_RX_STATE_RECV:
5074 err = l2cap_rx_state_recv(chan, control, skb, event);
5075 break;
5076 case L2CAP_RX_STATE_SREJ_SENT:
5077 err = l2cap_rx_state_srej_sent(chan, control, skb,
5078 event);
5079 break;
5080 default:
5081 /* shut it down */
5082 break;
5083 }
5084 } else {
5085 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5086 control->reqseq, chan->next_tx_seq,
5087 chan->expected_ack_seq);
5088 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5089 }
5090
5091 return err;
5092 }
5093
/* Receive path for streaming mode. Only the in-sequence frame is
 * reassembled; streaming mode has no retransmission, so on any gap or
 * duplicate the partially assembled SDU is discarded and the receiver
 * simply resynchronizes to the incoming txseq.
 *
 * @chan:    streaming-mode channel
 * @control: unpacked control field of the received frame
 * @skb:     received I-frame; consumed (reassembled or freed) here
 *
 * Always returns 0 (err is never set in this function).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: drop any partial SDU and this frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the frame just seen, whether it was kept or not */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
5131
/* Validate an incoming ERTM/streaming frame (FCS, payload length, F/P
 * bit combinations) and dispatch it to the appropriate receive state
 * machine. The skb is consumed: either passed down to l2cap_rx() /
 * l2cap_stream_rx() or freed at the drop label. Always returns 0;
 * protocol errors are handled by requesting a disconnect.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Compute the information payload length: subtract the SDU
	 * length field (start fragments only) and the FCS if present.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload must not exceed the negotiated MPS */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
	} else {
		/* Map the S-frame "super" field to a receive event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
5220
/* Deliver a frame received on a connection-oriented channel (by source
 * CID) according to the channel mode. The skb is consumed on every
 * path: handed to the channel's recv op, passed to l2cap_data_rcv(),
 * or freed at the drop label.
 *
 * NOTE(review): every exit path calls l2cap_chan_unlock(), so
 * l2cap_get_chan_by_scid() appears to return the channel locked -
 * confirm against its definition.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv returns 0 when it took ownership of the skb */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
5270
5271 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5272 {
5273 struct l2cap_chan *chan;
5274
5275 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5276 if (!chan)
5277 goto drop;
5278
5279 BT_DBG("chan %p, len %d", chan, skb->len);
5280
5281 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5282 goto drop;
5283
5284 if (chan->imtu < skb->len)
5285 goto drop;
5286
5287 if (!chan->ops->recv(chan->data, skb))
5288 return 0;
5289
5290 drop:
5291 kfree_skb(skb);
5292
5293 return 0;
5294 }
5295
5296 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5297 struct sk_buff *skb)
5298 {
5299 struct l2cap_chan *chan;
5300
5301 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5302 if (!chan)
5303 goto drop;
5304
5305 BT_DBG("chan %p, len %d", chan, skb->len);
5306
5307 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5308 goto drop;
5309
5310 if (chan->imtu < skb->len)
5311 goto drop;
5312
5313 if (!chan->ops->recv(chan->data, skb))
5314 return 0;
5315
5316 drop:
5317 kfree_skb(skb);
5318
5319 return 0;
5320 }
5321
/* Demultiplex one complete L2CAP frame to the proper channel handler
 * based on its CID. The skb is consumed by whichever handler it is
 * passed to, or freed here on a length mismatch.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* skb_pull() only advances the data pointer, so lh still
	 * points at the basic header after the pull.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a 2-byte PSM prefix */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
5365
5366 /* ---- L2CAP interface with lower layer (HCI) ---- */
5367
5368 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5369 {
5370 int exact = 0, lm1 = 0, lm2 = 0;
5371 struct l2cap_chan *c;
5372
5373 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5374
5375 /* Find listening sockets and check their link_mode */
5376 read_lock(&chan_list_lock);
5377 list_for_each_entry(c, &chan_list, global_l) {
5378 struct sock *sk = c->sk;
5379
5380 if (c->state != BT_LISTEN)
5381 continue;
5382
5383 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5384 lm1 |= HCI_LM_ACCEPT;
5385 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5386 lm1 |= HCI_LM_MASTER;
5387 exact++;
5388 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5389 lm2 |= HCI_LM_ACCEPT;
5390 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5391 lm2 |= HCI_LM_MASTER;
5392 }
5393 }
5394 read_unlock(&chan_list_lock);
5395
5396 return exact ? lm1 : lm2;
5397 }
5398
5399 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5400 {
5401 struct l2cap_conn *conn;
5402
5403 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5404
5405 if (!status) {
5406 conn = l2cap_conn_add(hcon, status);
5407 if (conn)
5408 l2cap_conn_ready(conn);
5409 } else
5410 l2cap_conn_del(hcon, bt_to_errno(status));
5411
5412 return 0;
5413 }
5414
5415 int l2cap_disconn_ind(struct hci_conn *hcon)
5416 {
5417 struct l2cap_conn *conn = hcon->l2cap_data;
5418
5419 BT_DBG("hcon %p", hcon);
5420
5421 if (!conn)
5422 return HCI_ERROR_REMOTE_USER_TERM;
5423 return conn->disc_reason;
5424 }
5425
/* HCI callback: the ACL link was disconnected; tear down the
 * associated L2CAP connection. Always returns 0.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
5433
5434 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5435 {
5436 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5437 return;
5438
5439 if (encrypt == 0x00) {
5440 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5441 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5442 } else if (chan->sec_level == BT_SECURITY_HIGH)
5443 l2cap_chan_close(chan, ECONNREFUSED);
5444 } else {
5445 if (chan->sec_level == BT_SECURITY_MEDIUM)
5446 __clear_chan_timer(chan);
5447 }
5448 }
5449
/* HCI callback: authentication/encryption state changed on the link.
 * Walk every channel on the connection and advance its state machine
 * accordingly (LE channels become ready, pending BR/EDR connects are
 * sent or refused, deferred accepts are answered). Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* LE channel: becomes ready once encrypted */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			/* Established channel: just propagate the change */
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security completed for an outgoing connect */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect that was waiting on security:
			 * answer the peer's connection request now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must still authorize */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5550
/* HCI callback: one ACL data packet arrived. Reassemble fragmented
 * L2CAP frames using conn->rx_skb / conn->rx_len: a start fragment
 * (no ACL_CONT flag) establishes the expected total length from the
 * basic L2CAP header, continuation fragments are appended until the
 * frame is complete, and complete frames go to l2cap_recv_frame().
 * The incoming skb is always consumed (copied then freed, or handed
 * off whole). Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start fragment while reassembly is in progress
		 * means the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Fragments were copied, so the original skb is always freed here */
	kfree_skb(skb);
	return 0;
}
5642
5643 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5644 {
5645 struct l2cap_chan *c;
5646
5647 read_lock(&chan_list_lock);
5648
5649 list_for_each_entry(c, &chan_list, global_l) {
5650 struct sock *sk = c->sk;
5651
5652 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5653 batostr(&bt_sk(sk)->src),
5654 batostr(&bt_sk(sk)->dst),
5655 c->state, __le16_to_cpu(c->psm),
5656 c->scid, c->dcid, c->imtu, c->omtu,
5657 c->sec_level, c->mode);
5658 }
5659
5660 read_unlock(&chan_list_lock);
5661
5662 return 0;
5663 }
5664
/* debugfs open hook: bind the seq_file show routine to the file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5669
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
5676
5677 static struct dentry *l2cap_debugfs;
5678
5679 int __init l2cap_init(void)
5680 {
5681 int err;
5682
5683 err = l2cap_init_sockets();
5684 if (err < 0)
5685 return err;
5686
5687 if (bt_debugfs) {
5688 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5689 bt_debugfs, NULL, &l2cap_debugfs_fops);
5690 if (!l2cap_debugfs)
5691 BT_ERR("Failed to create L2CAP debug file");
5692 }
5693
5694 return 0;
5695 }
5696
/* Module teardown: remove the debugfs entry and the socket layer. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5702
/* Module parameter: allow ERTM to be disabled (writable via sysfs). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");