Bluetooth: Remove unnecessary headers include
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40
41 bool disable_ertm;
42
43 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
44 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
45
46 static LIST_HEAD(chan_list);
47 static DEFINE_RWLOCK(chan_list_lock);
48
49 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
50 u8 code, u8 ident, u16 dlen, void *data);
51 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
52 void *data);
53 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
54 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
55 struct l2cap_chan *chan, int err);
56
57 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
58 struct sk_buff_head *skbs, u8 event);
59
60 /* ---- L2CAP channels ---- */
61
62 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
63 {
64 struct l2cap_chan *c;
65
66 list_for_each_entry(c, &conn->chan_l, list) {
67 if (c->dcid == cid)
68 return c;
69 }
70 return NULL;
71 }
72
73 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
74 {
75 struct l2cap_chan *c;
76
77 list_for_each_entry(c, &conn->chan_l, list) {
78 if (c->scid == cid)
79 return c;
80 }
81 return NULL;
82 }
83
84 /* Find channel with given SCID.
85 * Returns locked channel. */
86 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
87 {
88 struct l2cap_chan *c;
89
90 mutex_lock(&conn->chan_lock);
91 c = __l2cap_get_chan_by_scid(conn, cid);
92 if (c)
93 l2cap_chan_lock(c);
94 mutex_unlock(&conn->chan_lock);
95
96 return c;
97 }
98
99 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
100 {
101 struct l2cap_chan *c;
102
103 list_for_each_entry(c, &conn->chan_l, list) {
104 if (c->ident == ident)
105 return c;
106 }
107 return NULL;
108 }
109
110 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
111 {
112 struct l2cap_chan *c;
113
114 list_for_each_entry(c, &chan_list, global_l) {
115 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
116 return c;
117 }
118 return NULL;
119 }
120
121 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
122 {
123 int err;
124
125 write_lock(&chan_list_lock);
126
127 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
128 err = -EADDRINUSE;
129 goto done;
130 }
131
132 if (psm) {
133 chan->psm = psm;
134 chan->sport = psm;
135 err = 0;
136 } else {
137 u16 p;
138
139 err = -EINVAL;
140 for (p = 0x1001; p < 0x1100; p += 2)
141 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
142 chan->psm = cpu_to_le16(p);
143 chan->sport = cpu_to_le16(p);
144 err = 0;
145 break;
146 }
147 }
148
149 done:
150 write_unlock(&chan_list_lock);
151 return err;
152 }
153
154 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
155 {
156 write_lock(&chan_list_lock);
157
158 chan->scid = scid;
159
160 write_unlock(&chan_list_lock);
161
162 return 0;
163 }
164
165 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
166 {
167 u16 cid = L2CAP_CID_DYN_START;
168
169 for (; cid < L2CAP_CID_DYN_END; cid++) {
170 if (!__l2cap_get_chan_by_scid(conn, cid))
171 return cid;
172 }
173
174 return 0;
175 }
176
/* Transition @chan to @state and notify the channel owner via the
 * state_change callback.  Caller holds the socket lock (see the locked
 * wrapper l2cap_state_change).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
185
/* Locked wrapper: take the socket lock around a state transition. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
194
/* Record @err on the owning socket.  Caller holds the socket lock
 * (see the locked wrapper l2cap_chan_set_err).
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
201
/* Locked wrapper: take the socket lock around setting the error. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
210
/* Arm the ERTM retransmission timer — but only if the monitor timer is
 * not already pending and a retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
219
/* Switch from the retransmission timer to the monitor timer; the
 * retransmission timer is always stopped, the monitor timer only armed
 * when a monitor timeout is configured.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
228
229 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
230 u16 seq)
231 {
232 struct sk_buff *skb;
233
234 skb_queue_walk(head, skb) {
235 if (bt_cb(skb)->control.txseq == seq)
236 return skb;
237 }
238
239 return NULL;
240 }
241
242 /* ---- L2CAP sequence number lists ---- */
243
244 /* For ERTM, ordered lists of sequence numbers must be tracked for
245 * SREJ requests that are received and for frames that are to be
246 * retransmitted. These seq_list functions implement a singly-linked
247 * list in an array, where membership in the list can also be checked
248 * in constant time. Items can also be added to the tail of the list
249 * and removed from the head in constant time, without further memory
250 * allocs or frees.
251 */
252
253 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
254 {
255 size_t alloc_size, i;
256
257 /* Allocated size is a power of 2 to map sequence numbers
258 * (which may be up to 14 bits) in to a smaller array that is
259 * sized for the negotiated ERTM transmit windows.
260 */
261 alloc_size = roundup_pow_of_two(size);
262
263 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
264 if (!seq_list->list)
265 return -ENOMEM;
266
267 seq_list->mask = alloc_size - 1;
268 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
269 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
270 for (i = 0; i < alloc_size; i++)
271 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
272
273 return 0;
274 }
275
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
280
/* O(1) membership test: a slot that is not CLEAR means @seq is queued. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
287
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or @seq is not a member.  Removing the head is
 * O(1); removing an interior element walks the links.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* The removed head was also the tail: list is now empty */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			/* Hit the tail without finding @seq: not a member */
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
321
/* Pop and return the head of the list; L2CAP_SEQ_LIST_CLEAR if empty. */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
327
/* Empty the list, resetting every backing slot.  A no-op when the list
 * is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
341
/* Append @seq to the tail of the list in O(1).  Duplicate appends are
 * silently ignored (a slot that is not CLEAR marks membership).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* List was empty: the new element is also the head */
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
359
/* Delayed-work handler for the channel timer.
 *
 * Picks ECONNREFUSED when the timeout hit while connected/configuring
 * or during an outgoing connect above SDP security, ETIMEDOUT
 * otherwise, closes the channel, notifies the owner and drops the
 * reference held by the armed timer.
 *
 * Lock order: conn->chan_lock first, then the channel lock.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* Owner callback runs outside the channel lock */
	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
389
/* Allocate a new channel, link it into the global channel list and
 * return it holding an initial reference.  Returns NULL on allocation
 * failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
417
/* Unlink @chan from the global channel list and drop the reference
 * taken at creation time; the channel is freed once the last reference
 * is gone.
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
426
/* Reset the negotiable channel parameters (FCS, ERTM limits, security)
 * to their defaults and mark the channel force-active.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
437
/* Attach @chan to @conn.
 *
 * Assigns CIDs and default MTU per channel type — fixed LE data CID on
 * LE links, a dynamically allocated source CID for connection-oriented
 * ACL channels, the connectionless CID, or the signalling CID for raw
 * channels — seeds best-effort QoS defaults, takes a channel reference
 * and links the channel into the connection's list.
 * Caller must hold conn->chan_lock (see l2cap_chan_add).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort service defaults for (e)FS negotiation */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
486
/* Locked wrapper: attach @chan to @conn under conn->chan_lock. */
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
493
/* Detach @chan from its connection and mark the socket closed.
 *
 * Unlinks the channel from conn->chan_l, drops the references taken by
 * __l2cap_chan_add (channel) and on the underlying hci_conn, moves the
 * channel to BT_CLOSED (propagating @err to the socket when non-zero),
 * and wakes either the accepting parent or the socket itself.  Queued
 * ERTM/streaming frames and sequence lists are then released — unless
 * configuration never completed, in which case they were never set up.
 */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	lock_sock(sk);

	__l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		__l2cap_chan_set_err(chan, err);

	if (parent) {
		/* Pending accept: remove from the parent's queue and
		 * wake the listener.
		 */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	release_sock(sk);

	/* Mode-specific resources exist only once config completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
556
/* Tear down every pending (not yet accepted) child of a listening
 * socket, closing each channel with ECONNRESET and notifying its
 * owner.
 */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		l2cap_chan_lock(chan);
		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
	}
}
575
/* Close @chan, driving the shutdown appropriate to its current state:
 * listening sockets reap their pending children; established ACL
 * channels send a Disconnect Request and wait on the channel timer;
 * half-open incoming channels (BT_CONNECT2) first answer the pending
 * Connection Request with a reject; everything else is torn down
 * immediately via l2cap_chan_del.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
	       state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		lock_sock(sk);
		l2cap_chan_cleanup_listen(sk);

		__l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Graceful disconnect: request and let the
			 * channel timer bound the wait for the response.
			 */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the still-pending Connection Request:
			 * security block when setup was deferred to
			 * userspace, bad PSM otherwise.
			 */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED) is only reached here */
		lock_sock(sk);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;
	}
}
639
/* Map the channel type, PSM and security level to the HCI
 * authentication requirement used when securing the link: raw
 * (signalling-only) channels use dedicated bonding, PSM 0x0001 (SDP)
 * never requires bonding, everything else uses general bonding.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == cpu_to_le16(0x0001)) {
		/* SDP: demote LOW to the dedicated SDP security level */
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
670
671 /* Service level security */
672 int l2cap_chan_check_security(struct l2cap_chan *chan)
673 {
674 struct l2cap_conn *conn = chan->conn;
675 __u8 auth_type;
676
677 auth_type = l2cap_get_auth_type(chan);
678
679 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
680 }
681
/* Allocate the next signalling command identifier for @conn, wrapping
 * within the kernel-reserved range.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
703
/* Build a signalling command PDU and queue it on the connection's HCI
 * channel at maximum priority.  Silently drops the command when the
 * sk_buff cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use non-flushable packets when the controller supports them */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
724
/* Hand a data frame for @chan to the HCI layer, choosing flushable or
 * non-flushable ACL start flags from the channel flag and controller
 * capability, and carrying over the channel's force-active setting.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
742
743 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
744 {
745 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
746 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
747
748 if (enh & L2CAP_CTRL_FRAME_TYPE) {
749 /* S-Frame */
750 control->sframe = 1;
751 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
752 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
753
754 control->sar = 0;
755 control->txseq = 0;
756 } else {
757 /* I-Frame */
758 control->sframe = 0;
759 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
760 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
761
762 control->poll = 0;
763 control->super = 0;
764 }
765 }
766
767 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
768 {
769 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
770 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
771
772 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
773 /* S-Frame */
774 control->sframe = 1;
775 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
776 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
777
778 control->sar = 0;
779 control->txseq = 0;
780 } else {
781 /* I-Frame */
782 control->sframe = 0;
783 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
784 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
785
786 control->poll = 0;
787 control->super = 0;
788 }
789 }
790
791 static inline void __unpack_control(struct l2cap_chan *chan,
792 struct sk_buff *skb)
793 {
794 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
795 __unpack_extended_control(get_unaligned_le32(skb->data),
796 &bt_cb(skb)->control);
797 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
798 } else {
799 __unpack_enhanced_control(get_unaligned_le16(skb->data),
800 &bt_cb(skb)->control);
801 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
802 }
803 }
804
805 static u32 __pack_extended_control(struct l2cap_ctrl *control)
806 {
807 u32 packed;
808
809 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
810 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
811
812 if (control->sframe) {
813 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
814 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
815 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
816 } else {
817 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
818 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
819 }
820
821 return packed;
822 }
823
824 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
825 {
826 u16 packed;
827
828 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
829 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
830
831 if (control->sframe) {
832 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
833 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
834 packed |= L2CAP_CTRL_FRAME_TYPE;
835 } else {
836 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
837 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
838 }
839
840 return packed;
841 }
842
843 static inline void __pack_control(struct l2cap_chan *chan,
844 struct l2cap_ctrl *control,
845 struct sk_buff *skb)
846 {
847 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
848 put_unaligned_le32(__pack_extended_control(control),
849 skb->data + L2CAP_HDR_SIZE);
850 } else {
851 put_unaligned_le16(__pack_enhanced_control(control),
852 skb->data + L2CAP_HDR_SIZE);
853 }
854 }
855
/* Build a complete S-frame PDU carrying the already-encoded @control
 * field, sized for enhanced or extended control and, when CRC16 FCS is
 * in use, a trailing FCS over header and control.
 * Returns the skb at maximum priority, or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: payload length excludes the header itself */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
893
/* Send an S-frame described by @control, updating ERTM bookkeeping:
 * piggybacks a pending F-bit onto non-poll frames, tracks whether an
 * RNR is outstanding, and treats any non-SREJ frame as acknowledging
 * up to reqseq (stopping the ack timer).  Non-S-frames are ignored.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A queued F-bit is sent on the next non-poll frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* This frame acknowledges everything up to reqseq */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
931
932 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
933 {
934 struct l2cap_ctrl control;
935
936 BT_DBG("chan %p, poll %d", chan, poll);
937
938 memset(&control, 0, sizeof(control));
939 control.sframe = 1;
940 control.poll = poll;
941
942 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
943 control.super = L2CAP_SUPER_RNR;
944 else
945 control.super = L2CAP_SUPER_RR;
946
947 control.reqseq = chan->buffer_seq;
948 l2cap_send_sframe(chan, &control);
949 }
950
/* True when no Connection Request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
955
/* Send an L2CAP Connection Request for @chan, remembering the command
 * identifier for matching the response and marking the request as
 * pending.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
970
/* Mark @chan fully connected: clear all configuration state (including
 * CONF_NOT_COMPLETE), stop the channel timer, move to BT_CONNECTED and
 * wake the owning socket — and the accepting parent, if any.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sock *parent;

	lock_sock(sk);

	parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	__l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);

	release_sock(sk);
}
994
/* Kick off connection establishment for @chan.
 *
 * LE links need no signalling and become ready immediately.  On ACL
 * links, a Connection Request is sent once the remote feature mask is
 * known and security allows; otherwise an Information Request for the
 * feature mask is issued first (with a timeout) and the connect is
 * deferred until the response arrives.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: wait for it */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
1024
1025 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1026 {
1027 u32 local_feat_mask = l2cap_feat_mask;
1028 if (!disable_ertm)
1029 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1030
1031 switch (mode) {
1032 case L2CAP_MODE_ERTM:
1033 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1034 case L2CAP_MODE_STREAMING:
1035 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1036 default:
1037 return 0x00;
1038 }
1039 }
1040
/* Send a Disconnect Request for @chan and move it to BT_DISCONN,
 * recording @err on the socket.  ERTM timers are stopped first so no
 * further retransmission activity races the teardown.  Does nothing
 * when @conn is NULL.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
		       L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1065
1066 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and push its setup
 * state machine forward now that the link (and feature exchange) is
 * ready.
 *
 * BT_CONNECT channels get a Connection Request once security allows;
 * channels whose mode the remote cannot support and that must not fall
 * back (state-2 devices) are closed instead.  BT_CONNECT2 channels
 * (incoming, awaiting our response) get a Connection Response whose
 * result reflects security and defer-setup state, followed by the
 * first Configure Request when the response was a success.
 *
 * Runs under conn->chan_lock; each channel is locked while processed.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Required mode unsupported by the remote and no
			 * fallback allowed: give up on this channel.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize first:
					 * answer "pending" and wake the
					 * listening parent.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Start configuration only once, and only after a
			 * successful response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1149
/* Find a channel in the given @state with source CID @cid and
 * source/destination bdaddr.  Prefers an exact address match; falls
 * back to the closest wildcard (BDADDR_ANY) match.
 * Note: the returned channel is NOT locked here — callers take their
 * own lock (see l2cap_le_conn_ready).
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1192
/* Accept an incoming LE connection: find a listening channel on the LE
 * data CID, spawn a child channel from it and mark the child connected.
 * Runs with the parent socket locked for the whole accept sequence.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	/* Create the child channel/socket from the listening one */
	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the ACL/LE link alive while this channel exists */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* LE channels need no L2CAP connect procedure; go straight to
	 * connected and wake up anyone blocked in accept().
	 */
	__l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

clean:
	release_sock(parent);
}
1239
/* Called when the underlying HCI link comes up: kick every channel on
 * the connection into the appropriate next state.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: look for a listener on the LE data CID */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start SMP at the pending security level */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (conn->hcon->type == LE_LINK) {
			/* LE channels become ready once security is done */
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels have no L2CAP-level
			 * connect procedure; mark them connected now.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1278
1279 /* Notify sockets that we cannot guaranty reliability anymore */
1280 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1281 {
1282 struct l2cap_chan *chan;
1283
1284 BT_DBG("conn %p", conn);
1285
1286 mutex_lock(&conn->chan_lock);
1287
1288 list_for_each_entry(chan, &conn->chan_l, list) {
1289 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1290 __l2cap_chan_set_err(chan, err);
1291 }
1292
1293 mutex_unlock(&conn->chan_lock);
1294 }
1295
1296 static void l2cap_info_timeout(struct work_struct *work)
1297 {
1298 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1299 info_timer.work);
1300
1301 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1302 conn->info_ident = 0;
1303
1304 l2cap_conn_start(conn);
1305 }
1306
/* Tear down an L2CAP connection after the HCI link went away (or on a
 * fatal error).  Kills every channel, stops pending timers and frees
 * the connection object.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the channel outlives l2cap_chan_del()
		 * for the close callback below.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* Notify the socket layer outside the channel lock */
		chan->ops->close(chan->data);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Cancel the info timer only if a request was ever sent */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Pending SMP: stop its timer and destroy the SMP context */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1349
1350 static void security_timeout(struct work_struct *work)
1351 {
1352 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1353 security_timer.work);
1354
1355 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1356 }
1357
1358 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1359 {
1360 struct l2cap_conn *conn = hcon->l2cap_data;
1361 struct hci_chan *hchan;
1362
1363 if (conn || status)
1364 return conn;
1365
1366 hchan = hci_chan_create(hcon);
1367 if (!hchan)
1368 return NULL;
1369
1370 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1371 if (!conn) {
1372 hci_chan_del(hchan);
1373 return NULL;
1374 }
1375
1376 hcon->l2cap_data = conn;
1377 conn->hcon = hcon;
1378 conn->hchan = hchan;
1379
1380 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1381
1382 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1383 conn->mtu = hcon->hdev->le_mtu;
1384 else
1385 conn->mtu = hcon->hdev->acl_mtu;
1386
1387 conn->src = &hcon->hdev->bdaddr;
1388 conn->dst = &hcon->dst;
1389
1390 conn->feat_mask = 0;
1391
1392 spin_lock_init(&conn->lock);
1393 mutex_init(&conn->chan_lock);
1394
1395 INIT_LIST_HEAD(&conn->chan_l);
1396
1397 if (hcon->type == LE_LINK)
1398 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1399 else
1400 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1401
1402 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1403
1404 return conn;
1405 }
1406
1407 /* ---- Socket interface ---- */
1408
1409 /* Find socket with psm and source / destination bdaddr.
1410 * Returns closest match.
1411 */
1412 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1413 bdaddr_t *src,
1414 bdaddr_t *dst)
1415 {
1416 struct l2cap_chan *c, *c1 = NULL;
1417
1418 read_lock(&chan_list_lock);
1419
1420 list_for_each_entry(c, &chan_list, global_l) {
1421 struct sock *sk = c->sk;
1422
1423 if (state && c->state != state)
1424 continue;
1425
1426 if (c->psm == psm) {
1427 int src_match, dst_match;
1428 int src_any, dst_any;
1429
1430 /* Exact match. */
1431 src_match = !bacmp(&bt_sk(sk)->src, src);
1432 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1433 if (src_match && dst_match) {
1434 read_unlock(&chan_list_lock);
1435 return c;
1436 }
1437
1438 /* Closest match */
1439 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1440 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1441 if ((src_match && dst_any) || (src_any && dst_match) ||
1442 (src_any && dst_any))
1443 c1 = c;
1444 }
1445 }
1446
1447 read_unlock(&chan_list_lock);
1448
1449 return c1;
1450 }
1451
/* Initiate an outgoing connection on @chan to @dst/@dst_type, using
 * either @psm (connection-oriented) or a fixed @cid.  Creates the HCI
 * link if needed and starts the L2CAP connect procedure once the link
 * is up.  Returns 0 on success (or if already connecting) and a
 * negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		release_sock(sk);
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		release_sock(sk);
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		release_sock(sk);
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else uses ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel is allowed on an LE link */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, so drop the channel
	 * lock first to keep the lock ordering consistent.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* If the HCI link is already up, start the channel right away */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1595
/* Wait (interruptibly) until every transmitted I-frame has been acked
 * by the remote side or the channel loses its connection.  Called with
 * the socket locked; the lock is dropped while sleeping.  Returns 0 on
 * success or a negative error (pending signal / socket error).
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* schedule_timeout() may have consumed the budget;
		 * re-arm so we keep polling until the acks arrive.
		 */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the RX path
		 * can process incoming acknowledgements.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1627
1628 static void l2cap_monitor_timeout(struct work_struct *work)
1629 {
1630 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1631 monitor_timer.work);
1632
1633 BT_DBG("chan %p", chan);
1634
1635 l2cap_chan_lock(chan);
1636
1637 if (!chan->conn) {
1638 l2cap_chan_unlock(chan);
1639 l2cap_chan_put(chan);
1640 return;
1641 }
1642
1643 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1644
1645 l2cap_chan_unlock(chan);
1646 l2cap_chan_put(chan);
1647 }
1648
1649 static void l2cap_retrans_timeout(struct work_struct *work)
1650 {
1651 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1652 retrans_timer.work);
1653
1654 BT_DBG("chan %p", chan);
1655
1656 l2cap_chan_lock(chan);
1657
1658 if (!chan->conn) {
1659 l2cap_chan_unlock(chan);
1660 l2cap_chan_put(chan);
1661 return;
1662 }
1663
1664 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1665 l2cap_chan_unlock(chan);
1666 l2cap_chan_put(chan);
1667 }
1668
1669 static void l2cap_streaming_send(struct l2cap_chan *chan,
1670 struct sk_buff_head *skbs)
1671 {
1672 struct sk_buff *skb;
1673 struct l2cap_ctrl *control;
1674
1675 BT_DBG("chan %p, skbs %p", chan, skbs);
1676
1677 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1678
1679 while (!skb_queue_empty(&chan->tx_q)) {
1680
1681 skb = skb_dequeue(&chan->tx_q);
1682
1683 bt_cb(skb)->control.retries = 1;
1684 control = &bt_cb(skb)->control;
1685
1686 control->reqseq = 0;
1687 control->txseq = chan->next_tx_seq;
1688
1689 __pack_control(chan, control, skb);
1690
1691 if (chan->fcs == L2CAP_FCS_CRC16) {
1692 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1693 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1694 }
1695
1696 l2cap_do_send(chan, skb);
1697
1698 BT_DBG("Sent txseq %d", (int)control->txseq);
1699
1700 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1701 chan->frames_sent++;
1702 }
1703 }
1704
/* Transmit queued ERTM I-frames, as many as the remote TX window and
 * the TX state allow.  Returns the number of frames sent, 0 when
 * nothing could be sent, or -ENOTCONN if the channel is down.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote receiver signalled busy (RNR): hold off transmission */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggy-back the F-bit if one is owed to the remote */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame acknowledges everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original skb stays queued
		 * until acknowledged (for possible retransmission).
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
	       (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1771
/* Retransmit every sequence number currently queued in retrans_list.
 * Enforces the per-channel retry limit and rebuilds each frame's
 * control field (and FCS) before sending a fresh copy.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Remote receiver is busy: retransmission must wait */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		/* Work on a local copy of the stored control block */
		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retries */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for the retransmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS over the rewritten control field */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1847
1848 static void l2cap_retransmit(struct l2cap_chan *chan,
1849 struct l2cap_ctrl *control)
1850 {
1851 BT_DBG("chan %p, control %p", chan, control);
1852
1853 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1854 l2cap_ertm_resend(chan);
1855 }
1856
/* Retransmit every unacked I-frame starting at control->reqseq (used
 * for REJ and poll responses).  Rebuilds retrans_list from the TX
 * queue and hands off to l2cap_ertm_resend().
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll requires a final response on the first frame sent */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the first frame at or after reqseq */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to (not including) tx_send_head;
		 * frames beyond it were never sent in the first place.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1890
/* Acknowledge received I-frames: send RNR when locally busy, piggy-back
 * acks on outgoing I-frames when possible, send an explicit RR once the
 * un-acked backlog reaches 3/4 of the TX window, and otherwise defer
 * via the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the remote to stop sending (RNR) */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked will be covered by the timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1940
/* Copy @len bytes of user data from @msg into @skb, allocating MTU-sized
 * continuation fragments on skb's frag_list as needed.  @count is the
 * number of bytes that fit in the first (header-carrying) skb.
 * Returns the total number of bytes copied or a negative error; on
 * error the caller frees @skb, which also frees any linked fragments.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a copy failure still leaves the
		 * fragment reachable (and freeable) from skb.
		 */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's byte accounting in sync */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1985
1986 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1987 struct msghdr *msg, size_t len,
1988 u32 priority)
1989 {
1990 struct l2cap_conn *conn = chan->conn;
1991 struct sk_buff *skb;
1992 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1993 struct l2cap_hdr *lh;
1994
1995 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1996
1997 count = min_t(unsigned int, (conn->mtu - hlen), len);
1998
1999 skb = chan->ops->alloc_skb(chan, count + hlen,
2000 msg->msg_flags & MSG_DONTWAIT);
2001 if (IS_ERR(skb))
2002 return skb;
2003
2004 skb->priority = priority;
2005
2006 /* Create L2CAP header */
2007 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2008 lh->cid = cpu_to_le16(chan->dcid);
2009 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2010 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2011
2012 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2013 if (unlikely(err < 0)) {
2014 kfree_skb(skb);
2015 return ERR_PTR(err);
2016 }
2017 return skb;
2018 }
2019
2020 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2021 struct msghdr *msg, size_t len,
2022 u32 priority)
2023 {
2024 struct l2cap_conn *conn = chan->conn;
2025 struct sk_buff *skb;
2026 int err, count;
2027 struct l2cap_hdr *lh;
2028
2029 BT_DBG("chan %p len %d", chan, (int)len);
2030
2031 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2032
2033 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2034 msg->msg_flags & MSG_DONTWAIT);
2035 if (IS_ERR(skb))
2036 return skb;
2037
2038 skb->priority = priority;
2039
2040 /* Create L2CAP header */
2041 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2042 lh->cid = cpu_to_le16(chan->dcid);
2043 lh->len = cpu_to_le16(len);
2044
2045 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2046 if (unlikely(err < 0)) {
2047 kfree_skb(skb);
2048 return ERR_PTR(err);
2049 }
2050 return skb;
2051 }
2052
/* Build an ERTM/streaming I-frame PDU from user data.  @sdulen, when
 * non-zero, marks a SAR start fragment and is written after the control
 * field.  The control field itself is zeroed here and filled in at
 * transmit time.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on extended vs enhanced control field */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2109
/* Segment an SDU from @msg into I-frame PDUs on @seg_queue, tagging
 * each with the proper SAR value (unsegmented, start, continue, end).
 * On error the queue is purged and a negative errno returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* The start fragment also carries the total SDU length */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU carries the SDU length;
			 * later fragments get that space back.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2175
/* Send user data on @chan according to its mode (connectionless,
 * basic, ERTM or streaming).  Returns the number of bytes queued for
 * transmission or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		/* NOTE(review): message says "state" but prints the mode */
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2255
/* Send an SREJ for every sequence number missing between the expected
 * txseq and the one just received, recording each in srej_list.
 * Sequence numbers already buffered out-of-order in srej_q are skipped.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Resume expecting the frame after the one that triggered this */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2278
2279 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2280 {
2281 struct l2cap_ctrl control;
2282
2283 BT_DBG("chan %p", chan);
2284
2285 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2286 return;
2287
2288 memset(&control, 0, sizeof(control));
2289 control.sframe = 1;
2290 control.super = L2CAP_SUPER_SREJ;
2291 control.reqseq = chan->srej_list.tail;
2292 l2cap_send_sframe(chan, &control);
2293 }
2294
/* Re-send an SREJ for every outstanding sequence number up to (but not
 * including) @txseq, cycling each back onto the list.  A snapshot of
 * the initial head bounds the loop to a single pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		/* Still missing: re-request and put it back on the list */
		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2320
/* Process an incoming acknowledgement: free every TX-queued frame with
 * a sequence number before @reqseq and update the unacked counters.
 * Stops the retransmission timer once nothing is left outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %d", chan, reqseq);

	/* Nothing new acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %d, unacked_frames %d",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			/* Acked frames will never be retransmitted */
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
}
2352
2353 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2354 {
2355 BT_DBG("chan %p", chan);
2356
2357 chan->expected_tx_seq = chan->buffer_seq;
2358 l2cap_seq_list_clear(&chan->srej_list);
2359 skb_queue_purge(&chan->srej_q);
2360 chan->rx_state = L2CAP_RX_STATE_RECV;
2361 }
2362
/* ERTM TX state machine: handle @event while in the XMIT state.
 * Unknown events are silently ignored.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new segments and transmit immediately */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends RNR now that we are locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR went out earlier: poll the remote with
			 * RR(P=1) and wait for the final response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the remote and move to WAIT_F for the response */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll before retransmitting */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2434
2435 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2436 struct l2cap_ctrl *control,
2437 struct sk_buff_head *skbs, u8 event)
2438 {
2439 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2440 event);
2441
2442 switch (event) {
2443 case L2CAP_EV_DATA_REQUEST:
2444 if (chan->tx_send_head == NULL)
2445 chan->tx_send_head = skb_peek(skbs);
2446 /* Queue data, but don't send. */
2447 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2448 break;
2449 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2450 BT_DBG("Enter LOCAL_BUSY");
2451 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2452
2453 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2454 /* The SREJ_SENT state must be aborted if we are to
2455 * enter the LOCAL_BUSY state.
2456 */
2457 l2cap_abort_rx_srej_sent(chan);
2458 }
2459
2460 l2cap_send_ack(chan);
2461
2462 break;
2463 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2464 BT_DBG("Exit LOCAL_BUSY");
2465 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2466
2467 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2468 struct l2cap_ctrl local_control;
2469 memset(&local_control, 0, sizeof(local_control));
2470 local_control.sframe = 1;
2471 local_control.super = L2CAP_SUPER_RR;
2472 local_control.poll = 1;
2473 local_control.reqseq = chan->buffer_seq;
2474 l2cap_send_sframe(chan, &local_control);
2475
2476 chan->retry_count = 1;
2477 __set_monitor_timer(chan);
2478 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2479 }
2480 break;
2481 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2482 l2cap_process_reqseq(chan, control->reqseq);
2483
2484 /* Fall through */
2485
2486 case L2CAP_EV_RECV_FBIT:
2487 if (control && control->final) {
2488 __clear_monitor_timer(chan);
2489 if (chan->unacked_frames > 0)
2490 __set_retrans_timer(chan);
2491 chan->retry_count = 0;
2492 chan->tx_state = L2CAP_TX_STATE_XMIT;
2493 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2494 }
2495 break;
2496 case L2CAP_EV_EXPLICIT_POLL:
2497 /* Ignore */
2498 break;
2499 case L2CAP_EV_MONITOR_TO:
2500 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2501 l2cap_send_rr_or_rnr(chan, 1);
2502 __set_monitor_timer(chan);
2503 chan->retry_count++;
2504 } else {
2505 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2506 }
2507 break;
2508 default:
2509 break;
2510 }
2511 }
2512
2513 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2514 struct sk_buff_head *skbs, u8 event)
2515 {
2516 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2517 chan, control, skbs, event, chan->tx_state);
2518
2519 switch (chan->tx_state) {
2520 case L2CAP_TX_STATE_XMIT:
2521 l2cap_tx_state_xmit(chan, control, skbs, event);
2522 break;
2523 case L2CAP_TX_STATE_WAIT_F:
2524 l2cap_tx_state_wait_f(chan, control, skbs, event);
2525 break;
2526 default:
2527 /* Ignore event */
2528 break;
2529 }
2530 }
2531
2532 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2533 struct l2cap_ctrl *control)
2534 {
2535 BT_DBG("chan %p, control %p", chan, control);
2536 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2537 }
2538
2539 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2540 struct l2cap_ctrl *control)
2541 {
2542 BT_DBG("chan %p, control %p", chan, control);
2543 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2544 }
2545
2546 /* Copy frame to all raw sockets on that connection */
2547 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2548 {
2549 struct sk_buff *nskb;
2550 struct l2cap_chan *chan;
2551
2552 BT_DBG("conn %p", conn);
2553
2554 mutex_lock(&conn->chan_lock);
2555
2556 list_for_each_entry(chan, &conn->chan_l, list) {
2557 struct sock *sk = chan->sk;
2558 if (chan->chan_type != L2CAP_CHAN_RAW)
2559 continue;
2560
2561 /* Don't send frame to the socket it came from */
2562 if (skb->sk == sk)
2563 continue;
2564 nskb = skb_clone(skb, GFP_ATOMIC);
2565 if (!nskb)
2566 continue;
2567
2568 if (chan->ops->recv(chan->data, nskb))
2569 kfree_skb(nskb);
2570 }
2571
2572 mutex_unlock(&conn->chan_lock);
2573 }
2574
/* ---- L2CAP signalling commands ---- */

/* Allocate and fill an skb carrying one signalling command.
 *
 * The PDU consists of an L2CAP header (CID chosen per link type: LE or
 * BR/EDR signalling channel), a command header (@code, @ident, @dlen),
 * and @dlen bytes of payload copied from @data.  If the PDU exceeds the
 * connection MTU, the remainder is carried in continuation fragments
 * chained on frag_list; fragments carry no L2CAP header.
 *
 * Returns the skb on success, or NULL on allocation failure (any
 * partially built skb chain is freed).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	/* First fragment carries both headers; cap it at the MTU. */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy whatever payload fits after the headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
2638
2639 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2640 {
2641 struct l2cap_conf_opt *opt = *ptr;
2642 int len;
2643
2644 len = L2CAP_CONF_OPT_SIZE + opt->len;
2645 *ptr += len;
2646
2647 *type = opt->type;
2648 *olen = opt->len;
2649
2650 switch (opt->len) {
2651 case 1:
2652 *val = *((u8 *) opt->val);
2653 break;
2654
2655 case 2:
2656 *val = get_unaligned_le16(opt->val);
2657 break;
2658
2659 case 4:
2660 *val = get_unaligned_le32(opt->val);
2661 break;
2662
2663 default:
2664 *val = (unsigned long) opt->val;
2665 break;
2666 }
2667
2668 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2669 return len;
2670 }
2671
2672 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2673 {
2674 struct l2cap_conf_opt *opt = *ptr;
2675
2676 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2677
2678 opt->type = type;
2679 opt->len = len;
2680
2681 switch (len) {
2682 case 1:
2683 *((u8 *) opt->val) = val;
2684 break;
2685
2686 case 2:
2687 put_unaligned_le16(val, opt->val);
2688 break;
2689
2690 case 4:
2691 put_unaligned_le32(val, opt->val);
2692 break;
2693
2694 default:
2695 memcpy(opt->val, (void *) val, len);
2696 break;
2697 }
2698
2699 *ptr += L2CAP_CONF_OPT_SIZE + len;
2700 }
2701
2702 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2703 {
2704 struct l2cap_conf_efs efs;
2705
2706 switch (chan->mode) {
2707 case L2CAP_MODE_ERTM:
2708 efs.id = chan->local_id;
2709 efs.stype = chan->local_stype;
2710 efs.msdu = cpu_to_le16(chan->local_msdu);
2711 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2712 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2713 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2714 break;
2715
2716 case L2CAP_MODE_STREAMING:
2717 efs.id = 1;
2718 efs.stype = L2CAP_SERV_BESTEFFORT;
2719 efs.msdu = cpu_to_le16(chan->local_msdu);
2720 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2721 efs.acc_lat = 0;
2722 efs.flush_to = 0;
2723 break;
2724
2725 default:
2726 return;
2727 }
2728
2729 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2730 (unsigned long) &efs);
2731 }
2732
/* Delayed-work handler for the ERTM acknowledgment timer.
 *
 * If any received frames are still unacknowledged (buffer_seq has
 * advanced past last_acked_seq), send an RR/RNR S-frame to ack them.
 * Drops the channel reference taken when the work was scheduled.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Balances the reference held for the pending timer work. */
	l2cap_chan_put(chan);
}
2752
/* Reset sequence state and set up ERTM resources for a channel.
 *
 * Always clears the tx/rx sequence counters and SDU reassembly state
 * and initializes the transmit queue.  For ERTM mode it additionally
 * sets the initial state-machine states, initializes the retrans,
 * monitor and ack delayed work items and the SREJ queue, and allocates
 * the srej/retrans sequence lists.
 *
 * Returns 0 on success or a negative error from l2cap_seq_list_init();
 * on failure of the second list the first is freed again.
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs none of the ERTM machinery below. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2792
2793 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2794 {
2795 switch (mode) {
2796 case L2CAP_MODE_STREAMING:
2797 case L2CAP_MODE_ERTM:
2798 if (l2cap_mode_supported(mode, remote_feat_mask))
2799 return mode;
2800 /* fall through */
2801 default:
2802 return L2CAP_MODE_BASIC;
2803 }
2804 }
2805
2806 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2807 {
2808 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2809 }
2810
2811 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2812 {
2813 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2814 }
2815
2816 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2817 {
2818 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2819 __l2cap_ews_supported(chan)) {
2820 /* use extended control field */
2821 set_bit(FLAG_EXT_CTRL, &chan->flags);
2822 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2823 } else {
2824 chan->tx_win = min_t(u16, chan->tx_win,
2825 L2CAP_DEFAULT_TX_WINDOW);
2826 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2827 }
2828 }
2829
/* Build a Configure Request payload for @chan into @data.
 *
 * On the first exchange (no conf req/rsp seen yet) the channel mode may
 * be downgraded via l2cap_select_mode() when the remote feature mask
 * lacks ERTM/streaming support.  MTU, RFC, FCS, EFS and EWS options are
 * then appended as appropriate for the resulting mode.
 *
 * Returns the number of bytes written (request header plus options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A CONF_STATE2_DEVICE channel must keep its mode. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when it differs from the default. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC option is only useful when
		 * the remote advertises ERTM or streaming support.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		/* Timeouts are set by the responder per the spec. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Cap the PDU size to what fits in the ACL MTU after
		 * worst-case ERTM overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Full window goes in the EWS option when extended
		 * control is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2951
/* Parse the Configure Request accumulated in chan->conf_req and build
 * the Configure Response into @data.
 *
 * First pass walks the request options (MTU, flush timeout, QoS, RFC,
 * FCS, EFS, EWS); unknown non-hint options are echoed back with result
 * CONF_UNKNOWN.  The negotiated mode is then validated against ours,
 * and the accepted output options are written into the response.
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * negotiation cannot continue.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support. */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Hints may be ignored; other unknown options are
			 * reported back to the sender.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A state-2 device cannot change its mode. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second rejection on the same mode ends negotiation. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits in our
			 * ACL MTU after ERTM overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3171
/* Parse a Configure Response (@rsp, @len bytes) and build a follow-up
 * Configure Request into @data.
 *
 * Options the peer adjusted (MTU, flush timeout, RFC, EWS, EFS) are
 * accepted and echoed back in the new request; *result may be updated
 * (e.g. to UNACCEPT for a too-small MTU).  On success the channel's
 * ERTM/streaming parameters are taken from the negotiated RFC/EFS.
 *
 * Returns the number of request bytes written, or -ECONNREFUSED when
 * the peer's proposal is incompatible with our channel.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Refuse to go below the minimum MTU. */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state-2 device cannot change its mode. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
						L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service type must match ours unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
						le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
3270
3271 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3272 {
3273 struct l2cap_conf_rsp *rsp = data;
3274 void *ptr = rsp->data;
3275
3276 BT_DBG("chan %p", chan);
3277
3278 rsp->scid = cpu_to_le16(chan->dcid);
3279 rsp->result = cpu_to_le16(result);
3280 rsp->flags = cpu_to_le16(flags);
3281
3282 return ptr - data;
3283 }
3284
3285 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3286 {
3287 struct l2cap_conn_rsp rsp;
3288 struct l2cap_conn *conn = chan->conn;
3289 u8 buf[128];
3290
3291 rsp.scid = cpu_to_le16(chan->dcid);
3292 rsp.dcid = cpu_to_le16(chan->scid);
3293 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3294 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3295 l2cap_send_cmd(conn, chan->ident,
3296 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3297
3298 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3299 return;
3300
3301 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3302 l2cap_build_conf_req(chan, buf), buf);
3303 chan->num_conf_req++;
3304 }
3305
/* Extract the RFC option from a Configure Response and apply its
 * timeouts/PDU size to the channel.
 *
 * Only meaningful for ERTM/streaming channels; otherwise a no-op.  If
 * the peer omitted the RFC option, sane defaults derived from our own
 * settings are used instead.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3349
3350 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3351 {
3352 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3353
3354 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3355 return 0;
3356
3357 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3358 cmd->ident == conn->info_ident) {
3359 cancel_delayed_work(&conn->info_timer);
3360
3361 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3362 conn->info_ident = 0;
3363
3364 l2cap_conn_start(conn);
3365 }
3366
3367 return 0;
3368 }
3369
/* Handle an incoming Connection Request.
 *
 * Looks up a listening channel for the requested PSM, checks link
 * security and backlog, creates a new child channel, and replies with
 * a Connection Response (success, pending, or an error result).  When
 * the response is pending with no info, a feature-mask Information
 * Request is started; on success a Configure Request is sent.
 *
 * Locking: takes conn->chan_lock and the parent socket lock for the
 * channel-creation section.  Always returns 0 (response is sent either
 * way).
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace will accept/reject; report
				 * authorization pending.
				 */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress. */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet. */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature mask exchange that is still missing. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3499
/* Handle a Connection Response for one of our outgoing requests.
 *
 * Finds the channel by source CID (or by command ident while the CID
 * is still pending).  SUCCESS moves the channel to BT_CONFIG and sends
 * the first Configure Request; PEND just records the pending state;
 * any other result tears the channel down.
 *
 * Returns 0 on success or -EFAULT when no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* No source CID yet; match by the request's ident. */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Configure Request once. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3567
3568 static inline void set_default_fcs(struct l2cap_chan *chan)
3569 {
3570 /* FCS is enabled only in ERTM or streaming mode, if one or both
3571 * sides request it.
3572 */
3573 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3574 chan->fcs = L2CAP_FCS_NONE;
3575 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3576 chan->fcs = L2CAP_FCS_CRC16;
3577 }
3578
/* Handle an incoming L2CAP Configure Request (BR/EDR only).
 *
 * Options may be split across several requests (continuation bit set in
 * 'flags'); partial option data is accumulated in chan->conf_req until
 * the final fragment arrives, then parsed as a whole.  Once both input
 * and output paths are configured the channel moves to BT_CONNECTED.
 *
 * Returns 0, -ENOENT if the destination CID is unknown, or a negative
 * error from l2cap_ertm_init().
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success the channel is returned locked. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		/* Configuration is only legal while the channel is being
		 * set up; otherwise reject with the offending CID pair.
		 */
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config (continuation bit set).
		 * Send empty response and wait for more fragments.
		 */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up. */
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Send our own Configure Request if we have not done so yet. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	 * Conf Rsp PENDING in the code above.
	 */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3688
/* Handle an incoming L2CAP Configure Response (BR/EDR only).
 *
 * Depending on 'result' this accepts the remote's settings, retries the
 * negotiation with adjusted options (UNACCEPT), or tears the channel
 * down.  Once both input and output paths are configured the channel
 * moves to BT_CONNECTED.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
			result, len);

	/* On success the channel is returned locked. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
							L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many negotiation rounds: fall through and give up. */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* Continuation bit set: more option fragments will follow. */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3795
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges the request, shuts down the owning socket and removes the
 * channel from the connection.  Always returns 0; an unknown CID is
 * silently ignored.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's dcid is our scid. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Extra reference keeps the channel alive until ops->close()
	 * below; l2cap_chan_del() drops the list reference.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3841
/* Handle an incoming L2CAP Disconnection Response: the remote has
 * confirmed our disconnect, so remove and close the channel.
 * Always returns 0; an unknown CID is silently ignored.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Extra reference keeps the channel alive until ops->close()
	 * below; l2cap_chan_del() drops the list reference.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3875
3876 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3877 {
3878 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3879 u16 type;
3880
3881 type = __le16_to_cpu(req->type);
3882
3883 BT_DBG("type 0x%4.4x", type);
3884
3885 if (type == L2CAP_IT_FEAT_MASK) {
3886 u8 buf[8];
3887 u32 feat_mask = l2cap_feat_mask;
3888 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3889 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3890 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3891 if (!disable_ertm)
3892 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3893 | L2CAP_FEAT_FCS;
3894 if (enable_hs)
3895 feat_mask |= L2CAP_FEAT_EXT_FLOW
3896 | L2CAP_FEAT_EXT_WINDOW;
3897
3898 put_unaligned_le32(feat_mask, rsp->data);
3899 l2cap_send_cmd(conn, cmd->ident,
3900 L2CAP_INFO_RSP, sizeof(buf), buf);
3901 } else if (type == L2CAP_IT_FIXED_CHAN) {
3902 u8 buf[12];
3903 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3904
3905 if (enable_hs)
3906 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3907 else
3908 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3909
3910 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3911 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3912 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3913 l2cap_send_cmd(conn, cmd->ident,
3914 L2CAP_INFO_RSP, sizeof(buf), buf);
3915 } else {
3916 struct l2cap_info_rsp rsp;
3917 rsp.type = cpu_to_le16(type);
3918 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3919 l2cap_send_cmd(conn, cmd->ident,
3920 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3921 }
3922
3923 return 0;
3924 }
3925
/* Handle an incoming L2CAP Information Response.
 *
 * Part of the two-step feature discovery: after the feature mask is
 * learned, a second request may be sent for the fixed channel map.
 * Discovery ends (and pending channels are started) on failure or
 * after the fixed-channel response arrives.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Discovery failed: mark it done and start channels anyway. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a second request for the fixed channels. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3983
3984 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3985 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3986 void *data)
3987 {
3988 struct l2cap_create_chan_req *req = data;
3989 struct l2cap_create_chan_rsp rsp;
3990 u16 psm, scid;
3991
3992 if (cmd_len != sizeof(*req))
3993 return -EPROTO;
3994
3995 if (!enable_hs)
3996 return -EINVAL;
3997
3998 psm = le16_to_cpu(req->psm);
3999 scid = le16_to_cpu(req->scid);
4000
4001 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
4002
4003 /* Placeholder: Always reject */
4004 rsp.dcid = 0;
4005 rsp.scid = cpu_to_le16(scid);
4006 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4007 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4008
4009 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4010 sizeof(rsp), &rsp);
4011
4012 return 0;
4013 }
4014
/* A BR/EDR Create Channel Response carries the same payload layout as a
 * Connect Response, so reuse that handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4022
4023 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4024 u16 icid, u16 result)
4025 {
4026 struct l2cap_move_chan_rsp rsp;
4027
4028 BT_DBG("icid %d, result %d", icid, result);
4029
4030 rsp.icid = cpu_to_le16(icid);
4031 rsp.result = cpu_to_le16(result);
4032
4033 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4034 }
4035
4036 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4037 struct l2cap_chan *chan, u16 icid, u16 result)
4038 {
4039 struct l2cap_move_chan_cfm cfm;
4040 u8 ident;
4041
4042 BT_DBG("icid %d, result %d", icid, result);
4043
4044 ident = l2cap_get_ident(conn);
4045 if (chan)
4046 chan->ident = ident;
4047
4048 cfm.icid = cpu_to_le16(icid);
4049 cfm.result = cpu_to_le16(result);
4050
4051 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4052 }
4053
4054 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4055 u16 icid)
4056 {
4057 struct l2cap_move_chan_cfm_rsp rsp;
4058
4059 BT_DBG("icid %d", icid);
4060
4061 rsp.icid = cpu_to_le16(icid);
4062 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4063 }
4064
4065 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4066 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4067 {
4068 struct l2cap_move_chan_req *req = data;
4069 u16 icid = 0;
4070 u16 result = L2CAP_MR_NOT_ALLOWED;
4071
4072 if (cmd_len != sizeof(*req))
4073 return -EPROTO;
4074
4075 icid = le16_to_cpu(req->icid);
4076
4077 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4078
4079 if (!enable_hs)
4080 return -EINVAL;
4081
4082 /* Placeholder: Always refuse */
4083 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4084
4085 return 0;
4086 }
4087
4088 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4089 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4090 {
4091 struct l2cap_move_chan_rsp *rsp = data;
4092 u16 icid, result;
4093
4094 if (cmd_len != sizeof(*rsp))
4095 return -EPROTO;
4096
4097 icid = le16_to_cpu(rsp->icid);
4098 result = le16_to_cpu(rsp->result);
4099
4100 BT_DBG("icid %d, result %d", icid, result);
4101
4102 /* Placeholder: Always unconfirmed */
4103 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4104
4105 return 0;
4106 }
4107
4108 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4109 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4110 {
4111 struct l2cap_move_chan_cfm *cfm = data;
4112 u16 icid, result;
4113
4114 if (cmd_len != sizeof(*cfm))
4115 return -EPROTO;
4116
4117 icid = le16_to_cpu(cfm->icid);
4118 result = le16_to_cpu(cfm->result);
4119
4120 BT_DBG("icid %d, result %d", icid, result);
4121
4122 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4123
4124 return 0;
4125 }
4126
4127 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4128 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4129 {
4130 struct l2cap_move_chan_cfm_rsp *rsp = data;
4131 u16 icid;
4132
4133 if (cmd_len != sizeof(*rsp))
4134 return -EPROTO;
4135
4136 icid = le16_to_cpu(rsp->icid);
4137
4138 BT_DBG("icid %d", icid);
4139
4140 return 0;
4141 }
4142
4143 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4144 u16 to_multiplier)
4145 {
4146 u16 max_latency;
4147
4148 if (min > max || min < 6 || max > 3200)
4149 return -EINVAL;
4150
4151 if (to_multiplier < 10 || to_multiplier > 3200)
4152 return -EINVAL;
4153
4154 if (max >= to_multiplier * 8)
4155 return -EINVAL;
4156
4157 max_latency = (to_multiplier * 8 / max) - 1;
4158 if (latency > 499 || latency > max_latency)
4159 return -EINVAL;
4160
4161 return 0;
4162 }
4163
/* Handle an LE Connection Parameter Update Request.
 *
 * Only the master can apply parameter changes, so the command errors out
 * on a slave link (caller then sends a Command Reject).  Parameters are
 * validated, a response is sent, and accepted values are pushed to the
 * controller via HCI.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	/* The response goes out before the HCI update is requested. */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4205
/* Dispatch one BR/EDR signaling command to its handler.
 * Returns 0 on success or a negative error; a non-zero return makes the
 * caller send a Command Reject for this command.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the received payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4287
4288 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4289 struct l2cap_cmd_hdr *cmd, u8 *data)
4290 {
4291 switch (cmd->code) {
4292 case L2CAP_COMMAND_REJ:
4293 return 0;
4294
4295 case L2CAP_CONN_PARAM_UPDATE_REQ:
4296 return l2cap_conn_param_update_req(conn, cmd, data);
4297
4298 case L2CAP_CONN_PARAM_UPDATE_RSP:
4299 return 0;
4300
4301 default:
4302 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4303 return -EINVAL;
4304 }
4305 }
4306
/* Process every signaling command contained in one signaling-channel PDU.
 * Each command header is copied out, validated and dispatched; a failing
 * handler triggers a Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Mirror the raw PDU to any raw-mode sockets first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A length running past the PDU or a zero identifier means
		 * a corrupted packet: stop parsing the rest.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4353
4354 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4355 {
4356 u16 our_fcs, rcv_fcs;
4357 int hdr_size;
4358
4359 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4360 hdr_size = L2CAP_EXT_HDR_SIZE;
4361 else
4362 hdr_size = L2CAP_ENH_HDR_SIZE;
4363
4364 if (chan->fcs == L2CAP_FCS_CRC16) {
4365 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4366 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4367 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4368
4369 if (our_fcs != rcv_fcs)
4370 return -EBADMSG;
4371 }
4372 return 0;
4373 }
4374
/* Answer the remote with the F-bit set: send RNR while locally busy,
 * flush any pending i-frames, and fall back to an RR s-frame if nothing
 * else carried the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Leaving remote-busy with frames outstanding restarts the
	 * retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
			chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4408
/* Append new_frag to skb's frag_list and update length accounting.
 * *last_frag tracks the current tail so appends stay O(1).
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4427
/* Reassemble an SDU from (possibly segmented) i-frame payloads.
 *
 * Ownership: on success the skb is either delivered via ops->recv() or
 * parked in chan->sdu; setting 'skb = NULL' marks it consumed.  On any
 * error, both the current frame and any partially assembled SDU are
 * freed.  Returns 0 or a negative error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while a reassembly is pending is a
		 * protocol violation (err stays -EINVAL, freed below).
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first two payload bytes carry the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* Start segment already holds the whole announced SDU:
		 * invalid, drop it (err stays -EINVAL).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overflow of the announced length: abort reassembly. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final length must match exactly what was announced. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* kfree_skb(NULL) is a no-op, so consumed frames are safe. */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4509
4510 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4511 {
4512 u8 event;
4513
4514 if (chan->mode != L2CAP_MODE_ERTM)
4515 return;
4516
4517 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4518 l2cap_tx(chan, NULL, NULL, event);
4519 }
4520
/* Drain the SREJ queue once missing frames have arrived.
 * Pass sequential frames to l2cap_reassemble_sdu()
 * until a gap is encountered.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
				chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps are filled: return to normal receive state and
		 * acknowledge what we have.
		 */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4554
/* Handle a received SREJ s-frame: selectively retransmit the single
 * frame named by reqseq, with poll/final handshake bookkeeping.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
				struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq names a frame we never sent. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
			control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll set: answer with the F-bit and resume sending. */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final frame merely
			 * answers an SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4612
/* Handle a received REJ s-frame: retransmit everything starting from
 * reqseq, with poll/final handshake bookkeeping.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
				struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq names a frame we never sent. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
			bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* A final REJ only triggers retransmission when no REJ was
		 * already being acted upon.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4649
/* Classify an incoming i-frame's txseq relative to the current receive
 * state.  Returns one of the L2CAP_TXSEQ_* classifications that drive
 * the ERTM receive state machine (expected, duplicate, unexpected,
 * invalid, and their SREJ-mode variants).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
		chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
								chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
			     chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4736
4737 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4738 struct l2cap_ctrl *control,
4739 struct sk_buff *skb, u8 event)
4740 {
4741 int err = 0;
4742 bool skb_in_use = 0;
4743
4744 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4745 event);
4746
4747 switch (event) {
4748 case L2CAP_EV_RECV_IFRAME:
4749 switch (l2cap_classify_txseq(chan, control->txseq)) {
4750 case L2CAP_TXSEQ_EXPECTED:
4751 l2cap_pass_to_tx(chan, control);
4752
4753 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4754 BT_DBG("Busy, discarding expected seq %d",
4755 control->txseq);
4756 break;
4757 }
4758
4759 chan->expected_tx_seq = __next_seq(chan,
4760 control->txseq);
4761
4762 chan->buffer_seq = chan->expected_tx_seq;
4763 skb_in_use = 1;
4764
4765 err = l2cap_reassemble_sdu(chan, skb, control);
4766 if (err)
4767 break;
4768
4769 if (control->final) {
4770 if (!test_and_clear_bit(CONN_REJ_ACT,
4771 &chan->conn_state)) {
4772 control->final = 0;
4773 l2cap_retransmit_all(chan, control);
4774 l2cap_ertm_send(chan);
4775 }
4776 }
4777
4778 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4779 l2cap_send_ack(chan);
4780 break;
4781 case L2CAP_TXSEQ_UNEXPECTED:
4782 l2cap_pass_to_tx(chan, control);
4783
4784 /* Can't issue SREJ frames in the local busy state.
4785 * Drop this frame, it will be seen as missing
4786 * when local busy is exited.
4787 */
4788 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4789 BT_DBG("Busy, discarding unexpected seq %d",
4790 control->txseq);
4791 break;
4792 }
4793
4794 /* There was a gap in the sequence, so an SREJ
4795 * must be sent for each missing frame. The
4796 * current frame is stored for later use.
4797 */
4798 skb_queue_tail(&chan->srej_q, skb);
4799 skb_in_use = 1;
4800 BT_DBG("Queued %p (queue len %d)", skb,
4801 skb_queue_len(&chan->srej_q));
4802
4803 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4804 l2cap_seq_list_clear(&chan->srej_list);
4805 l2cap_send_srej(chan, control->txseq);
4806
4807 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4808 break;
4809 case L2CAP_TXSEQ_DUPLICATE:
4810 l2cap_pass_to_tx(chan, control);
4811 break;
4812 case L2CAP_TXSEQ_INVALID_IGNORE:
4813 break;
4814 case L2CAP_TXSEQ_INVALID:
4815 default:
4816 l2cap_send_disconn_req(chan->conn, chan,
4817 ECONNRESET);
4818 break;
4819 }
4820 break;
4821 case L2CAP_EV_RECV_RR:
4822 l2cap_pass_to_tx(chan, control);
4823 if (control->final) {
4824 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4825
4826 if (!test_and_clear_bit(CONN_REJ_ACT,
4827 &chan->conn_state)) {
4828 control->final = 0;
4829 l2cap_retransmit_all(chan, control);
4830 }
4831
4832 l2cap_ertm_send(chan);
4833 } else if (control->poll) {
4834 l2cap_send_i_or_rr_or_rnr(chan);
4835 } else {
4836 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4837 &chan->conn_state) &&
4838 chan->unacked_frames)
4839 __set_retrans_timer(chan);
4840
4841 l2cap_ertm_send(chan);
4842 }
4843 break;
4844 case L2CAP_EV_RECV_RNR:
4845 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4846 l2cap_pass_to_tx(chan, control);
4847 if (control && control->poll) {
4848 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4849 l2cap_send_rr_or_rnr(chan, 0);
4850 }
4851 __clear_retrans_timer(chan);
4852 l2cap_seq_list_clear(&chan->retrans_list);
4853 break;
4854 case L2CAP_EV_RECV_REJ:
4855 l2cap_handle_rej(chan, control);
4856 break;
4857 case L2CAP_EV_RECV_SREJ:
4858 l2cap_handle_srej(chan, control);
4859 break;
4860 default:
4861 break;
4862 }
4863
4864 if (skb && !skb_in_use) {
4865 BT_DBG("Freeing %p", skb);
4866 kfree_skb(skb);
4867 }
4868
4869 return err;
4870 }
4871
4872 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4873 struct l2cap_ctrl *control,
4874 struct sk_buff *skb, u8 event)
4875 {
4876 int err = 0;
4877 u16 txseq = control->txseq;
4878 bool skb_in_use = 0;
4879
4880 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4881 event);
4882
4883 switch (event) {
4884 case L2CAP_EV_RECV_IFRAME:
4885 switch (l2cap_classify_txseq(chan, txseq)) {
4886 case L2CAP_TXSEQ_EXPECTED:
4887 /* Keep frame for reassembly later */
4888 l2cap_pass_to_tx(chan, control);
4889 skb_queue_tail(&chan->srej_q, skb);
4890 skb_in_use = 1;
4891 BT_DBG("Queued %p (queue len %d)", skb,
4892 skb_queue_len(&chan->srej_q));
4893
4894 chan->expected_tx_seq = __next_seq(chan, txseq);
4895 break;
4896 case L2CAP_TXSEQ_EXPECTED_SREJ:
4897 l2cap_seq_list_pop(&chan->srej_list);
4898
4899 l2cap_pass_to_tx(chan, control);
4900 skb_queue_tail(&chan->srej_q, skb);
4901 skb_in_use = 1;
4902 BT_DBG("Queued %p (queue len %d)", skb,
4903 skb_queue_len(&chan->srej_q));
4904
4905 err = l2cap_rx_queued_iframes(chan);
4906 if (err)
4907 break;
4908
4909 break;
4910 case L2CAP_TXSEQ_UNEXPECTED:
4911 /* Got a frame that can't be reassembled yet.
4912 * Save it for later, and send SREJs to cover
4913 * the missing frames.
4914 */
4915 skb_queue_tail(&chan->srej_q, skb);
4916 skb_in_use = 1;
4917 BT_DBG("Queued %p (queue len %d)", skb,
4918 skb_queue_len(&chan->srej_q));
4919
4920 l2cap_pass_to_tx(chan, control);
4921 l2cap_send_srej(chan, control->txseq);
4922 break;
4923 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4924 /* This frame was requested with an SREJ, but
4925 * some expected retransmitted frames are
4926 * missing. Request retransmission of missing
4927 * SREJ'd frames.
4928 */
4929 skb_queue_tail(&chan->srej_q, skb);
4930 skb_in_use = 1;
4931 BT_DBG("Queued %p (queue len %d)", skb,
4932 skb_queue_len(&chan->srej_q));
4933
4934 l2cap_pass_to_tx(chan, control);
4935 l2cap_send_srej_list(chan, control->txseq);
4936 break;
4937 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4938 /* We've already queued this frame. Drop this copy. */
4939 l2cap_pass_to_tx(chan, control);
4940 break;
4941 case L2CAP_TXSEQ_DUPLICATE:
4942 /* Expecting a later sequence number, so this frame
4943 * was already received. Ignore it completely.
4944 */
4945 break;
4946 case L2CAP_TXSEQ_INVALID_IGNORE:
4947 break;
4948 case L2CAP_TXSEQ_INVALID:
4949 default:
4950 l2cap_send_disconn_req(chan->conn, chan,
4951 ECONNRESET);
4952 break;
4953 }
4954 break;
4955 case L2CAP_EV_RECV_RR:
4956 l2cap_pass_to_tx(chan, control);
4957 if (control->final) {
4958 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4959
4960 if (!test_and_clear_bit(CONN_REJ_ACT,
4961 &chan->conn_state)) {
4962 control->final = 0;
4963 l2cap_retransmit_all(chan, control);
4964 }
4965
4966 l2cap_ertm_send(chan);
4967 } else if (control->poll) {
4968 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4969 &chan->conn_state) &&
4970 chan->unacked_frames) {
4971 __set_retrans_timer(chan);
4972 }
4973
4974 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4975 l2cap_send_srej_tail(chan);
4976 } else {
4977 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4978 &chan->conn_state) &&
4979 chan->unacked_frames)
4980 __set_retrans_timer(chan);
4981
4982 l2cap_send_ack(chan);
4983 }
4984 break;
4985 case L2CAP_EV_RECV_RNR:
4986 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4987 l2cap_pass_to_tx(chan, control);
4988 if (control->poll) {
4989 l2cap_send_srej_tail(chan);
4990 } else {
4991 struct l2cap_ctrl rr_control;
4992 memset(&rr_control, 0, sizeof(rr_control));
4993 rr_control.sframe = 1;
4994 rr_control.super = L2CAP_SUPER_RR;
4995 rr_control.reqseq = chan->buffer_seq;
4996 l2cap_send_sframe(chan, &rr_control);
4997 }
4998
4999 break;
5000 case L2CAP_EV_RECV_REJ:
5001 l2cap_handle_rej(chan, control);
5002 break;
5003 case L2CAP_EV_RECV_SREJ:
5004 l2cap_handle_srej(chan, control);
5005 break;
5006 }
5007
5008 if (skb && !skb_in_use) {
5009 BT_DBG("Freeing %p", skb);
5010 kfree_skb(skb);
5011 }
5012
5013 return err;
5014 }
5015
5016 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5017 {
5018 /* Make sure reqseq is for a packet that has been sent but not acked */
5019 u16 unacked;
5020
5021 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5022 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5023 }
5024
5025 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5026 struct sk_buff *skb, u8 event)
5027 {
5028 int err = 0;
5029
5030 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5031 control, skb, event, chan->rx_state);
5032
5033 if (__valid_reqseq(chan, control->reqseq)) {
5034 switch (chan->rx_state) {
5035 case L2CAP_RX_STATE_RECV:
5036 err = l2cap_rx_state_recv(chan, control, skb, event);
5037 break;
5038 case L2CAP_RX_STATE_SREJ_SENT:
5039 err = l2cap_rx_state_srej_sent(chan, control, skb,
5040 event);
5041 break;
5042 default:
5043 /* shut it down */
5044 break;
5045 }
5046 } else {
5047 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5048 control->reqseq, chan->next_tx_seq,
5049 chan->expected_ack_seq);
5050 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5051 }
5052
5053 return err;
5054 }
5055
5056 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5057 struct sk_buff *skb)
5058 {
5059 int err = 0;
5060
5061 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5062 chan->rx_state);
5063
5064 if (l2cap_classify_txseq(chan, control->txseq) ==
5065 L2CAP_TXSEQ_EXPECTED) {
5066 l2cap_pass_to_tx(chan, control);
5067
5068 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5069 __next_seq(chan, chan->buffer_seq));
5070
5071 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5072
5073 l2cap_reassemble_sdu(chan, skb, control);
5074 } else {
5075 if (chan->sdu) {
5076 kfree_skb(chan->sdu);
5077 chan->sdu = NULL;
5078 }
5079 chan->sdu_last_frag = NULL;
5080 chan->sdu_len = 0;
5081
5082 if (skb) {
5083 BT_DBG("Freeing %p", skb);
5084 kfree_skb(skb);
5085 }
5086 }
5087
5088 chan->last_acked_seq = control->txseq;
5089 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5090
5091 return err;
5092 }
5093
5094 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5095 {
5096 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5097 u16 len;
5098 u8 event;
5099
5100 __unpack_control(chan, skb);
5101
5102 len = skb->len;
5103
5104 /*
5105 * We can just drop the corrupted I-frame here.
5106 * Receiver will miss it and start proper recovery
5107 * procedures and ask for retransmission.
5108 */
5109 if (l2cap_check_fcs(chan, skb))
5110 goto drop;
5111
5112 if (!control->sframe && control->sar == L2CAP_SAR_START)
5113 len -= L2CAP_SDULEN_SIZE;
5114
5115 if (chan->fcs == L2CAP_FCS_CRC16)
5116 len -= L2CAP_FCS_SIZE;
5117
5118 if (len > chan->mps) {
5119 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5120 goto drop;
5121 }
5122
5123 if (!control->sframe) {
5124 int err;
5125
5126 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5127 control->sar, control->reqseq, control->final,
5128 control->txseq);
5129
5130 /* Validate F-bit - F=0 always valid, F=1 only
5131 * valid in TX WAIT_F
5132 */
5133 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5134 goto drop;
5135
5136 if (chan->mode != L2CAP_MODE_STREAMING) {
5137 event = L2CAP_EV_RECV_IFRAME;
5138 err = l2cap_rx(chan, control, skb, event);
5139 } else {
5140 err = l2cap_stream_rx(chan, control, skb);
5141 }
5142
5143 if (err)
5144 l2cap_send_disconn_req(chan->conn, chan,
5145 ECONNRESET);
5146 } else {
5147 const u8 rx_func_to_event[4] = {
5148 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5149 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5150 };
5151
5152 /* Only I-frames are expected in streaming mode */
5153 if (chan->mode == L2CAP_MODE_STREAMING)
5154 goto drop;
5155
5156 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5157 control->reqseq, control->final, control->poll,
5158 control->super);
5159
5160 if (len != 0) {
5161 BT_ERR("%d", len);
5162 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5163 goto drop;
5164 }
5165
5166 /* Validate F and P bits */
5167 if (control->final && (control->poll ||
5168 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5169 goto drop;
5170
5171 event = rx_func_to_event[control->super];
5172 if (l2cap_rx(chan, control, skb, event))
5173 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5174 }
5175
5176 return 0;
5177
5178 drop:
5179 kfree_skb(skb);
5180 return 0;
5181 }
5182
/* Deliver an incoming data frame to the channel identified by @cid.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the channel
 * locked - every exit path below ends at 'done' which unlocks it;
 * confirm against the lookup helper.  The skb is consumed on all paths
 * (passed to recv/l2cap_data_rcv or freed at 'drop').
 *
 * Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means the skb was consumed */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb on every path */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
5232
5233 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5234 {
5235 struct l2cap_chan *chan;
5236
5237 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5238 if (!chan)
5239 goto drop;
5240
5241 BT_DBG("chan %p, len %d", chan, skb->len);
5242
5243 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5244 goto drop;
5245
5246 if (chan->imtu < skb->len)
5247 goto drop;
5248
5249 if (!chan->ops->recv(chan->data, skb))
5250 return 0;
5251
5252 drop:
5253 kfree_skb(skb);
5254
5255 return 0;
5256 }
5257
5258 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5259 struct sk_buff *skb)
5260 {
5261 struct l2cap_chan *chan;
5262
5263 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5264 if (!chan)
5265 goto drop;
5266
5267 BT_DBG("chan %p, len %d", chan, skb->len);
5268
5269 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5270 goto drop;
5271
5272 if (chan->imtu < skb->len)
5273 goto drop;
5274
5275 if (!chan->ops->recv(chan->data, skb))
5276 return 0;
5277
5278 drop:
5279 kfree_skb(skb);
5280
5281 return 0;
5282 }
5283
5284 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5285 {
5286 struct l2cap_hdr *lh = (void *) skb->data;
5287 u16 cid, len;
5288 __le16 psm;
5289
5290 skb_pull(skb, L2CAP_HDR_SIZE);
5291 cid = __le16_to_cpu(lh->cid);
5292 len = __le16_to_cpu(lh->len);
5293
5294 if (len != skb->len) {
5295 kfree_skb(skb);
5296 return;
5297 }
5298
5299 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5300
5301 switch (cid) {
5302 case L2CAP_CID_LE_SIGNALING:
5303 case L2CAP_CID_SIGNALING:
5304 l2cap_sig_channel(conn, skb);
5305 break;
5306
5307 case L2CAP_CID_CONN_LESS:
5308 psm = get_unaligned((__le16 *) skb->data);
5309 skb_pull(skb, 2);
5310 l2cap_conless_channel(conn, psm, skb);
5311 break;
5312
5313 case L2CAP_CID_LE_DATA:
5314 l2cap_att_channel(conn, cid, skb);
5315 break;
5316
5317 case L2CAP_CID_SMP:
5318 if (smp_sig_channel(conn, skb))
5319 l2cap_conn_del(conn->hcon, EACCES);
5320 break;
5321
5322 default:
5323 l2cap_data_channel(conn, cid, skb);
5324 break;
5325 }
5326 }
5327
5328 /* ---- L2CAP interface with lower layer (HCI) ---- */
5329
5330 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5331 {
5332 int exact = 0, lm1 = 0, lm2 = 0;
5333 struct l2cap_chan *c;
5334
5335 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5336
5337 /* Find listening sockets and check their link_mode */
5338 read_lock(&chan_list_lock);
5339 list_for_each_entry(c, &chan_list, global_l) {
5340 struct sock *sk = c->sk;
5341
5342 if (c->state != BT_LISTEN)
5343 continue;
5344
5345 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5346 lm1 |= HCI_LM_ACCEPT;
5347 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5348 lm1 |= HCI_LM_MASTER;
5349 exact++;
5350 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5351 lm2 |= HCI_LM_ACCEPT;
5352 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5353 lm2 |= HCI_LM_MASTER;
5354 }
5355 }
5356 read_unlock(&chan_list_lock);
5357
5358 return exact ? lm1 : lm2;
5359 }
5360
5361 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5362 {
5363 struct l2cap_conn *conn;
5364
5365 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5366
5367 if (!status) {
5368 conn = l2cap_conn_add(hcon, status);
5369 if (conn)
5370 l2cap_conn_ready(conn);
5371 } else
5372 l2cap_conn_del(hcon, bt_to_errno(status));
5373
5374 return 0;
5375 }
5376
5377 int l2cap_disconn_ind(struct hci_conn *hcon)
5378 {
5379 struct l2cap_conn *conn = hcon->l2cap_data;
5380
5381 BT_DBG("hcon %p", hcon);
5382
5383 if (!conn)
5384 return HCI_ERROR_REMOTE_USER_TERM;
5385 return conn->disc_reason;
5386 }
5387
/* HCI callback: the ACL link went down.  Tear down the L2CAP
 * connection and all of its channels with the mapped errno.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
5395
5396 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5397 {
5398 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5399 return;
5400
5401 if (encrypt == 0x00) {
5402 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5403 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5404 } else if (chan->sec_level == BT_SECURITY_HIGH)
5405 l2cap_chan_close(chan, ECONNREFUSED);
5406 } else {
5407 if (chan->sec_level == BT_SECURITY_MEDIUM)
5408 __clear_chan_timer(chan);
5409 }
5410 }
5411
/* HCI callback: authentication/encryption state of the link changed.
 *
 * Walks every channel on the connection (under chan_lock, each channel
 * individually locked) and advances its state machine: LE data
 * channels become ready, pending connect requests are sent or timed
 * out, and deferred CONNECT2 channels get their connection response.
 *
 * @status:  0 on success, otherwise an HCI error code
 * @encrypt: nonzero when the link is now encrypted
 *
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	/* On LE links a successful encryption triggers SMP key
	 * distribution and the security timer is no longer needed.
	 */
	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		/* LE data channel: becomes ready once encrypted */
		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Connect already pending elsewhere; nothing to do here */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channel: just resume the socket and apply
		 * the encryption policy.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security done: the connect request can go out now */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace still has to authorize:
					 * answer "pending" and wake the
					 * listening parent socket.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse and schedule
				 * disconnect.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5512
/* HCI callback: an ACL data packet arrived for this link.
 *
 * Reassembles L2CAP frames from ACL fragments.  A start fragment
 * (!ACL_CONT) either carries a complete frame, which is dispatched
 * immediately, or opens a reassembly buffer (conn->rx_skb /
 * conn->rx_len).  Continuation fragments are appended until rx_len
 * reaches zero, then the complete frame is dispatched.  Length
 * mismatches abort reassembly and mark the connection unreliable.
 *
 * The input skb is always consumed: either copied into rx_skb and
 * freed at 'drop', or handed whole to l2cap_recv_frame().
 * Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Lazily create the L2CAP connection on first data */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Bytes still missing until the frame is complete */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5604
/* seq_file show callback for the "l2cap" debugfs entry: print one
 * line per registered channel (addresses, state, PSM, CIDs, MTUs,
 * security level and mode) under the channel list read lock.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					c->state, __le16_to_cpu(c->psm),
					c->scid, c->dcid, c->imtu, c->omtu,
					c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
5626
/* debugfs open callback: bind the seq_file show routine */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5631
/* File operations for the "l2cap" debugfs entry (seq_file backed) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, created in l2cap_init() */
static struct dentry *l2cap_debugfs;
5640
5641 int __init l2cap_init(void)
5642 {
5643 int err;
5644
5645 err = l2cap_init_sockets();
5646 if (err < 0)
5647 return err;
5648
5649 if (bt_debugfs) {
5650 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5651 bt_debugfs, NULL, &l2cap_debugfs_fops);
5652 if (!l2cap_debugfs)
5653 BT_ERR("Failed to create L2CAP debug file");
5654 }
5655
5656 return 0;
5657 }
5658
/* Module exit: remove the debugfs entry and unregister the socket
 * layer (reverse order of l2cap_init()).
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5664
5665 module_param(disable_ertm, bool, 0644);
5666 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.158271 seconds and 6 git commands to generate.