Bluetooth: Remove unnecessary L2CAP channel state check
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42
43 bool disable_ertm;
44
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
50
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65 {
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73 }
74
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77 {
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85 }
86
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 u16 cid)
91 {
92 struct l2cap_chan *c;
93
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
96 if (c)
97 l2cap_chan_lock(c);
98 mutex_unlock(&conn->chan_lock);
99
100 return c;
101 }
102
103 /* Find channel with given DCID.
104 * Returns locked channel.
105 */
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 u16 cid)
108 {
109 struct l2cap_chan *c;
110
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
113 if (c)
114 l2cap_chan_lock(c);
115 mutex_unlock(&conn->chan_lock);
116
117 return c;
118 }
119
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
122 {
123 struct l2cap_chan *c;
124
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
128 }
129 return NULL;
130 }
131
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 u8 ident)
134 {
135 struct l2cap_chan *c;
136
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
139 if (c)
140 l2cap_chan_lock(c);
141 mutex_unlock(&conn->chan_lock);
142
143 return c;
144 }
145
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
147 {
148 struct l2cap_chan *c;
149
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 return c;
153 }
154 return NULL;
155 }
156
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
158 {
159 int err;
160
161 write_lock(&chan_list_lock);
162
163 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 err = -EADDRINUSE;
165 goto done;
166 }
167
168 if (psm) {
169 chan->psm = psm;
170 chan->sport = psm;
171 err = 0;
172 } else {
173 u16 p;
174
175 err = -EINVAL;
176 for (p = 0x1001; p < 0x1100; p += 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 chan->psm = cpu_to_le16(p);
179 chan->sport = cpu_to_le16(p);
180 err = 0;
181 break;
182 }
183 }
184
185 done:
186 write_unlock(&chan_list_lock);
187 return err;
188 }
189
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
191 {
192 write_lock(&chan_list_lock);
193
194 chan->scid = scid;
195
196 write_unlock(&chan_list_lock);
197
198 return 0;
199 }
200
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 u16 cid = L2CAP_CID_DYN_START;
204
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
207 return cid;
208 }
209
210 return 0;
211 }
212
/* Update the channel state and notify the owner via ops->state_change.
 * Callers must hold whatever lock ops->state_change expects;
 * l2cap_state_change() is the socket-locking wrapper.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
221
/* Socket-locked wrapper around __l2cap_state_change(). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
230
/* Record an error code on the backing socket; callers hold the socket
 * lock (see l2cap_chan_set_err()).
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
237
/* Socket-locked wrapper around __l2cap_chan_set_err(). */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
246
/* Arm the ERTM retransmission timer, but only when the monitor timer
 * is not already pending — the two timers are mutually exclusive (see
 * __set_monitor_timer()).  A zero retrans_timeout disables the timer.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
255
/* Arm the ERTM monitor timer, stopping the retransmission timer first
 * since only one of the two runs at a time.  A zero monitor_timeout
 * disables the timer.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
264
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 u16 seq)
267 {
268 struct sk_buff *skb;
269
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
272 return skb;
273 }
274
275 return NULL;
276 }
277
278 /* ---- L2CAP sequence number lists ---- */
279
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
287 */
288
/* Allocate and reset a sequence list able to track @size sequence
 * numbers.  Returns 0 on success or -ENOMEM.  The size multiplication
 * cannot overflow: @size comes from a sequence count of at most
 * 14 bits, so alloc_size <= 1 << 14.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size makes "seq & mask" the slot index */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
311
/* Release the backing array; kfree(NULL) is a no-op, so this is safe
 * on a list that was never successfully initialized.
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
316
/* Membership test: a slot is in use iff it is not marked CLEAR. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
323
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR if
 * it was not a member.  Removing the head is O(1); removing from the
 * middle walks the singly-linked chain to find the predecessor.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			/* That was the last element; list is now empty */
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
357
/* Pop and return the head of the list (CLEAR when the list is empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
363
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 u16 i;
367
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
370
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377
/* Append @seq to the tail of the list in constant time.  A sequence
 * number that is already a member (its slot is not CLEAR) is left
 * where it is, preserving the existing order.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* Empty list: the new element is also the head */
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395
/* Channel timer expiry (delayed work).  Closes the channel with an
 * error derived from its current state, then drops the reference the
 * timer held.  Lock order: conn->chan_lock, then the channel lock;
 * ops->close runs with the channel lock released.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
425
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 struct l2cap_chan *chan;
429
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 if (!chan)
432 return NULL;
433
434 mutex_init(&chan->lock);
435
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
439
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441
442 chan->state = BT_OPEN;
443
444 kref_init(&chan->kref);
445
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448
449 BT_DBG("chan %p", chan);
450
451 return chan;
452 }
453
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last l2cap_chan_put() drops the refcount.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
466
/* Take a reference on a channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
473
/* Drop a reference; the channel is freed via l2cap_chan_destroy() on
 * the final put.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
480
/* Install the default ERTM transmit-window/retry parameters and the
 * default (low) security level on a freshly created channel.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
492
/* Attach @chan to @conn and assign CIDs and MTU defaults according to
 * the channel type.  Caller must hold conn->chan_lock (see
 * l2cap_chan_add()).  Takes a channel reference that is released by
 * l2cap_chan_del().
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until the link tells us otherwise */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			if (chan->dcid == L2CAP_CID_ATT)
				chan->scid = L2CAP_CID_ATT;
			else
				chan->scid = l2cap_alloc_cid(conn);
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		/* A2MP fixed channel with its own MTU */
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort defaults for the extended flow specification */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
550
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
557
/* Detach @chan from its connection and tear down mode-specific state.
 * Drops the reference taken by __l2cap_chan_add().  Caller holds the
 * channel lock (and conn->chan_lock while a connection is attached).
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* The hci_conn reference is not dropped for A2MP fixed
		 * channels
		 */
		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	/* Nothing mode-specific to free if configuration never finished */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
617
/* Close a channel according to its current state: send a disconnect
 * request for established ACL channels, reject a pending incoming
 * connect, or tear the channel down directly.  Caller holds the
 * channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established ACL channels are disconnected gracefully */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the not-yet-accepted connect request */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
674
675 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
676 {
677 if (chan->chan_type == L2CAP_CHAN_RAW) {
678 switch (chan->sec_level) {
679 case BT_SECURITY_HIGH:
680 return HCI_AT_DEDICATED_BONDING_MITM;
681 case BT_SECURITY_MEDIUM:
682 return HCI_AT_DEDICATED_BONDING;
683 default:
684 return HCI_AT_NO_BONDING;
685 }
686 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
687 if (chan->sec_level == BT_SECURITY_LOW)
688 chan->sec_level = BT_SECURITY_SDP;
689
690 if (chan->sec_level == BT_SECURITY_HIGH)
691 return HCI_AT_NO_BONDING_MITM;
692 else
693 return HCI_AT_NO_BONDING;
694 } else {
695 switch (chan->sec_level) {
696 case BT_SECURITY_HIGH:
697 return HCI_AT_GENERAL_BONDING_MITM;
698 case BT_SECURITY_MEDIUM:
699 return HCI_AT_GENERAL_BONDING;
700 default:
701 return HCI_AT_NO_BONDING;
702 }
703 }
704 }
705
706 /* Service level security */
707 int l2cap_chan_check_security(struct l2cap_chan *chan)
708 {
709 struct l2cap_conn *conn = chan->conn;
710 __u8 auth_type;
711
712 auth_type = l2cap_get_auth_type(chan);
713
714 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
715 }
716
/* Allocate the next signalling command identifier for @conn,
 * serialized by conn->lock and wrapping within the kernel's 1-128
 * range.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
738
/* Build and transmit a signalling command on the connection's ACL
 * link.  An allocation failure in l2cap_build_cmd() silently drops
 * the command.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Prefer non-flushable packets when the controller supports them */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling always goes out at top priority */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
760
761 static bool __chan_is_moving(struct l2cap_chan *chan)
762 {
763 return chan->move_state != L2CAP_MOVE_STABLE &&
764 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
765 }
766
/* Transmit one frame on the channel, routing it to the high speed
 * (AMP) logical link when one is active and the channel is not in the
 * middle of a move.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* hs_hcon without hs_hchan: drop the frame */
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
793
/* Decode a 16-bit enhanced control field into @control.  Fields that
 * do not apply to the decoded frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
817
/* Decode a 32-bit extended control field into @control.  Fields that
 * do not apply to the decoded frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
841
842 static inline void __unpack_control(struct l2cap_chan *chan,
843 struct sk_buff *skb)
844 {
845 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
846 __unpack_extended_control(get_unaligned_le32(skb->data),
847 &bt_cb(skb)->control);
848 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
849 } else {
850 __unpack_enhanced_control(get_unaligned_le16(skb->data),
851 &bt_cb(skb)->control);
852 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
853 }
854 }
855
856 static u32 __pack_extended_control(struct l2cap_ctrl *control)
857 {
858 u32 packed;
859
860 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
861 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
862
863 if (control->sframe) {
864 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
865 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
866 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
867 } else {
868 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
869 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
870 }
871
872 return packed;
873 }
874
875 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
876 {
877 u16 packed;
878
879 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
880 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
881
882 if (control->sframe) {
883 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
884 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
885 packed |= L2CAP_CTRL_FRAME_TYPE;
886 } else {
887 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
888 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
889 }
890
891 return packed;
892 }
893
894 static inline void __pack_control(struct l2cap_chan *chan,
895 struct l2cap_ctrl *control,
896 struct sk_buff *skb)
897 {
898 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
899 put_unaligned_le32(__pack_extended_control(control),
900 skb->data + L2CAP_HDR_SIZE);
901 } else {
902 put_unaligned_le16(__pack_enhanced_control(control),
903 skb->data + L2CAP_HDR_SIZE);
904 }
905 }
906
907 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
908 {
909 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
910 return L2CAP_EXT_HDR_SIZE;
911 else
912 return L2CAP_ENH_HDR_SIZE;
913 }
914
/* Allocate and fill a complete S-frame PDU: basic header, control
 * field and, when CRC16 is in use, the FCS.  Returns the skb or
 * ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written to the skb so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
947
/* Build and transmit the supervisory frame described by @control,
 * updating F-bit, RNR-sent and ack bookkeeping as a side effect.
 * Does nothing for I-frame controls or while the channel is moving
 * between controllers.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* Non-SREJ frames acknowledge everything up to reqseq */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
988
989 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
990 {
991 struct l2cap_ctrl control;
992
993 BT_DBG("chan %p, poll %d", chan, poll);
994
995 memset(&control, 0, sizeof(control));
996 control.sframe = 1;
997 control.poll = poll;
998
999 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1000 control.super = L2CAP_SUPER_RNR;
1001 else
1002 control.super = L2CAP_SUPER_RR;
1003
1004 control.reqseq = chan->buffer_seq;
1005 l2cap_send_sframe(chan, &control);
1006 }
1007
/* True when no L2CAP connect request is outstanding on this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1012
1013 static bool __amp_capable(struct l2cap_chan *chan)
1014 {
1015 struct l2cap_conn *conn = chan->conn;
1016
1017 if (enable_hs &&
1018 hci_amp_capable() &&
1019 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1020 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1021 return true;
1022 else
1023 return false;
1024 }
1025
/* NOTE(review): stub — no EFS validation is implemented yet, every
 * parameter set is accepted.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1031
1032 void l2cap_send_conn_req(struct l2cap_chan *chan)
1033 {
1034 struct l2cap_conn *conn = chan->conn;
1035 struct l2cap_conn_req req;
1036
1037 req.scid = cpu_to_le16(chan->scid);
1038 req.psm = chan->psm;
1039
1040 chan->ident = l2cap_get_ident(conn);
1041
1042 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1043
1044 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1045 }
1046
1047 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1048 {
1049 struct l2cap_create_chan_req req;
1050 req.scid = cpu_to_le16(chan->scid);
1051 req.psm = chan->psm;
1052 req.amp_id = amp_id;
1053
1054 chan->ident = l2cap_get_ident(chan->conn);
1055
1056 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1057 sizeof(req), &req);
1058 }
1059
/* Prepare an ERTM channel for a move between controllers: stop all
 * timers, reset retransmission state and park the rx/tx state
 * machines.  No-op for non-ERTM modes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of every frame already sent at least
	 * once; the walk stops at the first never-sent frame.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1094
/* Finish a channel move: return to the stable state and, for ERTM
 * channels, enter the wait-F/wait-P receive handshake matching the
 * role this side played in the move.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		/* Initiator polls the peer and waits for the F-bit */
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1116
/* Mark the channel fully connected: drop all configuration flags,
 * stop the channel timer and notify the owner via ops->ready.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1127
/* Begin establishing @chan: AMP-capable channels first discover the
 * remote AMP controllers, everything else sends a connect request
 * immediately.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (!__amp_capable(chan)) {
		l2cap_send_conn_req(chan);
		return;
	}

	BT_DBG("chan %p AMP capable: discover AMPs", chan);
	a2mp_discover_amp(chan);
}
1137
/* Kick off connection establishment for @chan.  LE channels become
 * ready immediately; on BR/EDR the remote feature mask must be known
 * first, so an information request is sent if that exchange has not
 * been started yet.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait for the feature exchange to complete */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1168
1169 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1170 {
1171 u32 local_feat_mask = l2cap_feat_mask;
1172 if (!disable_ertm)
1173 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1174
1175 switch (mode) {
1176 case L2CAP_MODE_ERTM:
1177 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1178 case L2CAP_MODE_STREAMING:
1179 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1180 default:
1181 return 0x00;
1182 }
1183 }
1184
/* Tear down a channel by sending an L2CAP Disconnection Request and
 * moving it to BT_DISCONN with @err as the socket error.  A2MP fixed
 * channels have no disconnect signalling, so they only change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop all ERTM timers before leaving the connected state */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	/* State change and error must be set under the socket lock so
	 * userspace observes them consistently.
	 */
	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1215
1216 /* ---- L2CAP connections ---- */
/* Resume connection setup for every channel on @conn, typically after
 * the feature-mask exchange or a security procedure has completed.
 *
 * Channels in BT_CONNECT get their Connection Request sent (or are
 * closed if their mode is unsupported by the peer); channels in
 * BT_CONNECT2 (incoming, waiting on security/authorization) get their
 * pending Connection Response sent, followed by the first
 * Configuration Request on success.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe iteration: l2cap_chan_close() below may unlink chan */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait until security completes and no connect
			 * request is already outstanding.
			 */
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Peer cannot do the mandatory mode: give up */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize first */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				/* Still authenticating */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only move on to configuration once, and only
			 * after a successful response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1297
1298 /* Find socket with cid and source/destination bdaddr.
1299 * Returns closest match, locked.
1300 */
1301 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1302 bdaddr_t *src,
1303 bdaddr_t *dst)
1304 {
1305 struct l2cap_chan *c, *c1 = NULL;
1306
1307 read_lock(&chan_list_lock);
1308
1309 list_for_each_entry(c, &chan_list, global_l) {
1310 struct sock *sk = c->sk;
1311
1312 if (state && c->state != state)
1313 continue;
1314
1315 if (c->scid == cid) {
1316 int src_match, dst_match;
1317 int src_any, dst_any;
1318
1319 /* Exact match. */
1320 src_match = !bacmp(&bt_sk(sk)->src, src);
1321 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1322 if (src_match && dst_match) {
1323 read_unlock(&chan_list_lock);
1324 return c;
1325 }
1326
1327 /* Closest match */
1328 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1329 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1330 if ((src_match && dst_any) || (src_any && dst_match) ||
1331 (src_any && dst_any))
1332 c1 = c;
1333 }
1334 }
1335
1336 read_unlock(&chan_list_lock);
1337
1338 return c1;
1339 }
1340
/* Handle an incoming LE link: if a listening channel exists for the
 * ATT fixed CID, spawn a new child channel for this connection and
 * attach it to @conn.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	/* Keep the link alive while the ATT channel exists */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(chan->sk)->src, conn->src);
	bacpy(&bt_sk(chan->sk)->dst, conn->dst);

	l2cap_chan_add(conn, chan);

clean:
	release_sock(parent);
}
1375
/* Called when the underlying HCI link is fully established.  Spawns
 * incoming LE channels, triggers pending SMP security requests, and
 * advances every attached channel according to its type and state.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels follow their own setup path */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* LE channels are ready once security is met */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no setup */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1423
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only channels that insisted on reliable delivery are
		 * told about the failure.
		 */
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1440
/* Information Request timed out: mark the feature exchange as done
 * (with no remote features learned) and resume channel setup anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1451
1452 /*
1453 * l2cap_user
1454 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1455 * callback is called during registration. The ->remove callback is called
1456 * during unregistration.
1457 * An l2cap_user object can either be explicitly unregistered or when the
1458 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1459 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1460 * External modules must own a reference to the l2cap_conn object if they intend
1461 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1462 * any time if they don't.
1463 */
1464
/* Register an l2cap_user on @conn and invoke its ->probe callback.
 * Returns 0 on success, -EINVAL if the user is already registered,
 * -ENODEV if the connection is already being deleted, or the error
 * returned by ->probe.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already on a list */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1502
/* Unregister an l2cap_user and invoke its ->remove callback.  Safe to
 * call even if the user was never (or is no longer) registered.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	/* NULL list pointers mean the user was already unregistered,
	 * possibly by l2cap_unregister_all_users().
	 */
	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	/* Clear the pointers so a later re-registration is detectable */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1521
/* Remove every registered l2cap_user, calling each ->remove callback.
 * Uses a while-not-empty loop (rather than list_for_each_entry_safe)
 * so the list stays consistent even if a remove callback manipulates
 * other entries.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1534
/* Tear down the L2CAP connection attached to @hcon: notify users,
 * delete every channel with error @err, cancel pending timers and
 * drop the connection reference.  No-op if no l2cap_data is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Discard any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so chan survives l2cap_chan_del() until
		 * the close callback has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	/* Detach from the hci_conn; conn->hchan == NULL signals that
	 * deletion has happened (see l2cap_register_user()).
	 */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1580
/* SMP security procedure timed out: destroy the SMP context and drop
 * the whole LE connection with ETIMEDOUT.  The test_and_clear guards
 * against racing with a procedure that just completed.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1593
1594 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1595 {
1596 struct l2cap_conn *conn = hcon->l2cap_data;
1597 struct hci_chan *hchan;
1598
1599 if (conn)
1600 return conn;
1601
1602 hchan = hci_chan_create(hcon);
1603 if (!hchan)
1604 return NULL;
1605
1606 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1607 if (!conn) {
1608 hci_chan_del(hchan);
1609 return NULL;
1610 }
1611
1612 kref_init(&conn->ref);
1613 hcon->l2cap_data = conn;
1614 conn->hcon = hcon;
1615 hci_conn_get(conn->hcon);
1616 conn->hchan = hchan;
1617
1618 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1619
1620 switch (hcon->type) {
1621 case LE_LINK:
1622 if (hcon->hdev->le_mtu) {
1623 conn->mtu = hcon->hdev->le_mtu;
1624 break;
1625 }
1626 /* fall through */
1627 default:
1628 conn->mtu = hcon->hdev->acl_mtu;
1629 break;
1630 }
1631
1632 conn->src = &hcon->hdev->bdaddr;
1633 conn->dst = &hcon->dst;
1634
1635 conn->feat_mask = 0;
1636
1637 spin_lock_init(&conn->lock);
1638 mutex_init(&conn->chan_lock);
1639
1640 INIT_LIST_HEAD(&conn->chan_l);
1641 INIT_LIST_HEAD(&conn->users);
1642
1643 if (hcon->type == LE_LINK)
1644 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1645 else
1646 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1647
1648 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1649
1650 return conn;
1651 }
1652
/* kref release callback: drop the hci_conn reference and free conn */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1660
/* Take a reference on an l2cap_conn */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1666
/* Drop a reference; frees the connection via l2cap_conn_free() when
 * the last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1672
1673 /* ---- Socket interface ---- */
1674
1675 /* Find socket with psm and source / destination bdaddr.
1676 * Returns closest match.
1677 */
1678 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1679 bdaddr_t *src,
1680 bdaddr_t *dst)
1681 {
1682 struct l2cap_chan *c, *c1 = NULL;
1683
1684 read_lock(&chan_list_lock);
1685
1686 list_for_each_entry(c, &chan_list, global_l) {
1687 struct sock *sk = c->sk;
1688
1689 if (state && c->state != state)
1690 continue;
1691
1692 if (c->psm == psm) {
1693 int src_match, dst_match;
1694 int src_any, dst_any;
1695
1696 /* Exact match. */
1697 src_match = !bacmp(&bt_sk(sk)->src, src);
1698 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1699 if (src_match && dst_match) {
1700 read_unlock(&chan_list_lock);
1701 return c;
1702 }
1703
1704 /* Closest match */
1705 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1706 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1707 if ((src_match && dst_any) || (src_any && dst_match) ||
1708 (src_any && dst_any))
1709 c1 = c;
1710 }
1711 }
1712
1713 read_unlock(&chan_list_lock);
1714
1715 return c1;
1716 }
1717
/* Initiate an outgoing L2CAP channel to @dst on PSM @psm or fixed CID
 * @cid.  Creates (or reuses) the underlying ACL/LE link and moves the
 * channel to BT_CONNECT.  Returns 0 on success or when the channel is
 * already connecting, -EISCONN when already connected, or a negative
 * error on invalid parameters / link failures.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed CID may only be used once per connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, which nests outside
	 * the channel lock, so drop and re-take the channel lock.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* If the link is already up, continue setup immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1850
/* Block (interruptibly) until all outstanding ERTM I-frames have been
 * acknowledged or the channel loses its connection.  Called with the
 * socket locked; the lock is dropped around each sleep.  Returns 0 on
 * success, a signal errno, or a pending socket error.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the per-iteration poll interval */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so acks can be processed */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1882
1883 static void l2cap_monitor_timeout(struct work_struct *work)
1884 {
1885 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1886 monitor_timer.work);
1887
1888 BT_DBG("chan %p", chan);
1889
1890 l2cap_chan_lock(chan);
1891
1892 if (!chan->conn) {
1893 l2cap_chan_unlock(chan);
1894 l2cap_chan_put(chan);
1895 return;
1896 }
1897
1898 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1899
1900 l2cap_chan_unlock(chan);
1901 l2cap_chan_put(chan);
1902 }
1903
1904 static void l2cap_retrans_timeout(struct work_struct *work)
1905 {
1906 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1907 retrans_timer.work);
1908
1909 BT_DBG("chan %p", chan);
1910
1911 l2cap_chan_lock(chan);
1912
1913 if (!chan->conn) {
1914 l2cap_chan_unlock(chan);
1915 l2cap_chan_put(chan);
1916 return;
1917 }
1918
1919 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1920 l2cap_chan_unlock(chan);
1921 l2cap_chan_put(chan);
1922 }
1923
1924 static void l2cap_streaming_send(struct l2cap_chan *chan,
1925 struct sk_buff_head *skbs)
1926 {
1927 struct sk_buff *skb;
1928 struct l2cap_ctrl *control;
1929
1930 BT_DBG("chan %p, skbs %p", chan, skbs);
1931
1932 if (__chan_is_moving(chan))
1933 return;
1934
1935 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1936
1937 while (!skb_queue_empty(&chan->tx_q)) {
1938
1939 skb = skb_dequeue(&chan->tx_q);
1940
1941 bt_cb(skb)->control.retries = 1;
1942 control = &bt_cb(skb)->control;
1943
1944 control->reqseq = 0;
1945 control->txseq = chan->next_tx_seq;
1946
1947 __pack_control(chan, control, skb);
1948
1949 if (chan->fcs == L2CAP_FCS_CRC16) {
1950 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1951 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1952 }
1953
1954 l2cap_do_send(chan, skb);
1955
1956 BT_DBG("Sent txseq %u", control->txseq);
1957
1958 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1959 chan->frames_sent++;
1960 }
1961 }
1962
/* Send as many queued ERTM I-frames as the remote TX window allows.
 * Each frame is cloned before transmission so the original stays on
 * tx_q for possible retransmission.  Returns the number of frames
 * sent, 0 when sending is blocked, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer signalled RNR: hold off until it is ready again */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* Suspended during an AMP channel move */
	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggy-back an ack for everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL means queue exhausted */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2032
/* Retransmit every sequence number queued on chan->retrans_list.
 * Each frame's control field is rebuilt with the current reqseq (and
 * F-bit if pending) before being re-sent.  Exceeding max_tx retries
 * disconnects the channel.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Peer signalled RNR: hold off until it is ready again */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	/* Suspended during an AMP channel move */
	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* Retry limit: give up and tear the channel down */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for this retransmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS over the updated control field */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2111
/* Retransmit the single frame identified by control->reqseq */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2120
/* Retransmit every unacked frame starting at control->reqseq, up to
 * (but not including) tx_send_head.  Used when the peer's ack tells
 * us a whole run of frames went missing.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the retransmit list from scratch */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to resend: reqseq, or everything
		 * already sent if reqseq isn't in the queue.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue all sent-but-unacked frames from there on */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2154
/* Acknowledge received I-frames.  Sends RNR when we are locally busy,
 * otherwise tries to piggy-back the ack on outgoing I-frames and only
 * emits an explicit RR once about 3/4 of the ack window is pending;
 * below that threshold the ack timer is (re)armed instead.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Defer the remaining acks to the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2204
/* Copy @len bytes of user data from @msg into @skb, spilling anything
 * beyond the first @count bytes into a frag_list of continuation
 * buffers sized to the connection MTU.  Returns the number of bytes
 * copied, -EFAULT on a failed user copy, or an allocation error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Account the fragment in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2249
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the PSM and the user payload.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* Payload bytes that fit in the first (header-carrying) skb */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2283
/* Build a basic-mode (B-frame) PDU: L2CAP header plus user payload.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Payload bytes that fit in the first (header-carrying) skb */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2316
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU length field for
 * the first segment (@sdulen != 0), payload, and room for the FCS.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on extended vs enhanced control field */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2370
/* Segment an SDU of @len bytes from @msg into I-frame PDUs appended to
 * @seg_queue, tagging each with the proper SAR value (UNSEGMENTED, or
 * START/CONTINUE/END).  On any allocation/copy error the queue is
 * purged and the error returned; returns 0 on success.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later segments have no SDU length field */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2440
2441 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2442 u32 priority)
2443 {
2444 struct sk_buff *skb;
2445 int err;
2446 struct sk_buff_head seg_queue;
2447
2448 /* Connectionless channel */
2449 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2450 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2451 if (IS_ERR(skb))
2452 return PTR_ERR(skb);
2453
2454 l2cap_do_send(chan, skb);
2455 return len;
2456 }
2457
2458 switch (chan->mode) {
2459 case L2CAP_MODE_BASIC:
2460 /* Check outgoing MTU */
2461 if (len > chan->omtu)
2462 return -EMSGSIZE;
2463
2464 /* Create a basic PDU */
2465 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2466 if (IS_ERR(skb))
2467 return PTR_ERR(skb);
2468
2469 l2cap_do_send(chan, skb);
2470 err = len;
2471 break;
2472
2473 case L2CAP_MODE_ERTM:
2474 case L2CAP_MODE_STREAMING:
2475 /* Check outgoing MTU */
2476 if (len > chan->omtu) {
2477 err = -EMSGSIZE;
2478 break;
2479 }
2480
2481 __skb_queue_head_init(&seg_queue);
2482
2483 /* Do segmentation before calling in to the state machine,
2484 * since it's possible to block while waiting for memory
2485 * allocation.
2486 */
2487 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2488
2489 /* The channel could have been closed while segmenting,
2490 * check that it is still connected.
2491 */
2492 if (chan->state != BT_CONNECTED) {
2493 __skb_queue_purge(&seg_queue);
2494 err = -ENOTCONN;
2495 }
2496
2497 if (err)
2498 break;
2499
2500 if (chan->mode == L2CAP_MODE_ERTM)
2501 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2502 else
2503 l2cap_streaming_send(chan, &seg_queue);
2504
2505 err = len;
2506
2507 /* If the skbs were not queued for sending, they'll still be in
2508 * seg_queue and need to be purged.
2509 */
2510 __skb_queue_purge(&seg_queue);
2511 break;
2512
2513 default:
2514 BT_DBG("bad state %1.1x", chan->mode);
2515 err = -EBADFD;
2516 }
2517
2518 return err;
2519 }
2520
2521 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2522 {
2523 struct l2cap_ctrl control;
2524 u16 seq;
2525
2526 BT_DBG("chan %p, txseq %u", chan, txseq);
2527
2528 memset(&control, 0, sizeof(control));
2529 control.sframe = 1;
2530 control.super = L2CAP_SUPER_SREJ;
2531
2532 for (seq = chan->expected_tx_seq; seq != txseq;
2533 seq = __next_seq(chan, seq)) {
2534 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2535 control.reqseq = seq;
2536 l2cap_send_sframe(chan, &control);
2537 l2cap_seq_list_append(&chan->srej_list, seq);
2538 }
2539 }
2540
2541 chan->expected_tx_seq = __next_seq(chan, txseq);
2542 }
2543
2544 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2545 {
2546 struct l2cap_ctrl control;
2547
2548 BT_DBG("chan %p", chan);
2549
2550 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2551 return;
2552
2553 memset(&control, 0, sizeof(control));
2554 control.sframe = 1;
2555 control.super = L2CAP_SUPER_SREJ;
2556 control.reqseq = chan->srej_list.tail;
2557 l2cap_send_sframe(chan, &control);
2558 }
2559
/* Re-send SREJ frames for every sequence number on the SREJ list,
 * skipping txseq.  Each popped entry is appended back so the list
 * contents are preserved; initial_head bounds the loop to a single
 * pass over the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2585
/* Process the acknowledgment implied by a received reqseq: remove all
 * frames up to (but not including) reqseq from the transmit queue and
 * update the unacked-frame accounting.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing to do if no frames are outstanding or nothing new is
	 * being acknowledged.
	 */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* The frame may no longer be queued; only unlink and
		 * free it if it is found.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* Everything acknowledged: no retransmission pending */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2617
2618 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2619 {
2620 BT_DBG("chan %p", chan);
2621
2622 chan->expected_tx_seq = chan->buffer_seq;
2623 l2cap_seq_list_clear(&chan->srej_list);
2624 skb_queue_purge(&chan->srej_q);
2625 chan->rx_state = L2CAP_RX_STATE_RECV;
2626 }
2627
/* ERTM transmit state machine, XMIT state: the channel may transmit
 * new I-frames.  Handles data requests, local busy transitions,
 * acknowledgments and poll/timer events.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Append new segments and transmit as far as allowed */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy; poll the remote
			 * with RR so it resumes transmission, and wait
			 * for the final response.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the remote for its
		 * receive state and wait for the final response.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2699
2700 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2701 struct l2cap_ctrl *control,
2702 struct sk_buff_head *skbs, u8 event)
2703 {
2704 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2705 event);
2706
2707 switch (event) {
2708 case L2CAP_EV_DATA_REQUEST:
2709 if (chan->tx_send_head == NULL)
2710 chan->tx_send_head = skb_peek(skbs);
2711 /* Queue data, but don't send. */
2712 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2713 break;
2714 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2715 BT_DBG("Enter LOCAL_BUSY");
2716 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2717
2718 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2719 /* The SREJ_SENT state must be aborted if we are to
2720 * enter the LOCAL_BUSY state.
2721 */
2722 l2cap_abort_rx_srej_sent(chan);
2723 }
2724
2725 l2cap_send_ack(chan);
2726
2727 break;
2728 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2729 BT_DBG("Exit LOCAL_BUSY");
2730 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2731
2732 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2733 struct l2cap_ctrl local_control;
2734 memset(&local_control, 0, sizeof(local_control));
2735 local_control.sframe = 1;
2736 local_control.super = L2CAP_SUPER_RR;
2737 local_control.poll = 1;
2738 local_control.reqseq = chan->buffer_seq;
2739 l2cap_send_sframe(chan, &local_control);
2740
2741 chan->retry_count = 1;
2742 __set_monitor_timer(chan);
2743 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2744 }
2745 break;
2746 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2747 l2cap_process_reqseq(chan, control->reqseq);
2748
2749 /* Fall through */
2750
2751 case L2CAP_EV_RECV_FBIT:
2752 if (control && control->final) {
2753 __clear_monitor_timer(chan);
2754 if (chan->unacked_frames > 0)
2755 __set_retrans_timer(chan);
2756 chan->retry_count = 0;
2757 chan->tx_state = L2CAP_TX_STATE_XMIT;
2758 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2759 }
2760 break;
2761 case L2CAP_EV_EXPLICIT_POLL:
2762 /* Ignore */
2763 break;
2764 case L2CAP_EV_MONITOR_TO:
2765 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2766 l2cap_send_rr_or_rnr(chan, 1);
2767 __set_monitor_timer(chan);
2768 chan->retry_count++;
2769 } else {
2770 l2cap_send_disconn_req(chan, ECONNABORTED);
2771 }
2772 break;
2773 default:
2774 break;
2775 }
2776 }
2777
2778 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2779 struct sk_buff_head *skbs, u8 event)
2780 {
2781 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2782 chan, control, skbs, event, chan->tx_state);
2783
2784 switch (chan->tx_state) {
2785 case L2CAP_TX_STATE_XMIT:
2786 l2cap_tx_state_xmit(chan, control, skbs, event);
2787 break;
2788 case L2CAP_TX_STATE_WAIT_F:
2789 l2cap_tx_state_wait_f(chan, control, skbs, event);
2790 break;
2791 default:
2792 /* Ignore event */
2793 break;
2794 }
2795 }
2796
/* Feed a received reqseq (plus F-bit) into the transmit state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2803
/* Feed only the F-bit of a received frame into the transmit state
 * machine (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2810
2811 /* Copy frame to all raw sockets on that connection */
2812 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2813 {
2814 struct sk_buff *nskb;
2815 struct l2cap_chan *chan;
2816
2817 BT_DBG("conn %p", conn);
2818
2819 mutex_lock(&conn->chan_lock);
2820
2821 list_for_each_entry(chan, &conn->chan_l, list) {
2822 struct sock *sk = chan->sk;
2823 if (chan->chan_type != L2CAP_CHAN_RAW)
2824 continue;
2825
2826 /* Don't send frame to the socket it came from */
2827 if (skb->sk == sk)
2828 continue;
2829 nskb = skb_clone(skb, GFP_KERNEL);
2830 if (!nskb)
2831 continue;
2832
2833 if (chan->ops->recv(chan, nskb))
2834 kfree_skb(nskb);
2835 }
2836
2837 mutex_unlock(&conn->chan_lock);
2838 }
2839
2840 /* ---- L2CAP signalling commands ---- */
/* Build an skb carrying an L2CAP signalling command.
 *
 * The head skb contains the L2CAP header, the command header and as
 * much payload as fits within the connection MTU; remaining payload
 * is chained as continuation fragments on frag_list.  Returns NULL on
 * allocation failure or if the MTU cannot even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* The signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the head skb with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and all chained fragments */
	kfree_skb(skb);
	return NULL;
}
2906
2907 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2908 unsigned long *val)
2909 {
2910 struct l2cap_conf_opt *opt = *ptr;
2911 int len;
2912
2913 len = L2CAP_CONF_OPT_SIZE + opt->len;
2914 *ptr += len;
2915
2916 *type = opt->type;
2917 *olen = opt->len;
2918
2919 switch (opt->len) {
2920 case 1:
2921 *val = *((u8 *) opt->val);
2922 break;
2923
2924 case 2:
2925 *val = get_unaligned_le16(opt->val);
2926 break;
2927
2928 case 4:
2929 *val = get_unaligned_le32(opt->val);
2930 break;
2931
2932 default:
2933 *val = (unsigned long) opt->val;
2934 break;
2935 }
2936
2937 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2938 return len;
2939 }
2940
2941 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2942 {
2943 struct l2cap_conf_opt *opt = *ptr;
2944
2945 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2946
2947 opt->type = type;
2948 opt->len = len;
2949
2950 switch (len) {
2951 case 1:
2952 *((u8 *) opt->val) = val;
2953 break;
2954
2955 case 2:
2956 put_unaligned_le16(val, opt->val);
2957 break;
2958
2959 case 4:
2960 put_unaligned_le32(val, opt->val);
2961 break;
2962
2963 default:
2964 memcpy(opt->val, (void *) val, len);
2965 break;
2966 }
2967
2968 *ptr += L2CAP_CONF_OPT_SIZE + len;
2969 }
2970
2971 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2972 {
2973 struct l2cap_conf_efs efs;
2974
2975 switch (chan->mode) {
2976 case L2CAP_MODE_ERTM:
2977 efs.id = chan->local_id;
2978 efs.stype = chan->local_stype;
2979 efs.msdu = cpu_to_le16(chan->local_msdu);
2980 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2981 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2982 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2983 break;
2984
2985 case L2CAP_MODE_STREAMING:
2986 efs.id = 1;
2987 efs.stype = L2CAP_SERV_BESTEFFORT;
2988 efs.msdu = cpu_to_le16(chan->local_msdu);
2989 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2990 efs.acc_lat = 0;
2991 efs.flush_to = 0;
2992 break;
2993
2994 default:
2995 return;
2996 }
2997
2998 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2999 (unsigned long) &efs);
3000 }
3001
/* Delayed-work handler for the ERTM ack timer: if any received
 * I-frames have not yet been acknowledged, send an RR (or RNR).
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last ack was sent */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drops the channel reference — presumably taken when the
	 * timer was armed; NOTE(review): confirm against __set_ack_timer.
	 */
	l2cap_chan_put(chan);
}
3021
/* Reset per-channel sequence counters, reassembly state and queues
 * before (re)starting data transfer.  For ERTM mode this also sets up
 * the RX/TX state machines, the delayed-work timers and the SREJ and
 * retransmission sequence lists.
 *
 * Returns 0 on success, or a negative error if a sequence list cannot
 * be allocated (in which case nothing is left allocated).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Reset AMP channel-move state */
	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Non-ERTM modes need none of the machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3066
3067 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3068 {
3069 switch (mode) {
3070 case L2CAP_MODE_STREAMING:
3071 case L2CAP_MODE_ERTM:
3072 if (l2cap_mode_supported(mode, remote_feat_mask))
3073 return mode;
3074 /* fall through */
3075 default:
3076 return L2CAP_MODE_BASIC;
3077 }
3078 }
3079
/* Extended window size requires high-speed support enabled and the
 * remote's extended-window feature bit.
 */
static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}
3084
/* Extended flow specification requires high-speed support enabled and
 * the remote's extended-flow feature bit.
 */
static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
3089
/* Fill in the ERTM retransmission and monitor timeouts of an RFC
 * option.  AMP channels derive the timeouts from the controller's
 * best-effort flush timeout; otherwise the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3127
3128 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3129 {
3130 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3131 __l2cap_ews_supported(chan)) {
3132 /* use extended control field */
3133 set_bit(FLAG_EXT_CTRL, &chan->flags);
3134 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3135 } else {
3136 chan->tx_win = min_t(u16, chan->tx_win,
3137 L2CAP_DEFAULT_TX_WINDOW);
3138 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3139 }
3140 chan->ack_win = chan->tx_win;
3141 }
3142
/* Build a configuration request for the channel into data.
 *
 * On the very first request the channel mode may be downgraded based
 * on the remote's feature mask.  MTU, RFC, EFS, EWS and FCS options
 * are appended as appropriate for the selected mode.  Returns the
 * length of the request payload.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any conf exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* RFC option only needed if the remote knows about it */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS limited so every PDU fits the HCI MTU with the
		 * worst-case L2CAP overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Windows larger than the default go in the EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3260
/* Parse the peer's configuration request (accumulated in
 * chan->conf_req) and build the response into data.
 *
 * Unknown non-hint options are reported back with L2CAP_CONF_UNKNOWN.
 * Mode, MTU, EFS and window options are validated; negotiated values
 * are stored on the channel.  Returns the response payload length, or
 * -ECONNREFUSED if the request cannot be accepted at all.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect the option values from the request */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown option types back to the peer */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A state-2 device cannot change its own mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Mode still disputed after one full exchange: give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				/* Incompatible service type */
				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				/* EWS carried the real window size */
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote MPS to what fits our HCI MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3474
/* Parse a configuration response from the peer and build a new
 * request reflecting its adjustments into data.
 *
 * *result may be updated (e.g. to L2CAP_CONF_UNACCEPT for a too-small
 * MTU).  Negotiated ERTM/streaming parameters are stored on the
 * channel.  Returns the length of the new request payload, or
 * -ECONNREFUSED if the response is unacceptable.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state-2 device cannot accept a mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Incompatible service type cannot be negotiated */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the peer's response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3583
/* Fill in a bare configuration response (no options) with the given
 * result and flags.  Returns the length of the response payload.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	/* ptr marks the end of the written data (no options appended) */
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return ptr - data;
}
3598
/* Send the deferred connect (or create-channel, for AMP) success
 * response for a channel whose acceptance was delayed, then start
 * configuration by sending the first configure request if one has not
 * already been sent.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels answer with a Create Channel Response instead */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only send the first configure request once */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3627
3628 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3629 {
3630 int type, olen;
3631 unsigned long val;
3632 /* Use sane default values in case a misbehaving remote device
3633 * did not send an RFC or extended window size option.
3634 */
3635 u16 txwin_ext = chan->ack_win;
3636 struct l2cap_conf_rfc rfc = {
3637 .mode = chan->mode,
3638 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3639 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3640 .max_pdu_size = cpu_to_le16(chan->imtu),
3641 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3642 };
3643
3644 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3645
3646 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3647 return;
3648
3649 while (len >= L2CAP_CONF_OPT_SIZE) {
3650 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3651
3652 switch (type) {
3653 case L2CAP_CONF_RFC:
3654 if (olen == sizeof(rfc))
3655 memcpy(&rfc, (void *)val, olen);
3656 break;
3657 case L2CAP_CONF_EWS:
3658 txwin_ext = val;
3659 break;
3660 }
3661 }
3662
3663 switch (rfc.mode) {
3664 case L2CAP_MODE_ERTM:
3665 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3666 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3667 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3668 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3669 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3670 else
3671 chan->ack_win = min_t(u16, chan->ack_win,
3672 rfc.txwin_size);
3673 break;
3674 case L2CAP_MODE_STREAMING:
3675 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3676 }
3677 }
3678
3679 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3680 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3681 u8 *data)
3682 {
3683 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3684
3685 if (cmd_len < sizeof(*rej))
3686 return -EPROTO;
3687
3688 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3689 return 0;
3690
3691 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3692 cmd->ident == conn->info_ident) {
3693 cancel_delayed_work(&conn->info_timer);
3694
3695 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3696 conn->info_ident = 0;
3697
3698 l2cap_conn_start(conn);
3699 }
3700
3701 return 0;
3702 }
3703
/* Handle an incoming Connection Request (or AMP Create Channel Request):
 * find a listening channel for the PSM, create the child channel and
 * send the response with rsp_code (@amp_id selects the local controller,
 * 0 = BR/EDR).  Returns the new channel, or NULL if the request was
 * refused.  Takes conn->chan_lock and the parent socket lock.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Keep the ACL connection alive while the channel exists */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace wants to authorize first */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange has not completed yet */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the information request exchange if it has not run yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, fire the first Configure Request */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3834
3835 static int l2cap_connect_req(struct l2cap_conn *conn,
3836 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3837 {
3838 struct hci_dev *hdev = conn->hcon->hdev;
3839 struct hci_conn *hcon = conn->hcon;
3840
3841 if (cmd_len < sizeof(struct l2cap_conn_req))
3842 return -EPROTO;
3843
3844 hci_dev_lock(hdev);
3845 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3846 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3847 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3848 hcon->dst_type, 0, NULL, 0,
3849 hcon->dev_class);
3850 hci_dev_unlock(hdev);
3851
3852 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3853 return 0;
3854 }
3855
/* Handle a Connection Response or Create Channel Response.  Look up the
 * channel by source CID, or by command ident while still pending, and
 * advance it into configuration, keep it pending, or tear it down on
 * refusal.  Takes conn->chan_lock, then the channel lock.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* Pending responses may omit the CID; match on ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the first Configure Request exactly once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal: drop the channel */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3928
3929 static inline void set_default_fcs(struct l2cap_chan *chan)
3930 {
3931 /* FCS is enabled only in ERTM or streaming mode, if one or both
3932 * sides request it.
3933 */
3934 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3935 chan->fcs = L2CAP_FCS_NONE;
3936 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3937 chan->fcs = L2CAP_FCS_CRC16;
3938 }
3939
3940 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3941 u8 ident, u16 flags)
3942 {
3943 struct l2cap_conn *conn = chan->conn;
3944
3945 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3946 flags);
3947
3948 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3949 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3950
3951 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3952 l2cap_build_conf_rsp(chan, data,
3953 L2CAP_CONF_SUCCESS, flags), data);
3954 }
3955
/* Handle a Configure Request: accumulate (possibly fragmented) options,
 * parse them once complete, send the Configure Response, and bring the
 * channel up when both directions have finished configuration.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success the channel is returned locked */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only valid while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unrecoverable option conflict: drop the connection */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize FCS/ERTM and go ready */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* AMP channel: respond once the logical link is up */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4068
/* Handle a Configure Response.  On success or pending, absorb the
 * returned options; on "unacceptable parameters" retry with adjusted
 * options up to L2CAP_CONF_MAX_CONF_RSP times; otherwise disconnect.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* On success the channel is returned locked */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP channel: defer the response until the
				 * logical link is established.
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many failed retries, give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments to come; wait for the final response */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize FCS/ERTM and go ready */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4180
/* Handle a Disconnection Request: acknowledge it, shut down the socket
 * and remove the channel.  Requests for unknown CIDs are silently
 * ignored.  Takes conn->chan_lock, then the channel lock.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold a reference so the channel survives until ops->close() */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4231
/* Handle a Disconnection Response: the remote acknowledged our request,
 * so remove and close the channel.  Unknown CIDs are silently ignored.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so the channel survives until ops->close() */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4270
/* Handle an Information Request: report our feature mask or fixed
 * channel bitmap, or NOT_SUPPORTED for anything else.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise optional features per module configuration */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* The A2MP fixed channel is only present with high speed */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4325
/* Handle an Information Response: record the remote's feature mask (and
 * follow up with a fixed-channel query if supported), then start any
 * channels that were waiting for the feature exchange.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote refused; proceed without extended feature info */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed-channel query before finishing */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4388
/* Handle a Create Channel Request: either fall back to a normal BR/EDR
 * connection (controller id 0) or create a channel bound to the given
 * AMP controller, rejecting invalid controller ids.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == HCI_BREDR_ID) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			return -EFAULT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		/* Bind the new channel to the high-speed link and disable
		 * FCS, which is not used on AMP controllers.
		 */
		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return -EFAULT;
}
4462
4463 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4464 {
4465 struct l2cap_move_chan_req req;
4466 u8 ident;
4467
4468 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4469
4470 ident = l2cap_get_ident(chan->conn);
4471 chan->ident = ident;
4472
4473 req.icid = cpu_to_le16(chan->scid);
4474 req.dest_amp_id = dest_amp_id;
4475
4476 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4477 &req);
4478
4479 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4480 }
4481
4482 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4483 {
4484 struct l2cap_move_chan_rsp rsp;
4485
4486 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4487
4488 rsp.icid = cpu_to_le16(chan->dcid);
4489 rsp.result = cpu_to_le16(result);
4490
4491 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4492 sizeof(rsp), &rsp);
4493 }
4494
4495 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4496 {
4497 struct l2cap_move_chan_cfm cfm;
4498
4499 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4500
4501 chan->ident = l2cap_get_ident(chan->conn);
4502
4503 cfm.icid = cpu_to_le16(chan->scid);
4504 cfm.result = cpu_to_le16(result);
4505
4506 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4507 sizeof(cfm), &cfm);
4508
4509 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4510 }
4511
4512 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4513 {
4514 struct l2cap_move_chan_cfm cfm;
4515
4516 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4517
4518 cfm.icid = cpu_to_le16(icid);
4519 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4520
4521 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4522 sizeof(cfm), &cfm);
4523 }
4524
4525 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4526 u16 icid)
4527 {
4528 struct l2cap_move_chan_cfm_rsp rsp;
4529
4530 BT_DBG("icid 0x%4.4x", icid);
4531
4532 rsp.icid = cpu_to_le16(icid);
4533 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4534 }
4535
4536 static void __release_logical_link(struct l2cap_chan *chan)
4537 {
4538 chan->hs_hchan = NULL;
4539 chan->hs_hcon = NULL;
4540
4541 /* Placeholder - release the logical link */
4542 }
4543
/* React to a failed AMP logical link setup: disconnect a channel still
 * being created, or abort an in-progress channel move.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot host the channel */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4574
/* Complete channel creation once the AMP logical link is up: send the
 * deferred EFS Configure Response and, if the remote side already
 * finished configuring, bring the channel ready.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident holds the Configure Request ident saved earlier */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4597
/* Continue a channel move once the AMP logical link is up, advancing the
 * move state machine according to our role and current state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Receiver busy: hold off until local busy clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4631
/* Call with chan locked */
/* Logical link setup completion callback: dispatch to the create or
 * move completion path, or clean up on failure (@status non-zero).
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4652
4653 void l2cap_move_start(struct l2cap_chan *chan)
4654 {
4655 BT_DBG("chan %p", chan);
4656
4657 if (chan->local_amp_id == HCI_BREDR_ID) {
4658 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4659 return;
4660 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4661 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4662 /* Placeholder - start physical link setup */
4663 } else {
4664 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4665 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4666 chan->move_id = 0;
4667 l2cap_move_setup(chan);
4668 l2cap_send_move_chan_req(chan, 0);
4669 }
4670 }
4671
/* Continue AMP channel creation after the physical link attempt: for an
 * outgoing channel, send Create Channel or fall back to BR/EDR; for an
 * incoming channel, send the deferred Create Channel Response.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* AMP channels never use FCS */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Move to configuration and send the first
			 * Configure Request.
			 */
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4723
4724 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4725 u8 remote_amp_id)
4726 {
4727 l2cap_move_setup(chan);
4728 chan->move_id = local_amp_id;
4729 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4730
4731 l2cap_send_move_chan_req(chan, remote_amp_id);
4732 }
4733
/* As move responder: answer according to logical link availability.
 * NOTE(review): hchan is a placeholder (always NULL here), so currently
 * this always sends NOT_ALLOWED; the other branches are staged for when
 * the logical-link lookup is implemented.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4758
4759 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4760 {
4761 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4762 u8 rsp_result;
4763 if (result == -EINVAL)
4764 rsp_result = L2CAP_MR_BAD_ID;
4765 else
4766 rsp_result = L2CAP_MR_NOT_ALLOWED;
4767
4768 l2cap_send_move_chan_rsp(chan, rsp_result);
4769 }
4770
4771 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4772 chan->move_state = L2CAP_MOVE_STABLE;
4773
4774 /* Restart data transmission */
4775 l2cap_ertm_send(chan);
4776 }
4777
4778 /* Invoke with locked chan */
4779 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4780 {
4781 u8 local_amp_id = chan->local_amp_id;
4782 u8 remote_amp_id = chan->remote_amp_id;
4783
4784 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4785 chan, result, local_amp_id, remote_amp_id);
4786
4787 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4788 l2cap_chan_unlock(chan);
4789 return;
4790 }
4791
4792 if (chan->state != BT_CONNECTED) {
4793 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4794 } else if (result != L2CAP_MR_SUCCESS) {
4795 l2cap_do_move_cancel(chan, result);
4796 } else {
4797 switch (chan->move_role) {
4798 case L2CAP_MOVE_ROLE_INITIATOR:
4799 l2cap_do_move_initiate(chan, local_amp_id,
4800 remote_amp_id);
4801 break;
4802 case L2CAP_MOVE_ROLE_RESPONDER:
4803 l2cap_do_move_respond(chan, result);
4804 break;
4805 default:
4806 l2cap_do_move_cancel(chan, result);
4807 break;
4808 }
4809 }
4810 }
4811
/* Handle an incoming L2CAP Move Channel Request (AMP channel move).
 *
 * Validates the request (dynamic CID, ERTM/streaming mode, channel
 * policy allows AMP, destination controller exists and is powered),
 * detects move collisions, and always answers with a Move Channel
 * Response.  Returns 0 when handled, -EPROTO on a malformed length,
 * -EINVAL when high-speed support is disabled.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!enable_hs)
		return -EINVAL;

	/* Unknown channel: reject by icid alone, there is no chan to
	 * lock or answer through.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the request ident for the deferred response path */
	chan->ident = cmd->ident;

	/* Only dynamic channels in ERTM or streaming mode that are not
	 * pinned to BR/EDR may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Non-zero destination: the AMP controller must exist and be up */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4909
/* Advance the initiator-side move state machine after a successful or
 * pending Move Channel Response for icid.
 *
 * NOTE(review): hchan is a placeholder and is never assigned, so the
 * WAIT_RSP case below always takes the "logical link not available"
 * branch until the AMP logical link lookup is implemented.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* No such channel: confirm by icid so the remote's
		 * state machine can terminate.
		 */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending response extends the response timeout */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
4999
5000 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5001 u16 result)
5002 {
5003 struct l2cap_chan *chan;
5004
5005 chan = l2cap_get_chan_by_ident(conn, ident);
5006 if (!chan) {
5007 /* Could not locate channel, icid is best guess */
5008 l2cap_send_move_chan_cfm_icid(conn, icid);
5009 return;
5010 }
5011
5012 __clear_chan_timer(chan);
5013
5014 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5015 if (result == L2CAP_MR_COLLISION) {
5016 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5017 } else {
5018 /* Cleanup - cancel move */
5019 chan->move_id = chan->local_amp_id;
5020 l2cap_move_done(chan);
5021 }
5022 }
5023
5024 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5025
5026 l2cap_chan_unlock(chan);
5027 }
5028
5029 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5030 struct l2cap_cmd_hdr *cmd,
5031 u16 cmd_len, void *data)
5032 {
5033 struct l2cap_move_chan_rsp *rsp = data;
5034 u16 icid, result;
5035
5036 if (cmd_len != sizeof(*rsp))
5037 return -EPROTO;
5038
5039 icid = le16_to_cpu(rsp->icid);
5040 result = le16_to_cpu(rsp->result);
5041
5042 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5043
5044 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5045 l2cap_move_continue(conn, icid, result);
5046 else
5047 l2cap_move_fail(conn, cmd->ident, icid, result);
5048
5049 return 0;
5050 }
5051
5052 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5053 struct l2cap_cmd_hdr *cmd,
5054 u16 cmd_len, void *data)
5055 {
5056 struct l2cap_move_chan_cfm *cfm = data;
5057 struct l2cap_chan *chan;
5058 u16 icid, result;
5059
5060 if (cmd_len != sizeof(*cfm))
5061 return -EPROTO;
5062
5063 icid = le16_to_cpu(cfm->icid);
5064 result = le16_to_cpu(cfm->result);
5065
5066 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5067
5068 chan = l2cap_get_chan_by_dcid(conn, icid);
5069 if (!chan) {
5070 /* Spec requires a response even if the icid was not found */
5071 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5072 return 0;
5073 }
5074
5075 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5076 if (result == L2CAP_MC_CONFIRMED) {
5077 chan->local_amp_id = chan->move_id;
5078 if (!chan->local_amp_id)
5079 __release_logical_link(chan);
5080 } else {
5081 chan->move_id = chan->local_amp_id;
5082 }
5083
5084 l2cap_move_done(chan);
5085 }
5086
5087 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5088
5089 l2cap_chan_unlock(chan);
5090
5091 return 0;
5092 }
5093
5094 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5095 struct l2cap_cmd_hdr *cmd,
5096 u16 cmd_len, void *data)
5097 {
5098 struct l2cap_move_chan_cfm_rsp *rsp = data;
5099 struct l2cap_chan *chan;
5100 u16 icid;
5101
5102 if (cmd_len != sizeof(*rsp))
5103 return -EPROTO;
5104
5105 icid = le16_to_cpu(rsp->icid);
5106
5107 BT_DBG("icid 0x%4.4x", icid);
5108
5109 chan = l2cap_get_chan_by_scid(conn, icid);
5110 if (!chan)
5111 return 0;
5112
5113 __clear_chan_timer(chan);
5114
5115 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5116 chan->local_amp_id = chan->move_id;
5117
5118 if (!chan->local_amp_id && chan->hs_hchan)
5119 __release_logical_link(chan);
5120
5121 l2cap_move_done(chan);
5122 }
5123
5124 l2cap_chan_unlock(chan);
5125
5126 return 0;
5127 }
5128
5129 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5130 u16 to_multiplier)
5131 {
5132 u16 max_latency;
5133
5134 if (min > max || min < 6 || max > 3200)
5135 return -EINVAL;
5136
5137 if (to_multiplier < 10 || to_multiplier > 3200)
5138 return -EINVAL;
5139
5140 if (max >= to_multiplier * 8)
5141 return -EINVAL;
5142
5143 max_latency = (to_multiplier * 8 / max) - 1;
5144 if (latency > 499 || latency > max_latency)
5145 return -EINVAL;
5146
5147 return 0;
5148 }
5149
5150 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5151 struct l2cap_cmd_hdr *cmd,
5152 u8 *data)
5153 {
5154 struct hci_conn *hcon = conn->hcon;
5155 struct l2cap_conn_param_update_req *req;
5156 struct l2cap_conn_param_update_rsp rsp;
5157 u16 min, max, latency, to_multiplier, cmd_len;
5158 int err;
5159
5160 if (!(hcon->link_mode & HCI_LM_MASTER))
5161 return -EINVAL;
5162
5163 cmd_len = __le16_to_cpu(cmd->len);
5164 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5165 return -EPROTO;
5166
5167 req = (struct l2cap_conn_param_update_req *) data;
5168 min = __le16_to_cpu(req->min);
5169 max = __le16_to_cpu(req->max);
5170 latency = __le16_to_cpu(req->latency);
5171 to_multiplier = __le16_to_cpu(req->to_multiplier);
5172
5173 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5174 min, max, latency, to_multiplier);
5175
5176 memset(&rsp, 0, sizeof(rsp));
5177
5178 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5179 if (err)
5180 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5181 else
5182 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5183
5184 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5185 sizeof(rsp), &rsp);
5186
5187 if (!err)
5188 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5189
5190 return 0;
5191 }
5192
/* Dispatch a single BR/EDR signaling command to its handler.  A
 * negative return causes the caller (l2cap_sig_channel) to send a
 * Command Reject for this command.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	/* Connect and Create Channel responses share one handler */
	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5272
5273 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5274 struct l2cap_cmd_hdr *cmd, u8 *data)
5275 {
5276 switch (cmd->code) {
5277 case L2CAP_COMMAND_REJ:
5278 return 0;
5279
5280 case L2CAP_CONN_PARAM_UPDATE_REQ:
5281 return l2cap_conn_param_update_req(conn, cmd, data);
5282
5283 case L2CAP_CONN_PARAM_UPDATE_RSP:
5284 return 0;
5285
5286 default:
5287 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5288 return -EINVAL;
5289 }
5290 }
5291
5292 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5293 struct sk_buff *skb)
5294 {
5295 u8 *data = skb->data;
5296 int len = skb->len;
5297 struct l2cap_cmd_hdr cmd;
5298 int err;
5299
5300 l2cap_raw_recv(conn, skb);
5301
5302 while (len >= L2CAP_CMD_HDR_SIZE) {
5303 u16 cmd_len;
5304 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5305 data += L2CAP_CMD_HDR_SIZE;
5306 len -= L2CAP_CMD_HDR_SIZE;
5307
5308 cmd_len = le16_to_cpu(cmd.len);
5309
5310 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5311 cmd.ident);
5312
5313 if (cmd_len > len || !cmd.ident) {
5314 BT_DBG("corrupted command");
5315 break;
5316 }
5317
5318 err = l2cap_le_sig_cmd(conn, &cmd, data);
5319 if (err) {
5320 struct l2cap_cmd_rej_unk rej;
5321
5322 BT_ERR("Wrong link type (%d)", err);
5323
5324 /* FIXME: Map err to a valid reason */
5325 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5326 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5327 sizeof(rej), &rej);
5328 }
5329
5330 data += cmd_len;
5331 len -= cmd_len;
5332 }
5333
5334 kfree_skb(skb);
5335 }
5336
/* Process the BR/EDR signaling channel: iterate over every command in
 * the skb, dispatch each via l2cap_bredr_sig_cmd(), and send a
 * Command Reject for those that fail.  Consumes (frees) the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Stop on truncated payloads or an invalid zero ident */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5381
/* Verify the CRC-16 FCS on a received ERTM/streaming frame and trim it
 * off the skb.  Returns 0 when the FCS matches or is not in use,
 * -EBADMSG on a mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off the tail; the bytes stay readable
		 * just past the new skb->len, which is where the
		 * received FCS is read from below.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The CRC covers the L2CAP header (which lies just
		 * before skb->data) plus the remaining payload.
		 */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5402
/* Answer a poll by sending a frame carrying the F-bit: an RNR when we
 * are locally busy, otherwise pending I-frames (via l2cap_ertm_send)
 * and, if none of those carried the F-bit, a final RR.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just signalled it is no longer busy; restart the
	 * retransmission timer if frames are still unacked.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5436
/* Append new_frag to skb's frag_list, tracking the tail through
 * *last_frag so each append is O(1), and keep skb's accounting
 * (len/data_len/truesize) consistent.
 *
 * NOTE(review): on the very first append, both the frag_list head and
 * (*last_frag)->next are set to new_frag — this assumes *last_frag
 * points at skb itself and that skb is not queued anywhere, so
 * touching its next pointer is harmless; verify against callers.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5455
/* Reassemble a (possibly segmented) SDU from an incoming I-frame and
 * deliver it to the channel's recv op when complete.
 *
 * Takes ownership of skb: on success it is either delivered or kept in
 * chan->sdu; on error both skb and any partial SDU are freed.  Returns
 * 0 on success, -EINVAL on a SAR sequence violation, -EMSGSIZE when
 * the announced SDU exceeds the channel MTU, or the recv op's error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		/* First two bytes carry the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must be smaller than the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a start is a protocol error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The SDU must still be incomplete at this point */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End without a start is a protocol error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total length must match the announced SDU length */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5537
/* Re-segment queued outbound data after a channel move.
 * Placeholder - currently a no-op that reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5543
5544 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5545 {
5546 u8 event;
5547
5548 if (chan->mode != L2CAP_MODE_ERTM)
5549 return;
5550
5551 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5552 l2cap_tx(chan, NULL, NULL, event);
5553 }
5554
/* Drain the SREJ queue: deliver frames in sequence to
 * l2cap_reassemble_sdu() until a gap (or local busy) stops us.  When
 * the queue empties, return to the normal receive state and ack.
 * Returns 0 or the reassembly error.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: wait for the missing frame */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All out-of-order frames recovered; resume normal RX */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5588
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, honoring the P/F bits and tracking SREJ state so a later
 * F-bit response does not trigger a duplicate retransmission.
 * Disconnects on an invalid reqseq or when the retry limit is hit.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq requests a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* SREJ with P-bit: retransmit with F-bit set and
		 * remember the reqseq to suppress a duplicate when the
		 * matching F-bit response arrives.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F-bit answers the
			 * SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5646
/* Handle a received REJ S-frame: retransmit all unacked I-frames from
 * reqseq onward, using CONN_REJ_ACT to avoid doubling up when an
 * F-bit response to the same REJ arrives.  Disconnects on an invalid
 * reqseq or when the retry limit is hit.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq rejects a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only if this F-bit doesn't answer a REJ
		 * we already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5683
/* Classify a received I-frame's sequence number relative to the
 * current receive state: expected, duplicate, unexpected (gap that
 * needs an SREJ), one of the SREJ-recovery cases, or invalid.  The
 * result drives the ERTM receive state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5769
/* ERTM receive state machine handler for the normal RECV state.
 *
 * Processes an incoming I-frame or S-frame event.  skb ownership: if
 * the frame is delivered or queued, skb_in_use is set and the skb is
 * kept; otherwise it is freed before returning.  Returns 0 or a
 * reassembly error.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			/* skb now owned by the reassembly code */
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Remote is busy; hold off retransmissions */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Frame was neither delivered nor queued: drop it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5903
/* Receive state machine handler for the ERTM SREJ_SENT state.
 *
 * One or more selective-reject (SREJ) requests are outstanding, so
 * in-order delivery is suspended: incoming I-frames are buffered on
 * srej_q until the missing frames arrive and reassembly can resume.
 *
 * @chan:    L2CAP channel (caller holds the channel lock)
 * @control: unpacked control field of the received frame
 * @skb:     received frame; freed here unless queued for reassembly
 * @event:   L2CAP_EV_* event derived from the received frame
 *
 * Returns 0 on success or a negative errno from queued-frame delivery.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The frame at the head of the SREJ list arrived;
			 * drop it from the list and try to push contiguous
			 * buffered frames up the stack.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			/* Unrecoverable sequence error: drop the channel */
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit unless a REJ is already being acted on */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR S-frame */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frames not queued above are owned by us and must be freed */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6046
6047 static int l2cap_finish_move(struct l2cap_chan *chan)
6048 {
6049 BT_DBG("chan %p", chan);
6050
6051 chan->rx_state = L2CAP_RX_STATE_RECV;
6052
6053 if (chan->hs_hcon)
6054 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6055 else
6056 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6057
6058 return l2cap_resegment(chan);
6059 }
6060
/* Receive handler for the WAIT_P state: after initiating a channel
 * move, wait for an S-frame with the Poll bit set before resuming.
 *
 * The peer's reqseq releases acked frames; the TX queue and
 * next_tx_seq are rewound so still-unacked frames get retransmitted
 * under the new controller's MTU.
 *
 * Returns -EPROTO on protocol violations (no Poll bit, or an I-frame
 * event in this state), a negative errno from resegmentation, or the
 * result of re-processing the event in the RECV state.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a frame with the Poll bit is valid here */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Back to RECV state, new MTU, resegment pending data */
	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6098
/* Receive handler for the WAIT_F state: after responding to a channel
 * move, wait for a frame with the Final bit set before resuming.
 *
 * Releases frames acked by reqseq, rewinds the TX queue for
 * retransmission, adopts the new controller MTU and resegments, then
 * processes the triggering event in the RECV state.
 *
 * Returns -EPROTO if the Final bit is not set, otherwise the result
 * of resegmentation / RECV-state processing.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* MTU of whichever controller now carries the channel */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6136
6137 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6138 {
6139 /* Make sure reqseq is for a packet that has been sent but not acked */
6140 u16 unacked;
6141
6142 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6143 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6144 }
6145
6146 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6147 struct sk_buff *skb, u8 event)
6148 {
6149 int err = 0;
6150
6151 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6152 control, skb, event, chan->rx_state);
6153
6154 if (__valid_reqseq(chan, control->reqseq)) {
6155 switch (chan->rx_state) {
6156 case L2CAP_RX_STATE_RECV:
6157 err = l2cap_rx_state_recv(chan, control, skb, event);
6158 break;
6159 case L2CAP_RX_STATE_SREJ_SENT:
6160 err = l2cap_rx_state_srej_sent(chan, control, skb,
6161 event);
6162 break;
6163 case L2CAP_RX_STATE_WAIT_P:
6164 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6165 break;
6166 case L2CAP_RX_STATE_WAIT_F:
6167 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6168 break;
6169 default:
6170 /* shut it down */
6171 break;
6172 }
6173 } else {
6174 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6175 control->reqseq, chan->next_tx_seq,
6176 chan->expected_ack_seq);
6177 l2cap_send_disconn_req(chan, ECONNRESET);
6178 }
6179
6180 return err;
6181 }
6182
/* Receive path for streaming mode.
 *
 * Streaming mode has no retransmission: an in-sequence frame is passed
 * to SDU reassembly, while any other classification abandons the SDU
 * in progress and resynchronizes to the received sequence number.
 * Always returns 0 (err is never set).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: discard the partially assembled SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize: the next frame should follow the one just seen */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6220
/* Entry point for data frames on an ERTM or streaming-mode channel.
 *
 * Unpacks and validates the control field, FCS and length, then maps
 * the frame to an L2CAP_EV_* event and feeds it to the receive state
 * machine.  Takes ownership of @skb; invalid frames are dropped and
 * protocol violations additionally disconnect the channel.
 * Always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Information payload excludes the SDU length field and the FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* A PDU larger than the negotiated MPS is a protocol violation */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame function code to a state machine event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no information payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6308
/* Dispatch a frame received on a dynamically allocated channel CID.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the channel
 * locked — the unlock at 'done' and the explicit lock on the A2MP
 * create path both rely on that; confirm against its definition.
 * The skb is consumed on every path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* A2MP channels are created on demand on first reception */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6367
6368 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6369 struct sk_buff *skb)
6370 {
6371 struct l2cap_chan *chan;
6372
6373 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6374 if (!chan)
6375 goto drop;
6376
6377 BT_DBG("chan %p, len %d", chan, skb->len);
6378
6379 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6380 goto drop;
6381
6382 if (chan->imtu < skb->len)
6383 goto drop;
6384
6385 if (!chan->ops->recv(chan, skb))
6386 return;
6387
6388 drop:
6389 kfree_skb(skb);
6390 }
6391
6392 static void l2cap_att_channel(struct l2cap_conn *conn,
6393 struct sk_buff *skb)
6394 {
6395 struct l2cap_chan *chan;
6396
6397 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6398 conn->src, conn->dst);
6399 if (!chan)
6400 goto drop;
6401
6402 BT_DBG("chan %p, len %d", chan, skb->len);
6403
6404 if (chan->imtu < skb->len)
6405 goto drop;
6406
6407 if (!chan->ops->recv(chan, skb))
6408 return;
6409
6410 drop:
6411 kfree_skb(skb);
6412 }
6413
/* Demultiplex a complete L2CAP frame to the proper channel handler
 * based on the CID in the basic header.
 *
 * The caller (l2cap_recv_acldata) guarantees the skb holds at least a
 * basic L2CAP header.  Frames whose header length field disagrees
 * with the actual payload length are dropped.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* lh still points at the header after the pull */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payloads start with a 16-bit PSM */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		/* Dynamically allocated CIDs */
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6459
6460 /* ---- L2CAP interface with lower layer (HCI) ---- */
6461
/* HCI callback: decide whether to accept an incoming ACL connection.
 *
 * Scans all listening L2CAP channels for one bound to this adapter's
 * address; a wildcard (BDADDR_ANY) listener is used only when no
 * exact match exists.  Returns HCI_LM_* link-mode flags, or 0 if no
 * listener accepts.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Exact match on the local adapter address */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* Wildcard listener, used only if no exact match */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
6492
6493 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6494 {
6495 struct l2cap_conn *conn;
6496
6497 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6498
6499 if (!status) {
6500 conn = l2cap_conn_add(hcon);
6501 if (conn)
6502 l2cap_conn_ready(conn);
6503 } else {
6504 l2cap_conn_del(hcon, bt_to_errno(status));
6505 }
6506 }
6507
6508 int l2cap_disconn_ind(struct hci_conn *hcon)
6509 {
6510 struct l2cap_conn *conn = hcon->l2cap_data;
6511
6512 BT_DBG("hcon %p", hcon);
6513
6514 if (!conn)
6515 return HCI_ERROR_REMOTE_USER_TERM;
6516 return conn->disc_reason;
6517 }
6518
/* HCI callback: the ACL link was disconnected; tear down all L2CAP
 * state associated with it.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
6525
6526 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6527 {
6528 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6529 return;
6530
6531 if (encrypt == 0x00) {
6532 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6533 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6534 } else if (chan->sec_level == BT_SECURITY_HIGH)
6535 l2cap_chan_close(chan, ECONNREFUSED);
6536 } else {
6537 if (chan->sec_level == BT_SECURITY_MEDIUM)
6538 __clear_chan_timer(chan);
6539 }
6540 }
6541
/* HCI callback: authentication/encryption status changed on a link.
 *
 * Walks every channel on the connection and advances or aborts
 * pending connection attempts according to @status.
 *
 * @status:  0 on success, otherwise an HCI error code
 * @encrypt: current encryption state of the link
 *
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	/* On LE, a successful encryption triggers SMP key distribution */
	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are unaffected by link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* The ATT channel becomes ready once the link encrypts */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just refresh socket suspend state
		 * and encryption timers.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security completed while waiting to send our
			 * connect request: send it now, or give up.
			 */
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection was held for security:
			 * answer the peer's connect request accordingly.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted: immediately start configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
6657
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 *
 * Start fragments carry the basic L2CAP header, whose length field
 * says how much more data to expect; continuation fragments are
 * appended to conn->rx_skb until the frame is complete and handed to
 * l2cap_recv_frame().  The skb is always consumed.  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means the
		 * previous frame was truncated; discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
6758
6759 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6760 {
6761 struct l2cap_chan *c;
6762
6763 read_lock(&chan_list_lock);
6764
6765 list_for_each_entry(c, &chan_list, global_l) {
6766 struct sock *sk = c->sk;
6767
6768 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6769 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6770 c->state, __le16_to_cpu(c->psm),
6771 c->scid, c->dcid, c->imtu, c->omtu,
6772 c->sec_level, c->mode);
6773 }
6774
6775 read_unlock(&chan_list_lock);
6776
6777 return 0;
6778 }
6779
/* Open handler for the debugfs file: bind the single-show seq_file */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
6784
/* debugfs "l2cap" file: read-only seq_file dump of all channels */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
6791
6792 static struct dentry *l2cap_debugfs;
6793
6794 int __init l2cap_init(void)
6795 {
6796 int err;
6797
6798 err = l2cap_init_sockets();
6799 if (err < 0)
6800 return err;
6801
6802 if (bt_debugfs) {
6803 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6804 NULL, &l2cap_debugfs_fops);
6805 if (!l2cap_debugfs)
6806 BT_ERR("Failed to create L2CAP debug file");
6807 }
6808
6809 return 0;
6810 }
6811
/* Module teardown: remove the debugfs entry and unregister sockets */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
6817
6818 module_param(disable_ertm, bool, 0644);
6819 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.242269 seconds and 6 git commands to generate.