Bluetooth: Access sk_sndtimeo indirectly in l2cap_core.c
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
44 bool disable_ertm;
45
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
48
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
51
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
55 void *data);
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
58
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
61
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
63 {
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
67 else
68 return BDADDR_LE_RANDOM;
69 }
70
71 return BDADDR_BREDR;
72 }
73
74 /* ---- L2CAP channels ---- */
75
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
77 u16 cid)
78 {
79 struct l2cap_chan *c;
80
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->dcid == cid)
83 return c;
84 }
85 return NULL;
86 }
87
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
89 u16 cid)
90 {
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98 }
99
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
109 if (c)
110 l2cap_chan_lock(c);
111 mutex_unlock(&conn->chan_lock);
112
113 return c;
114 }
115
116 /* Find channel with given DCID.
117 * Returns locked channel.
118 */
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
120 u16 cid)
121 {
122 struct l2cap_chan *c;
123
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
126 if (c)
127 l2cap_chan_lock(c);
128 mutex_unlock(&conn->chan_lock);
129
130 return c;
131 }
132
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
134 u8 ident)
135 {
136 struct l2cap_chan *c;
137
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
140 return c;
141 }
142 return NULL;
143 }
144
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 u8 ident)
147 {
148 struct l2cap_chan *c;
149
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
152 if (c)
153 l2cap_chan_lock(c);
154 mutex_unlock(&conn->chan_lock);
155
156 return c;
157 }
158
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
160 {
161 struct l2cap_chan *c;
162
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
165 return c;
166 }
167 return NULL;
168 }
169
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
171 {
172 int err;
173
174 write_lock(&chan_list_lock);
175
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
177 err = -EADDRINUSE;
178 goto done;
179 }
180
181 if (psm) {
182 chan->psm = psm;
183 chan->sport = psm;
184 err = 0;
185 } else {
186 u16 p;
187
188 err = -EINVAL;
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
193 err = 0;
194 break;
195 }
196 }
197
198 done:
199 write_unlock(&chan_list_lock);
200 return err;
201 }
202
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
204 {
205 write_lock(&chan_list_lock);
206
207 chan->scid = scid;
208
209 write_unlock(&chan_list_lock);
210
211 return 0;
212 }
213
/* Allocate the first unused dynamic channel ID on @conn.  Returns 0
 * (an invalid CID) when the dynamic range is exhausted.  Callers are
 * expected to hold conn->chan_lock so the scan stays consistent.
 */
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid = L2CAP_CID_DYN_START;

	/* NOTE(review): the loop stops before L2CAP_CID_DYN_END, so the
	 * last CID of the dynamic range is never handed out -- confirm
	 * whether the upper bound is meant to be inclusive.
	 */
	for (; cid < L2CAP_CID_DYN_END; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
225
/* Set the channel state and notify the owner via the state_change op.
 * Lockless variant: the caller is responsible for any socket locking.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
234
/* Set the channel state with the owning socket locked. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
243
/* Set the channel state and report @err to the owner, with the socket
 * locked for the duration of the update.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
	release_sock(sk);
}
254
/* Report @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	chan->ops->state_change(chan, chan->state, err);
	release_sock(sk);
}
263
/* Arm the ERTM retransmission timer -- unless the monitor timer is
 * already pending (the two do not run together; see
 * __set_monitor_timer()) or no retransmit timeout was negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
272
273 static void __set_monitor_timer(struct l2cap_chan *chan)
274 {
275 __clear_retrans_timer(chan);
276 if (chan->monitor_timeout) {
277 l2cap_set_timer(chan, &chan->monitor_timer,
278 msecs_to_jiffies(chan->monitor_timeout));
279 }
280 }
281
/* Find the skb carrying ERTM TX sequence number @seq in @head, or
 * return NULL if no queued frame matches.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
294
295 /* ---- L2CAP sequence number lists ---- */
296
297 /* For ERTM, ordered lists of sequence numbers must be tracked for
298 * SREJ requests that are received and for frames that are to be
299 * retransmitted. These seq_list functions implement a singly-linked
300 * list in an array, where membership in the list can also be checked
301 * in constant time. Items can also be added to the tail of the list
302 * and removed from the head in constant time, without further memory
303 * allocs or frees.
304 */
305
306 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
307 {
308 size_t alloc_size, i;
309
310 /* Allocated size is a power of 2 to map sequence numbers
311 * (which may be up to 14 bits) in to a smaller array that is
312 * sized for the negotiated ERTM transmit windows.
313 */
314 alloc_size = roundup_pow_of_two(size);
315
316 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
317 if (!seq_list->list)
318 return -ENOMEM;
319
320 seq_list->mask = alloc_size - 1;
321 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
322 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
323 for (i = 0; i < alloc_size; i++)
324 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
325
326 return 0;
327 }
328
/* Release the array backing @seq_list (safe on a NULL list pointer). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
333
334 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
335 u16 seq)
336 {
337 /* Constant-time check for list membership */
338 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
339 }
340
/* Remove @seq from the list.  Removing the head is O(1); removing an
 * interior element walks the singly-linked chain to find its
 * predecessor.  Returns @seq on success, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or @seq is not linked.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			/* The removed head was also the tail: now empty */
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;

		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}

	return seq;
}
374
375 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
376 {
377 /* Remove the head in constant time */
378 return l2cap_seq_list_remove(seq_list, seq_list->head);
379 }
380
381 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
382 {
383 u16 i;
384
385 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
386 return;
387
388 for (i = 0; i <= seq_list->mask; i++)
389 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
390
391 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
392 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
393 }
394
/* Append @seq at the tail in constant time.  Duplicate appends are
 * ignored: a slot that is not L2CAP_SEQ_LIST_CLEAR means @seq is
 * already linked somewhere in the list.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* List was empty: the new element is also the head */
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
412
/* Delayed-work handler for the channel timer.  Closes the channel with
 * an error code that depends on the state it timed out in, then drops
 * the reference held for the pending timer.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock before the channel lock, matching
	 * the rest of this file.
	 */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* The close() callback runs without the channel lock held */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference presumably taken when the timer was armed
	 * (see l2cap_set_timer) -- balances the hold, not the initial ref.
	 */
	l2cap_chan_put(chan);
}
442
443 struct l2cap_chan *l2cap_chan_create(void)
444 {
445 struct l2cap_chan *chan;
446
447 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
448 if (!chan)
449 return NULL;
450
451 mutex_init(&chan->lock);
452
453 write_lock(&chan_list_lock);
454 list_add(&chan->global_l, &chan_list);
455 write_unlock(&chan_list_lock);
456
457 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
458
459 chan->state = BT_OPEN;
460
461 kref_init(&chan->kref);
462
463 /* This flag is cleared in l2cap_chan_ready() */
464 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
465
466 BT_DBG("chan %p", chan);
467
468 return chan;
469 }
470
471 static void l2cap_chan_destroy(struct kref *kref)
472 {
473 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
474
475 BT_DBG("chan %p", chan);
476
477 write_lock(&chan_list_lock);
478 list_del(&chan->global_l);
479 write_unlock(&chan_list_lock);
480
481 kfree(chan);
482 }
483
/* Take an additional reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
490
/* Drop a reference; frees the channel via l2cap_chan_destroy() when
 * the count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
497
/* Reset the tunable channel parameters (FCS, ERTM window sizes,
 * security level, force-active flag) to the defaults used before
 * configuration is negotiated.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
509
/* Attach @chan to @conn: assign source/destination CIDs and default
 * MTUs according to the channel type, install the best-effort flow
 * spec defaults, and link the channel into the connection's list.
 * Callers hold conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* New channel activity resets the recorded disconnect reason */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			if (chan->dcid == L2CAP_CID_ATT)
				chan->scid = L2CAP_CID_ATT;
			else
				chan->scid = l2cap_alloc_cid(conn);
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default (best-effort) extended flow specification */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* The connection's channel list holds a channel reference, and
	 * the channel pins the underlying hci_conn (dropped in
	 * l2cap_chan_del()).
	 */
	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
569
570 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
571 {
572 mutex_lock(&conn->chan_lock);
573 __l2cap_chan_add(conn, chan);
574 mutex_unlock(&conn->chan_lock);
575 }
576
577 void l2cap_chan_del(struct l2cap_chan *chan, int err)
578 {
579 struct l2cap_conn *conn = chan->conn;
580
581 __clear_chan_timer(chan);
582
583 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
584
585 if (conn) {
586 struct amp_mgr *mgr = conn->hcon->amp_mgr;
587 /* Delete from channel list */
588 list_del(&chan->list);
589
590 l2cap_chan_put(chan);
591
592 chan->conn = NULL;
593
594 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
595 hci_conn_drop(conn->hcon);
596
597 if (mgr && mgr->bredr_chan == chan)
598 mgr->bredr_chan = NULL;
599 }
600
601 if (chan->hs_hchan) {
602 struct hci_chan *hs_hchan = chan->hs_hchan;
603
604 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
605 amp_disconnect_logical_link(hs_hchan);
606 }
607
608 chan->ops->teardown(chan, err);
609
610 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
611 return;
612
613 switch(chan->mode) {
614 case L2CAP_MODE_BASIC:
615 break;
616
617 case L2CAP_MODE_ERTM:
618 __clear_retrans_timer(chan);
619 __clear_monitor_timer(chan);
620 __clear_ack_timer(chan);
621
622 skb_queue_purge(&chan->srej_q);
623
624 l2cap_seq_list_free(&chan->srej_list);
625 l2cap_seq_list_free(&chan->retrans_list);
626
627 /* fall through */
628
629 case L2CAP_MODE_STREAMING:
630 skb_queue_purge(&chan->tx_q);
631 break;
632 }
633
634 return;
635 }
636
/* Close @chan with @reason, taking the action appropriate for its
 * current state: send a disconnect request for established BR/EDR
 * channels, reject a still-pending incoming connection in BT_CONNECT2,
 * or simply delete the channel.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Allow the owner's send timeout for the peer to
			 * answer the disconnect request.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the not-yet-accepted incoming request */
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
692
693 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
694 {
695 switch (chan->chan_type) {
696 case L2CAP_CHAN_RAW:
697 switch (chan->sec_level) {
698 case BT_SECURITY_HIGH:
699 return HCI_AT_DEDICATED_BONDING_MITM;
700 case BT_SECURITY_MEDIUM:
701 return HCI_AT_DEDICATED_BONDING;
702 default:
703 return HCI_AT_NO_BONDING;
704 }
705 break;
706 case L2CAP_CHAN_CONN_LESS:
707 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
708 if (chan->sec_level == BT_SECURITY_LOW)
709 chan->sec_level = BT_SECURITY_SDP;
710 }
711 if (chan->sec_level == BT_SECURITY_HIGH)
712 return HCI_AT_NO_BONDING_MITM;
713 else
714 return HCI_AT_NO_BONDING;
715 break;
716 case L2CAP_CHAN_CONN_ORIENTED:
717 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
718 if (chan->sec_level == BT_SECURITY_LOW)
719 chan->sec_level = BT_SECURITY_SDP;
720
721 if (chan->sec_level == BT_SECURITY_HIGH)
722 return HCI_AT_NO_BONDING_MITM;
723 else
724 return HCI_AT_NO_BONDING;
725 }
726 /* fall through */
727 default:
728 switch (chan->sec_level) {
729 case BT_SECURITY_HIGH:
730 return HCI_AT_GENERAL_BONDING_MITM;
731 case BT_SECURITY_MEDIUM:
732 return HCI_AT_GENERAL_BONDING;
733 default:
734 return HCI_AT_NO_BONDING;
735 }
736 break;
737 }
738 }
739
740 /* Service level security */
741 int l2cap_chan_check_security(struct l2cap_chan *chan)
742 {
743 struct l2cap_conn *conn = chan->conn;
744 __u8 auth_type;
745
746 auth_type = l2cap_get_auth_type(chan);
747
748 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
749 }
750
751 static u8 l2cap_get_ident(struct l2cap_conn *conn)
752 {
753 u8 id;
754
755 /* Get next available identificator.
756 * 1 - 128 are used by kernel.
757 * 129 - 199 are reserved.
758 * 200 - 254 are used by utilities like l2ping, etc.
759 */
760
761 spin_lock(&conn->lock);
762
763 if (++conn->tx_ident > 128)
764 conn->tx_ident = 1;
765
766 id = conn->tx_ident;
767
768 spin_unlock(&conn->lock);
769
770 return id;
771 }
772
773 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
774 void *data)
775 {
776 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
777 u8 flags;
778
779 BT_DBG("code 0x%2.2x", code);
780
781 if (!skb)
782 return;
783
784 if (lmp_no_flush_capable(conn->hcon->hdev))
785 flags = ACL_START_NO_FLUSH;
786 else
787 flags = ACL_START;
788
789 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
790 skb->priority = HCI_PRIO_MAX;
791
792 hci_send_acl(conn->hchan, skb, flags);
793 }
794
795 static bool __chan_is_moving(struct l2cap_chan *chan)
796 {
797 return chan->move_state != L2CAP_MOVE_STABLE &&
798 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
799 }
800
/* Hand one data skb to the transport.  If the channel lives on an AMP
 * controller (and no move is in progress) it goes out on the logical
 * link; otherwise it goes out on the BR/EDR ACL with flushability
 * chosen from the channel flags and controller capabilities.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* No logical link to send on: drop the frame */
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
827
828 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
829 {
830 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
831 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
832
833 if (enh & L2CAP_CTRL_FRAME_TYPE) {
834 /* S-Frame */
835 control->sframe = 1;
836 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
837 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
838
839 control->sar = 0;
840 control->txseq = 0;
841 } else {
842 /* I-Frame */
843 control->sframe = 0;
844 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
845 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
846
847 control->poll = 0;
848 control->super = 0;
849 }
850 }
851
852 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
853 {
854 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
855 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
856
857 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
858 /* S-Frame */
859 control->sframe = 1;
860 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
861 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
862
863 control->sar = 0;
864 control->txseq = 0;
865 } else {
866 /* I-Frame */
867 control->sframe = 0;
868 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
869 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
870
871 control->poll = 0;
872 control->super = 0;
873 }
874 }
875
/* Strip and decode the control field at the start of @skb into the
 * skb's control block, using the channel's negotiated field width.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
889
890 static u32 __pack_extended_control(struct l2cap_ctrl *control)
891 {
892 u32 packed;
893
894 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
895 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
896
897 if (control->sframe) {
898 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
899 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
900 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
901 } else {
902 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
903 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
904 }
905
906 return packed;
907 }
908
909 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
910 {
911 u16 packed;
912
913 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
914 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
915
916 if (control->sframe) {
917 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
918 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
919 packed |= L2CAP_CTRL_FRAME_TYPE;
920 } else {
921 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
922 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
923 }
924
925 return packed;
926 }
927
/* Write the packed control field for @control into @skb just past the
 * basic L2CAP header, using the channel's negotiated field width.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
940
941 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
942 {
943 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
944 return L2CAP_EXT_HDR_SIZE;
945 else
946 return L2CAP_ENH_HDR_SIZE;
947 }
948
/* Allocate and build a complete S-frame PDU carrying the given packed
 * control field, including the basic L2CAP header and, when CRC16 FCS
 * is in use, the trailing checksum.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field built above */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
981
/* Transmit one supervisory frame described by @control.  Updates ack
 * bookkeeping (F-bit, RNR-sent state, last_acked_seq) before building
 * the PDU.  Does nothing for I-frame controls or while an AMP channel
 * move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is piggybacked on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* Non-SREJ frames acknowledge up to reqseq, so the
		 * delayed ack timer can be cancelled.
		 */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1022
1023 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1024 {
1025 struct l2cap_ctrl control;
1026
1027 BT_DBG("chan %p, poll %d", chan, poll);
1028
1029 memset(&control, 0, sizeof(control));
1030 control.sframe = 1;
1031 control.poll = poll;
1032
1033 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1034 control.super = L2CAP_SUPER_RNR;
1035 else
1036 control.super = L2CAP_SUPER_RR;
1037
1038 control.reqseq = chan->buffer_seq;
1039 l2cap_send_sframe(chan, &control);
1040 }
1041
1042 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1043 {
1044 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1045 }
1046
1047 static bool __amp_capable(struct l2cap_chan *chan)
1048 {
1049 struct l2cap_conn *conn = chan->conn;
1050 struct hci_dev *hdev;
1051 bool amp_available = false;
1052
1053 if (!conn->hs_enabled)
1054 return false;
1055
1056 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1057 return false;
1058
1059 read_lock(&hci_dev_list_lock);
1060 list_for_each_entry(hdev, &hci_dev_list, list) {
1061 if (hdev->amp_type != AMP_TYPE_BREDR &&
1062 test_bit(HCI_UP, &hdev->flags)) {
1063 amp_available = true;
1064 break;
1065 }
1066 }
1067 read_unlock(&hci_dev_list_lock);
1068
1069 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1070 return amp_available;
1071
1072 return false;
1073 }
1074
/* Placeholder: extended flow specification validation is not
 * implemented, so every EFS proposal is accepted.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1080
1081 void l2cap_send_conn_req(struct l2cap_chan *chan)
1082 {
1083 struct l2cap_conn *conn = chan->conn;
1084 struct l2cap_conn_req req;
1085
1086 req.scid = cpu_to_le16(chan->scid);
1087 req.psm = chan->psm;
1088
1089 chan->ident = l2cap_get_ident(conn);
1090
1091 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1092
1093 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1094 }
1095
1096 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1097 {
1098 struct l2cap_create_chan_req req;
1099 req.scid = cpu_to_le16(chan->scid);
1100 req.psm = chan->psm;
1101 req.amp_id = amp_id;
1102
1103 chan->ident = l2cap_get_ident(chan->conn);
1104
1105 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1106 sizeof(req), &req);
1107 }
1108
/* Prepare an ERTM channel for an AMP move: stop all timers, reset
 * retransmission state and receive-side queues, and pause transmission
 * by treating the remote as busy until the move completes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames at the head of tx_q that were already transmitted keep
	 * a single retry count; the walk stops at the first unsent frame.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Block new transmissions until the move is done */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1143
1144 static void l2cap_move_done(struct l2cap_chan *chan)
1145 {
1146 u8 move_role = chan->move_role;
1147 BT_DBG("chan %p", chan);
1148
1149 chan->move_state = L2CAP_MOVE_STABLE;
1150 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1151
1152 if (chan->mode != L2CAP_MODE_ERTM)
1153 return;
1154
1155 switch (move_role) {
1156 case L2CAP_MOVE_ROLE_INITIATOR:
1157 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1158 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1159 break;
1160 case L2CAP_MOVE_ROLE_RESPONDER:
1161 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1162 break;
1163 }
1164 }
1165
/* Move the channel to BT_CONNECTED and notify the owner. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1176
/* Start the outgoing connection: channels that prefer high speed first
 * discover AMP controllers; everyone else sends the connect request
 * straight away.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (!__amp_capable(chan)) {
		l2cap_send_conn_req(chan);
		return;
	}

	BT_DBG("chan %p AMP capable: discover AMPs", chan);
	a2mp_discover_amp(chan);
}
1186
/* Kick off connection establishment for @chan.  LE channels become
 * ready immediately; BR/EDR channels wait for the feature-mask
 * exchange to finish and security to pass before the connect request
 * goes out.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait until the feature-mask response has arrived */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		/* First attempt on this connection: query the peer's
		 * feature mask; the response handler (elsewhere in this
		 * file) resumes pending channels.
		 */
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1217
1218 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1219 {
1220 u32 local_feat_mask = l2cap_feat_mask;
1221 if (!disable_ertm)
1222 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1223
1224 switch (mode) {
1225 case L2CAP_MODE_ERTM:
1226 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1227 case L2CAP_MODE_STREAMING:
1228 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1229 default:
1230 return 0x00;
1231 }
1232 }
1233
/* Tear down a channel by sending an L2CAP disconnect request.
 *
 * ERTM retransmission/monitor/ack timers are stopped first so they do
 * not fire while the channel goes down.  A2MP channels have no
 * disconnect PDU of their own and just transition to BT_DISCONN.
 * @err is recorded on the channel as the disconnect reason.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1260
1261 /* ---- L2CAP connections ---- */
/* Drive connection setup for every channel on a BR/EDR link.
 *
 * Called once the remote feature mask is known (info response or info
 * timeout).  Channels in BT_CONNECT get a connect request sent, or are
 * closed when their mode is unsupported; channels in BT_CONNECT2 have
 * an incoming connect request pending and are answered here.
 *
 * Runs under conn->chan_lock; each channel is additionally locked
 * while it is processed.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Fixed/raw channels need no connect handshake */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait for security and for any pending request */
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* NOTE(review): CONF_STATE2_DEVICE presumably means
			 * the channel may not fall back to basic mode, so an
			 * unsupported mode is fatal here — confirm.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				struct sock *sk = chan->sk;

				lock_sock(sk);
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Userspace decides; report pending */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				/* Security procedure still in progress */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful, not-yet-configured channel
			 * proceeds to the configuration phase.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1341
/* Find a channel with the given SCID and source/destination bdaddr.
 * Returns the closest match; the returned channel is not locked.
 */
/* Look up a channel in the global list by source CID and addresses.
 *
 * An exact (src, dst) address match wins immediately; otherwise the
 * last wildcard (BDADDR_ANY) candidate seen is remembered and
 * returned.  A @state of 0 matches any channel state.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1382
/* Handle an LE link coming up: if a listening ATT channel matches the
 * connection's addresses, spawn a child channel for the ATT fixed CID
 * and attach it to the connection.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &conn->hcon->src, &conn->hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	/* Inherit addresses from the underlying HCI connection */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);

	__l2cap_chan_add(conn, chan);

clean:
	release_sock(parent);
}
1420
/* Advance every channel once the underlying link is established.
 *
 * For LE this may trigger outgoing pairing and ATT channel creation;
 * then each attached channel is moved forward: LE channels become
 * ready once security settles, fixed/raw channels go straight to
 * BT_CONNECTED, and connection-oriented channels in BT_CONNECT start
 * the L2CAP connect handshake.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are handled by the AMP manager */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/raw channels need no connect handshake */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1469
/* Notify channels that we cannot guarantee reliability anymore */
1471 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1472 {
1473 struct l2cap_chan *chan;
1474
1475 BT_DBG("conn %p", conn);
1476
1477 mutex_lock(&conn->chan_lock);
1478
1479 list_for_each_entry(chan, &conn->chan_l, list) {
1480 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1481 l2cap_chan_set_err(chan, err);
1482 }
1483
1484 mutex_unlock(&conn->chan_lock);
1485 }
1486
/* Info-request timer: stop waiting for the remote feature mask and
 * proceed with connection setup using whatever is known so far.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1497
1498 /*
1499 * l2cap_user
1500 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1501 * callback is called during registration. The ->remove callback is called
1502 * during unregistration.
1503 * An l2cap_user object can either be explicitly unregistered or when the
1504 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1505 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1506 * External modules must own a reference to the l2cap_conn object if they intend
1507 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1508 * any time if they don't.
1509 */
1510
/* Attach an l2cap_user to a connection.
 *
 * Fails with -EINVAL if @user is already registered (its list node is
 * populated) and with -ENODEV if the connection was already torn down.
 * The user's ->probe callback must succeed for registration to
 * complete.  Returns 0 on success.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1547 EXPORT_SYMBOL(l2cap_register_user);
1548
1549 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1550 {
1551 struct hci_dev *hdev = conn->hcon->hdev;
1552
1553 hci_dev_lock(hdev);
1554
1555 if (!user->list.next || !user->list.prev)
1556 goto out_unlock;
1557
1558 list_del(&user->list);
1559 user->list.next = NULL;
1560 user->list.prev = NULL;
1561 user->remove(conn, user);
1562
1563 out_unlock:
1564 hci_dev_unlock(hdev);
1565 }
1566 EXPORT_SYMBOL(l2cap_unregister_user);
1567
1568 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1569 {
1570 struct l2cap_user *user;
1571
1572 while (!list_empty(&conn->users)) {
1573 user = list_first_entry(&conn->users, struct l2cap_user, list);
1574 list_del(&user->list);
1575 user->list.next = NULL;
1576 user->list.prev = NULL;
1577 user->remove(conn, user);
1578 }
1579 }
1580
/* Tear down an l2cap_conn when its HCI connection goes away.
 *
 * Kills every channel (reporting @err to its owner), detaches all
 * registered users, cancels pending timers and finally drops the
 * reference taken when the connection was created.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ->close can run after chan_del */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* The info timer is only armed once the feature request went out */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1626
/* LE security timer: if SMP pairing is still pending when it fires,
 * destroy the SMP context and drop the whole connection with
 * ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1639
/* Create (or return the existing) l2cap_conn for an HCI connection.
 *
 * Allocates the L2CAP state, binds it to a freshly created hci_chan
 * and picks the outgoing MTU from the controller (LE MTU when set on
 * LE links, ACL MTU otherwise).  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Paired with hci_conn_put() in l2cap_conn_free() */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	/* High-speed support only applies to BR/EDR (ACL) links */
	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* Only one of these timers is ever used, depending on link type */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1699
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the connection object.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1707
/* Take a reference on the connection. */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
1712 EXPORT_SYMBOL(l2cap_conn_get);
1713
/* Drop a reference; frees the connection when the count hits zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1718 EXPORT_SYMBOL(l2cap_conn_put);
1719
1720 /* ---- Socket interface ---- */
1721
/* Find a channel with the given PSM and source/destination bdaddr.
 * Returns the closest match; the returned channel is not locked.
 */
/* Look up a channel in the global list by PSM and addresses.
 *
 * Mirrors l2cap_global_chan_by_scid(): an exact (src, dst) match wins
 * immediately, otherwise the last wildcard (BDADDR_ANY) candidate is
 * returned.  A @state of 0 matches any channel state.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1762
/* Initiate an outgoing L2CAP connection on @chan.
 *
 * Validates the PSM/CID combination and channel mode, resolves a route
 * to @dst, creates (or reuses) the HCI link and the l2cap_conn on top
 * of it, attaches the channel and, when the link is already up, starts
 * the L2CAP connect sequence.  Returns 0 on success or a negative
 * errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed CID may only be claimed once per connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* NOTE(review): the unlock/relock around l2cap_chan_add()
	 * presumably respects a chan_lock-before-channel lock order —
	 * confirm against l2cap_chan_add()'s locking.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1896
/* Sleep until every outstanding ERTM I-frame has been acknowledged.
 *
 * Expects the socket to be locked by the caller; the lock is released
 * around each schedule_timeout() nap.  Returns 0 on success, a
 * sock_intr_errno() value when interrupted by a signal, or a pending
 * socket error.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the nap interval after it fully elapsed */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1928
1929 static void l2cap_monitor_timeout(struct work_struct *work)
1930 {
1931 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1932 monitor_timer.work);
1933
1934 BT_DBG("chan %p", chan);
1935
1936 l2cap_chan_lock(chan);
1937
1938 if (!chan->conn) {
1939 l2cap_chan_unlock(chan);
1940 l2cap_chan_put(chan);
1941 return;
1942 }
1943
1944 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1945
1946 l2cap_chan_unlock(chan);
1947 l2cap_chan_put(chan);
1948 }
1949
1950 static void l2cap_retrans_timeout(struct work_struct *work)
1951 {
1952 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1953 retrans_timer.work);
1954
1955 BT_DBG("chan %p", chan);
1956
1957 l2cap_chan_lock(chan);
1958
1959 if (!chan->conn) {
1960 l2cap_chan_unlock(chan);
1961 l2cap_chan_put(chan);
1962 return;
1963 }
1964
1965 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1966 l2cap_chan_unlock(chan);
1967 l2cap_chan_put(chan);
1968 }
1969
/* Transmit queued frames in streaming mode (no retransmissions).
 *
 * Appends @skbs to the tx queue and sends everything immediately,
 * stamping each frame with the next tx sequence number and appending
 * an FCS when CRC16 is configured.  Nothing is sent while the channel
 * is being moved between controllers.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acks, so reqseq stays zero */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2008
/* Send as many queued I-frames as the remote tx window allows.
 *
 * Each frame is stamped with reqseq/txseq (piggy-backing an ack for
 * everything received so far), gets an FCS when CRC16 is in use, and
 * is transmitted as a clone so the original stays on tx_q for possible
 * retransmission.  Returns the number of frames sent, 0 when blocked
 * (remote busy, channel moving, window full), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Set the F-bit if a poll is pending an answer */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		 * read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2078
/* Retransmit every sequence number queued on chan->retrans_list.
 *
 * Does nothing while the peer is busy or the channel is moving.  A
 * frame exceeding chan->max_tx retransmissions triggers a disconnect.
 * Cloned frames are copied first so rewriting the control field never
 * touches shared skb data.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx of zero means an unlimited retry count */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and final bits for this transmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2157
/* Queue the single sequence number the peer asked for (reqseq) and
 * trigger a retransmission pass.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2166
/* Requeue every unacked frame from @control->reqseq up to (but not
 * including) tx_send_head for retransmission and resend them.
 * Nothing happens while the remote side is busy.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll from the peer must be answered with the final bit */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Collect everything up to the first unsent frame */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2200
/* Acknowledge received I-frames, via an S-frame when needed.
 *
 * When locally busy, an RNR goes out immediately.  Otherwise the ack
 * is piggy-backed on pending I-frames if possible; remaining frames
 * are acked with an RR once roughly 3/4 of the ack window is used, or
 * the ack timer is (re)armed to batch a later ack.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Defer any remaining ack via the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2250
/* Copy @len bytes of user data from @msg into @skb, spilling overflow
 * into a chain of continuation fragments of at most conn->mtu bytes.
 *
 * @count is how much fits into @skb itself.  Returns the number of
 * bytes consumed, or a negative errno (-EFAULT on copy failure, or
 * the allocator's error for a failed fragment).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2295
/* Build a connectionless PDU: L2CAP header + PSM + user payload.
 *
 * The head skb holds the header plus as much payload as fits in the
 * connection MTU; the rest is chained by l2cap_skbuff_fromiovec().
 * Returns the skb or an ERR_PTR from the allocator / copy routine.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2330
/* Build a basic-mode PDU: L2CAP header + user payload (no PSM, no
 * control field).  Returns the skb or an ERR_PTR from the allocator /
 * copy routine.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2363
/* Build one ERTM/streaming I-frame PDU.
 *
 * Reserves room for the (enhanced or extended) control field, an
 * optional SDU length field (@sdulen != 0 only for the first segment
 * of a segmented SDU) and, in the length accounting, the FCS.  The
 * control field is zeroed here and filled in at send time.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2417
/* Segment an outgoing SDU into ERTM/streaming PDUs.
 *
 * @chan:      channel the SDU will be sent on
 * @seg_queue: queue that receives the generated PDUs
 * @msg:       user data
 * @len:       total SDU length in bytes
 *
 * Computes the largest usable PDU payload from the HCI MTU (clamped
 * for BR/EDR links and by the remote's MPS, minus header and FCS
 * overhead), then emits PDUs tagged with the proper SAR value:
 * UNSEGMENTED for a single-PDU SDU, otherwise START / CONTINUE / END.
 * Only the START PDU carries the SDU-length field.
 *
 * Returns 0 on success or a negative error; on failure any PDUs
 * already queued are purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* First PDU loses payload room to the SDU-length field */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the START PDU carried the SDU length;
			 * later PDUs get that space back for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2487
/* Send user data on a channel, dispatching on channel type and mode.
 *
 * @chan:     channel to transmit on
 * @msg:      user data
 * @len:      number of bytes to send
 * @priority: skb priority for connectionless/basic PDUs
 *
 * Connectionless channels and basic mode build and send one PDU
 * directly.  ERTM and streaming modes first segment the SDU, then
 * hand the segment queue to the ERTM tx state machine (or send it
 * immediately in streaming mode).
 *
 * Returns the number of bytes accepted, or a negative error.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2567
2568 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2569 {
2570 struct l2cap_ctrl control;
2571 u16 seq;
2572
2573 BT_DBG("chan %p, txseq %u", chan, txseq);
2574
2575 memset(&control, 0, sizeof(control));
2576 control.sframe = 1;
2577 control.super = L2CAP_SUPER_SREJ;
2578
2579 for (seq = chan->expected_tx_seq; seq != txseq;
2580 seq = __next_seq(chan, seq)) {
2581 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2582 control.reqseq = seq;
2583 l2cap_send_sframe(chan, &control);
2584 l2cap_seq_list_append(&chan->srej_list, seq);
2585 }
2586 }
2587
2588 chan->expected_tx_seq = __next_seq(chan, txseq);
2589 }
2590
2591 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2592 {
2593 struct l2cap_ctrl control;
2594
2595 BT_DBG("chan %p", chan);
2596
2597 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2598 return;
2599
2600 memset(&control, 0, sizeof(control));
2601 control.sframe = 1;
2602 control.super = L2CAP_SUPER_SREJ;
2603 control.reqseq = chan->srej_list.tail;
2604 l2cap_send_sframe(chan, &control);
2605 }
2606
2607 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2608 {
2609 struct l2cap_ctrl control;
2610 u16 initial_head;
2611 u16 seq;
2612
2613 BT_DBG("chan %p, txseq %u", chan, txseq);
2614
2615 memset(&control, 0, sizeof(control));
2616 control.sframe = 1;
2617 control.super = L2CAP_SUPER_SREJ;
2618
2619 /* Capture initial list head to allow only one pass through the list. */
2620 initial_head = chan->srej_list.head;
2621
2622 do {
2623 seq = l2cap_seq_list_pop(&chan->srej_list);
2624 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2625 break;
2626
2627 control.reqseq = seq;
2628 l2cap_send_sframe(chan, &control);
2629 l2cap_seq_list_append(&chan->srej_list, seq);
2630 } while (chan->srej_list.head != initial_head);
2631 }
2632
2633 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2634 {
2635 struct sk_buff *acked_skb;
2636 u16 ackseq;
2637
2638 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2639
2640 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2641 return;
2642
2643 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2644 chan->expected_ack_seq, chan->unacked_frames);
2645
2646 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2647 ackseq = __next_seq(chan, ackseq)) {
2648
2649 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2650 if (acked_skb) {
2651 skb_unlink(acked_skb, &chan->tx_q);
2652 kfree_skb(acked_skb);
2653 chan->unacked_frames--;
2654 }
2655 }
2656
2657 chan->expected_ack_seq = reqseq;
2658
2659 if (chan->unacked_frames == 0)
2660 __clear_retrans_timer(chan);
2661
2662 BT_DBG("unacked_frames %u", chan->unacked_frames);
2663 }
2664
2665 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2666 {
2667 BT_DBG("chan %p", chan);
2668
2669 chan->expected_tx_seq = chan->buffer_seq;
2670 l2cap_seq_list_clear(&chan->srej_list);
2671 skb_queue_purge(&chan->srej_q);
2672 chan->rx_state = L2CAP_RX_STATE_RECV;
2673 }
2674
/* ERTM tx state machine handler for the XMIT state.
 *
 * @chan:    channel being driven
 * @control: received control field (only valid for RECV_* events)
 * @skbs:    PDUs to queue (only valid for DATA_REQUEST)
 * @event:   one of the L2CAP_EV_* transmit events
 *
 * In XMIT the channel may send new I-frames freely; poll-type events
 * (EXPLICIT_POLL, RETRANS_TO) transition the channel to WAIT_F until
 * the peer answers with the final bit set.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Append the new PDUs and transmit as much as the
		 * window allows right away.
		 */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy (RNR); poll it
			 * with RR so it resumes, and wait for the
			 * final bit.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await the
		 * final bit before sending anything else.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2746
2747 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2748 struct l2cap_ctrl *control,
2749 struct sk_buff_head *skbs, u8 event)
2750 {
2751 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2752 event);
2753
2754 switch (event) {
2755 case L2CAP_EV_DATA_REQUEST:
2756 if (chan->tx_send_head == NULL)
2757 chan->tx_send_head = skb_peek(skbs);
2758 /* Queue data, but don't send. */
2759 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2760 break;
2761 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2762 BT_DBG("Enter LOCAL_BUSY");
2763 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2764
2765 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2766 /* The SREJ_SENT state must be aborted if we are to
2767 * enter the LOCAL_BUSY state.
2768 */
2769 l2cap_abort_rx_srej_sent(chan);
2770 }
2771
2772 l2cap_send_ack(chan);
2773
2774 break;
2775 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2776 BT_DBG("Exit LOCAL_BUSY");
2777 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2778
2779 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2780 struct l2cap_ctrl local_control;
2781 memset(&local_control, 0, sizeof(local_control));
2782 local_control.sframe = 1;
2783 local_control.super = L2CAP_SUPER_RR;
2784 local_control.poll = 1;
2785 local_control.reqseq = chan->buffer_seq;
2786 l2cap_send_sframe(chan, &local_control);
2787
2788 chan->retry_count = 1;
2789 __set_monitor_timer(chan);
2790 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2791 }
2792 break;
2793 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2794 l2cap_process_reqseq(chan, control->reqseq);
2795
2796 /* Fall through */
2797
2798 case L2CAP_EV_RECV_FBIT:
2799 if (control && control->final) {
2800 __clear_monitor_timer(chan);
2801 if (chan->unacked_frames > 0)
2802 __set_retrans_timer(chan);
2803 chan->retry_count = 0;
2804 chan->tx_state = L2CAP_TX_STATE_XMIT;
2805 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2806 }
2807 break;
2808 case L2CAP_EV_EXPLICIT_POLL:
2809 /* Ignore */
2810 break;
2811 case L2CAP_EV_MONITOR_TO:
2812 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2813 l2cap_send_rr_or_rnr(chan, 1);
2814 __set_monitor_timer(chan);
2815 chan->retry_count++;
2816 } else {
2817 l2cap_send_disconn_req(chan, ECONNABORTED);
2818 }
2819 break;
2820 default:
2821 break;
2822 }
2823 }
2824
2825 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2826 struct sk_buff_head *skbs, u8 event)
2827 {
2828 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2829 chan, control, skbs, event, chan->tx_state);
2830
2831 switch (chan->tx_state) {
2832 case L2CAP_TX_STATE_XMIT:
2833 l2cap_tx_state_xmit(chan, control, skbs, event);
2834 break;
2835 case L2CAP_TX_STATE_WAIT_F:
2836 l2cap_tx_state_wait_f(chan, control, skbs, event);
2837 break;
2838 default:
2839 /* Ignore event */
2840 break;
2841 }
2842 }
2843
/* Feed a received reqseq/F-bit control field into the tx state
 * machine so acked frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2850
/* Feed only the F-bit of a received control field into the tx state
 * machine (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2857
2858 /* Copy frame to all raw sockets on that connection */
2859 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2860 {
2861 struct sk_buff *nskb;
2862 struct l2cap_chan *chan;
2863
2864 BT_DBG("conn %p", conn);
2865
2866 mutex_lock(&conn->chan_lock);
2867
2868 list_for_each_entry(chan, &conn->chan_l, list) {
2869 struct sock *sk = chan->sk;
2870 if (chan->chan_type != L2CAP_CHAN_RAW)
2871 continue;
2872
2873 /* Don't send frame to the socket it came from */
2874 if (skb->sk == sk)
2875 continue;
2876 nskb = skb_clone(skb, GFP_KERNEL);
2877 if (!nskb)
2878 continue;
2879
2880 if (chan->ops->recv(chan, nskb))
2881 kfree_skb(nskb);
2882 }
2883
2884 mutex_unlock(&conn->chan_lock);
2885 }
2886
2887 /* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command skb.
 *
 * @conn:  connection the command will be sent on
 * @code:  signalling command code
 * @ident: command identifier matching requests to responses
 * @dlen:  length of @data
 * @data:  command payload
 *
 * The command is placed on the (LE or BR/EDR) signalling channel.
 * Payload that does not fit in one MTU-sized buffer is carried in
 * continuation fragments chained on the skb's frag_list.
 *
 * Returns the skb or NULL on allocation failure / undersized MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Whatever payload fits after the headers goes in the
		 * first skb; the rest is fragmented below.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb also frees any fragments already chained */
	kfree_skb(skb);
	return NULL;
}
2953
2954 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2955 unsigned long *val)
2956 {
2957 struct l2cap_conf_opt *opt = *ptr;
2958 int len;
2959
2960 len = L2CAP_CONF_OPT_SIZE + opt->len;
2961 *ptr += len;
2962
2963 *type = opt->type;
2964 *olen = opt->len;
2965
2966 switch (opt->len) {
2967 case 1:
2968 *val = *((u8 *) opt->val);
2969 break;
2970
2971 case 2:
2972 *val = get_unaligned_le16(opt->val);
2973 break;
2974
2975 case 4:
2976 *val = get_unaligned_le32(opt->val);
2977 break;
2978
2979 default:
2980 *val = (unsigned long) opt->val;
2981 break;
2982 }
2983
2984 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2985 return len;
2986 }
2987
2988 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2989 {
2990 struct l2cap_conf_opt *opt = *ptr;
2991
2992 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2993
2994 opt->type = type;
2995 opt->len = len;
2996
2997 switch (len) {
2998 case 1:
2999 *((u8 *) opt->val) = val;
3000 break;
3001
3002 case 2:
3003 put_unaligned_le16(val, opt->val);
3004 break;
3005
3006 case 4:
3007 put_unaligned_le32(val, opt->val);
3008 break;
3009
3010 default:
3011 memcpy(opt->val, (void *) val, len);
3012 break;
3013 }
3014
3015 *ptr += L2CAP_CONF_OPT_SIZE + len;
3016 }
3017
3018 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3019 {
3020 struct l2cap_conf_efs efs;
3021
3022 switch (chan->mode) {
3023 case L2CAP_MODE_ERTM:
3024 efs.id = chan->local_id;
3025 efs.stype = chan->local_stype;
3026 efs.msdu = cpu_to_le16(chan->local_msdu);
3027 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3028 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3029 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3030 break;
3031
3032 case L2CAP_MODE_STREAMING:
3033 efs.id = 1;
3034 efs.stype = L2CAP_SERV_BESTEFFORT;
3035 efs.msdu = cpu_to_le16(chan->local_msdu);
3036 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3037 efs.acc_lat = 0;
3038 efs.flush_to = 0;
3039 break;
3040
3041 default:
3042 return;
3043 }
3044
3045 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3046 (unsigned long) &efs);
3047 }
3048
3049 static void l2cap_ack_timeout(struct work_struct *work)
3050 {
3051 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3052 ack_timer.work);
3053 u16 frames_to_ack;
3054
3055 BT_DBG("chan %p", chan);
3056
3057 l2cap_chan_lock(chan);
3058
3059 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3060 chan->last_acked_seq);
3061
3062 if (frames_to_ack)
3063 l2cap_send_rr_or_rnr(chan, 0);
3064
3065 l2cap_chan_unlock(chan);
3066 l2cap_chan_put(chan);
3067 }
3068
/* Reset per-channel sequencing/SAR state and, for ERTM mode, set up
 * the timers, receive queue and sequence lists.
 *
 * Returns 0 on success or a negative error from sequence-list
 * allocation (in which case any partially allocated list is freed).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3113
3114 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3115 {
3116 switch (mode) {
3117 case L2CAP_MODE_STREAMING:
3118 case L2CAP_MODE_ERTM:
3119 if (l2cap_mode_supported(mode, remote_feat_mask))
3120 return mode;
3121 /* fall through */
3122 default:
3123 return L2CAP_MODE_BASIC;
3124 }
3125 }
3126
3127 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3128 {
3129 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3130 }
3131
3132 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3133 {
3134 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3135 }
3136
/* Fill in the RFC option's retransmission and monitor timeouts.
 * On an AMP link the timeout is derived from the controller's
 * best-effort flush timeout; otherwise spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field in the RFC option */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3174
3175 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3176 {
3177 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3178 __l2cap_ews_supported(chan->conn)) {
3179 /* use extended control field */
3180 set_bit(FLAG_EXT_CTRL, &chan->flags);
3181 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3182 } else {
3183 chan->tx_win = min_t(u16, chan->tx_win,
3184 L2CAP_DEFAULT_TX_WINDOW);
3185 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3186 }
3187 chan->ack_win = chan->tx_win;
3188 }
3189
/* Build an outgoing configuration request for the channel into @data.
 *
 * On the first request the channel mode may be downgraded based on
 * the remote feature mask; subsequent requests keep the negotiated
 * mode.  MTU, RFC, EFS, EWS and FCS options are appended as the mode
 * requires.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* A remote without ERTM/streaming support implies basic
		 * mode; no RFC option is needed in that case.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for the worst-case L2CAP overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* The full window goes in the EWS option when extended
		 * control is in use (RFC carries only the clamped value).
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3307
/* Parse the peer's configuration request (stored in chan->conf_req)
 * and build our configuration response into @data.
 *
 * The first pass walks the option list, recording MTU/RFC/FCS/EFS/EWS
 * values and flagging unknown non-hint options.  The channel mode is
 * then reconciled with the requested RFC mode, and the response
 * options are emitted with an overall result of SUCCESS, PENDING,
 * UNACCEPT or UNKNOWN.
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * negotiation cannot proceed (e.g. EWS without high-speed support, or
 * an unresolvable mode mismatch).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support */
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint options are echoed back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode may only be adjusted early in the negotiation */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * NO_TRAFFIC.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote MPS to what fits in our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3521
/* Parse the peer's configuration response and build the follow-up
 * configuration request into @data.
 *
 * @rsp/@len:  the received response option list
 * @data:      buffer for the new request
 * @result:    in/out negotiation result; may be downgraded to
 *             UNACCEPT when the peer proposed an undersized MTU
 *
 * Accepted values (MTU, flush timeout, RFC timeouts/MPS, ack window,
 * EFS parameters) are committed to the channel.  Returns the number
 * of request bytes written, or -ECONNREFUSED on an unresolvable mode
 * or service-type conflict.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State-2 devices may not change mode mid-config */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must be compatible */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* The peer cannot force a non-basic mode on a basic channel */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3630
3631 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3632 u16 result, u16 flags)
3633 {
3634 struct l2cap_conf_rsp *rsp = data;
3635 void *ptr = rsp->data;
3636
3637 BT_DBG("chan %p", chan);
3638
3639 rsp->scid = cpu_to_le16(chan->dcid);
3640 rsp->result = cpu_to_le16(result);
3641 rsp->flags = cpu_to_le16(flags);
3642
3643 return ptr - data;
3644 }
3645
3646 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3647 {
3648 struct l2cap_conn_rsp rsp;
3649 struct l2cap_conn *conn = chan->conn;
3650 u8 buf[128];
3651 u8 rsp_code;
3652
3653 rsp.scid = cpu_to_le16(chan->dcid);
3654 rsp.dcid = cpu_to_le16(chan->scid);
3655 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3656 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3657
3658 if (chan->hs_hcon)
3659 rsp_code = L2CAP_CREATE_CHAN_RSP;
3660 else
3661 rsp_code = L2CAP_CONN_RSP;
3662
3663 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3664
3665 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3666
3667 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3668 return;
3669
3670 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3671 l2cap_build_conf_req(chan, buf), buf);
3672 chan->num_conf_req++;
3673 }
3674
3675 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3676 {
3677 int type, olen;
3678 unsigned long val;
3679 /* Use sane default values in case a misbehaving remote device
3680 * did not send an RFC or extended window size option.
3681 */
3682 u16 txwin_ext = chan->ack_win;
3683 struct l2cap_conf_rfc rfc = {
3684 .mode = chan->mode,
3685 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3686 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3687 .max_pdu_size = cpu_to_le16(chan->imtu),
3688 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3689 };
3690
3691 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3692
3693 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3694 return;
3695
3696 while (len >= L2CAP_CONF_OPT_SIZE) {
3697 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3698
3699 switch (type) {
3700 case L2CAP_CONF_RFC:
3701 if (olen == sizeof(rfc))
3702 memcpy(&rfc, (void *)val, olen);
3703 break;
3704 case L2CAP_CONF_EWS:
3705 txwin_ext = val;
3706 break;
3707 }
3708 }
3709
3710 switch (rfc.mode) {
3711 case L2CAP_MODE_ERTM:
3712 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3713 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3714 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3715 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3716 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3717 else
3718 chan->ack_win = min_t(u16, chan->ack_win,
3719 rfc.txwin_size);
3720 break;
3721 case L2CAP_MODE_STREAMING:
3722 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3723 }
3724 }
3725
3726 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3727 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3728 u8 *data)
3729 {
3730 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3731
3732 if (cmd_len < sizeof(*rej))
3733 return -EPROTO;
3734
3735 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3736 return 0;
3737
3738 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3739 cmd->ident == conn->info_ident) {
3740 cancel_delayed_work(&conn->info_timer);
3741
3742 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3743 conn->info_ident = 0;
3744
3745 l2cap_conn_start(conn);
3746 }
3747
3748 return 0;
3749 }
3750
/* Handle an incoming Connection Request (or AMP Create Channel Request
 * when amp_id != AMP_ID_BREDR).
 *
 * Looks up a channel listening on the requested PSM, performs the
 * security check, allocates a child channel and replies with rsp_code.
 * A response is sent on every path; the new channel (or NULL on
 * rejection) is returned to the caller.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: conn->chan_lock, then the parent socket */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Inherit addressing from the ACL; the peer's scid becomes our
	 * destination CID.
	 */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace must authorize before we accept */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet: answer "pending"
		 * and trigger an Information Request below.
		 */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off the feature mask exchange if it was the reason for the
	 * "pending / no info" answer above.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3889
3890 static int l2cap_connect_req(struct l2cap_conn *conn,
3891 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3892 {
3893 struct hci_dev *hdev = conn->hcon->hdev;
3894 struct hci_conn *hcon = conn->hcon;
3895
3896 if (cmd_len < sizeof(struct l2cap_conn_req))
3897 return -EPROTO;
3898
3899 hci_dev_lock(hdev);
3900 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3901 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3902 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3903 hcon->dst_type, 0, NULL, 0,
3904 hcon->dev_class);
3905 hci_dev_unlock(hdev);
3906
3907 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3908 return 0;
3909 }
3910
/* Handle a Connection Response / Create Channel Response from the peer.
 *
 * Matches the response to a channel either by our source CID (scid from
 * the response's dcid view) or, when scid is 0, by the command ident we
 * used for the request.  On success the channel moves to BT_CONFIG and
 * configuration starts; on a final error the channel is deleted.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* Rejections may omit the CID; fall back to the ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	/* Lock order: conn->chan_lock was taken above, then the channel */
	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Don't send a second Config Request */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a final refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3983
3984 static inline void set_default_fcs(struct l2cap_chan *chan)
3985 {
3986 /* FCS is enabled only in ERTM or streaming mode, if one or both
3987 * sides request it.
3988 */
3989 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3990 chan->fcs = L2CAP_FCS_NONE;
3991 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3992 chan->fcs = L2CAP_FCS_CRC16;
3993 }
3994
3995 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3996 u8 ident, u16 flags)
3997 {
3998 struct l2cap_conn *conn = chan->conn;
3999
4000 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4001 flags);
4002
4003 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4004 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4005
4006 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4007 l2cap_build_conf_rsp(chan, data,
4008 L2CAP_CONF_SUCCESS, flags), data);
4009 }
4010
/* Handle an incoming Configure Request.
 *
 * Options may arrive split over several requests (continuation flag);
 * they are accumulated in chan->conf_req until the final fragment, then
 * parsed and answered in one go.  When both directions are configured
 * (CONF_OUTPUT_DONE + CONF_INPUT_DONE) the channel becomes ready.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked; unlocked at "unlock" below */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -EBADSLT;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize FCS/ERTM and go up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our outgoing Config Request has not been sent yet; send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* AMP: response is deferred until the logical link
			 * confirms; remember the ident for later.
			 */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4123
/* Handle an incoming Configure Response.
 *
 * SUCCESS applies the negotiated options; PENDING (EFS) defers the
 * final response; UNACCEPT triggers a renegotiation with the peer's
 * preferred values (bounded by L2CAP_CONF_MAX_CONF_RSP); anything else
 * disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked; unlocked at "done" below */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP: wait for the logical link before
				 * confirming the configuration.
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many renegotiation rounds; give up and
		 * disconnect via the default branch below.
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize FCS/ERTM and go up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4235
/* Handle an incoming Disconnection Request: acknowledge it, shut the
 * socket down and delete the channel.
 *
 * The extra l2cap_chan_hold/put pair keeps the channel alive across
 * ops->close() after l2cap_chan_del() has dropped it from the
 * connection list.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return -EBADSLT;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	/* Unlock before close: ops->close takes its own locks */
	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4286
/* Handle the peer's Disconnection Response: the disconnect we requested
 * is complete, so delete the channel with no error.
 *
 * Mirrors l2cap_disconnect_req(): hold/put keeps the channel alive for
 * ops->close() after it leaves the connection list.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	/* Unlock before close: ops->close takes its own locks */
	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4325
/* Handle an incoming Information Request and answer with our feature
 * mask, fixed channel map, or "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE(review): this rewrites the file-scope
		 * l2cap_fixed_chan table in place based on this
		 * connection's hs_enabled; concurrent info requests on
		 * connections with different settings would race on the
		 * shared byte - confirm whether serialization is
		 * guaranteed by the caller.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		/* Unknown information type */
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4380
/* Handle an incoming Information Response during the discovery phase.
 *
 * After a successful feature-mask response a second request for the
 * fixed channel map may follow; once everything (or an error) arrives,
 * discovery is marked done and pending channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer could not answer; give up on discovery and proceed */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up by asking for the fixed channel map */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4443
/* Handle an incoming Create Channel Request (AMP).
 *
 * amp_id == AMP_ID_BREDR falls back to a normal BR/EDR connect.  For a
 * real AMP controller id, validate the controller and bind the new
 * channel to the high-speed link; invalid ids are answered with
 * L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	/* l2cap_connect() sends the Create Channel Response itself */
	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			return -EBADSLT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		/* Bind the channel to the high-speed link; FCS is not used
		 * on AMP links.
		 */
		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4518
4519 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4520 {
4521 struct l2cap_move_chan_req req;
4522 u8 ident;
4523
4524 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4525
4526 ident = l2cap_get_ident(chan->conn);
4527 chan->ident = ident;
4528
4529 req.icid = cpu_to_le16(chan->scid);
4530 req.dest_amp_id = dest_amp_id;
4531
4532 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4533 &req);
4534
4535 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4536 }
4537
4538 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4539 {
4540 struct l2cap_move_chan_rsp rsp;
4541
4542 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4543
4544 rsp.icid = cpu_to_le16(chan->dcid);
4545 rsp.result = cpu_to_le16(result);
4546
4547 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4548 sizeof(rsp), &rsp);
4549 }
4550
4551 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4552 {
4553 struct l2cap_move_chan_cfm cfm;
4554
4555 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4556
4557 chan->ident = l2cap_get_ident(chan->conn);
4558
4559 cfm.icid = cpu_to_le16(chan->scid);
4560 cfm.result = cpu_to_le16(result);
4561
4562 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4563 sizeof(cfm), &cfm);
4564
4565 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4566 }
4567
4568 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4569 {
4570 struct l2cap_move_chan_cfm cfm;
4571
4572 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4573
4574 cfm.icid = cpu_to_le16(icid);
4575 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4576
4577 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4578 sizeof(cfm), &cfm);
4579 }
4580
4581 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4582 u16 icid)
4583 {
4584 struct l2cap_move_chan_cfm_rsp rsp;
4585
4586 BT_DBG("icid 0x%4.4x", icid);
4587
4588 rsp.icid = cpu_to_le16(icid);
4589 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4590 }
4591
4592 static void __release_logical_link(struct l2cap_chan *chan)
4593 {
4594 chan->hs_hchan = NULL;
4595 chan->hs_hcon = NULL;
4596
4597 /* Placeholder - release the logical link */
4598 }
4599
4600 static void l2cap_logical_fail(struct l2cap_chan *chan)
4601 {
4602 /* Logical link setup failed */
4603 if (chan->state != BT_CONNECTED) {
4604 /* Create channel failure, disconnect */
4605 l2cap_send_disconn_req(chan, ECONNRESET);
4606 return;
4607 }
4608
4609 switch (chan->move_role) {
4610 case L2CAP_MOVE_ROLE_RESPONDER:
4611 l2cap_move_done(chan);
4612 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4613 break;
4614 case L2CAP_MOVE_ROLE_INITIATOR:
4615 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4616 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4617 /* Remote has only sent pending or
4618 * success responses, clean up
4619 */
4620 l2cap_move_done(chan);
4621 }
4622
4623 /* Other amp move states imply that the move
4624 * has already aborted
4625 */
4626 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4627 break;
4628 }
4629 }
4630
/* Logical link came up for a channel being created on AMP: attach the
 * link, send the deferred EFS Configure Response and, if the peer's
 * configuration is already in, bring the channel up.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the response was deferred */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4653
/* Logical link came up during a channel move: advance the move state
 * machine according to our role and current wait state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local receive path is unblocked */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4687
4688 /* Call with chan locked */
4689 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4690 u8 status)
4691 {
4692 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4693
4694 if (status) {
4695 l2cap_logical_fail(chan);
4696 __release_logical_link(chan);
4697 return;
4698 }
4699
4700 if (chan->state != BT_CONNECTED) {
4701 /* Ignore logical link if channel is on BR/EDR */
4702 if (chan->local_amp_id != AMP_ID_BREDR)
4703 l2cap_logical_finish_create(chan, hchan);
4704 } else {
4705 l2cap_logical_finish_move(chan, hchan);
4706 }
4707 }
4708
4709 void l2cap_move_start(struct l2cap_chan *chan)
4710 {
4711 BT_DBG("chan %p", chan);
4712
4713 if (chan->local_amp_id == AMP_ID_BREDR) {
4714 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4715 return;
4716 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4717 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4718 /* Placeholder - start physical link setup */
4719 } else {
4720 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4721 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4722 chan->move_id = 0;
4723 l2cap_move_setup(chan);
4724 l2cap_send_move_chan_req(chan, 0);
4725 }
4726 }
4727
/* Continue channel creation once the AMP physical link attempt has a
 * result.
 *
 * Outgoing channels either proceed with a Create Channel Request on the
 * AMP or fall back to a plain BR/EDR connect.  Incoming channels get
 * their (deferred) Create Channel Response and, on success, start
 * configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		/* chan->ident was saved from the original request */
		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4779
4780 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4781 u8 remote_amp_id)
4782 {
4783 l2cap_move_setup(chan);
4784 chan->move_id = local_amp_id;
4785 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4786
4787 l2cap_send_move_chan_req(chan, remote_amp_id);
4788 }
4789
/* As move responder: answer the pending move request according to the
 * availability of the AMP logical link.  The logical-link lookup is
 * still a placeholder, so hchan is always NULL here and the move is
 * rejected with L2CAP_MR_NOT_ALLOWED.  The result parameter is
 * currently unused.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4814
4815 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4816 {
4817 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4818 u8 rsp_result;
4819 if (result == -EINVAL)
4820 rsp_result = L2CAP_MR_BAD_ID;
4821 else
4822 rsp_result = L2CAP_MR_NOT_ALLOWED;
4823
4824 l2cap_send_move_chan_rsp(chan, rsp_result);
4825 }
4826
4827 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4828 chan->move_state = L2CAP_MOVE_STABLE;
4829
4830 /* Restart data transmission */
4831 l2cap_ertm_send(chan);
4832 }
4833
/* Invoke with locked chan */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* NOTE(review): this early path unlocks the channel before
	 * returning while all other paths leave it locked for the
	 * caller - confirm callers expect this asymmetry.
	 */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Physical link came up for a channel still being created */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		/* Physical link failed during a move: cancel it */
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4867
/* Handle an incoming Move Channel Request.  Validates that the channel
 * is eligible to move (dynamic CID, ERTM/streaming mode, policy allows
 * it, destination controller exists and is up), resolves move
 * collisions by bd_addr comparison, and responds with success,
 * pending, or an error result.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; the lock is
 * released before returning.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves are only meaningful when high speed is enabled */
	if (!conn->hs_enabled)
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* No such channel: reject directly using the icid */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		/* Destination AMP controller must exist and be up */
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4965
/* Advance the move state machine after a successful or pending Move
 * Channel Response.  The hchan lookup is still a placeholder (always
 * NULL), so the L2CAP_MOVE_WAIT_RSP branch currently always sends an
 * unconfirmed result.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; the lock is
 * released before returning.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Channel gone: confirm using the icid alone */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result re-arms the extended response timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5055
5056 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5057 u16 result)
5058 {
5059 struct l2cap_chan *chan;
5060
5061 chan = l2cap_get_chan_by_ident(conn, ident);
5062 if (!chan) {
5063 /* Could not locate channel, icid is best guess */
5064 l2cap_send_move_chan_cfm_icid(conn, icid);
5065 return;
5066 }
5067
5068 __clear_chan_timer(chan);
5069
5070 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5071 if (result == L2CAP_MR_COLLISION) {
5072 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5073 } else {
5074 /* Cleanup - cancel move */
5075 chan->move_id = chan->local_amp_id;
5076 l2cap_move_done(chan);
5077 }
5078 }
5079
5080 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5081
5082 l2cap_chan_unlock(chan);
5083 }
5084
5085 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5086 struct l2cap_cmd_hdr *cmd,
5087 u16 cmd_len, void *data)
5088 {
5089 struct l2cap_move_chan_rsp *rsp = data;
5090 u16 icid, result;
5091
5092 if (cmd_len != sizeof(*rsp))
5093 return -EPROTO;
5094
5095 icid = le16_to_cpu(rsp->icid);
5096 result = le16_to_cpu(rsp->result);
5097
5098 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5099
5100 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5101 l2cap_move_continue(conn, icid, result);
5102 else
5103 l2cap_move_fail(conn, cmd->ident, icid, result);
5104
5105 return 0;
5106 }
5107
/* Handle an incoming Move Channel Confirm.  On a confirmed result the
 * channel commits to the new controller (releasing the AMP logical
 * link when moving back to BR/EDR); otherwise the move is rolled back.
 * A Confirm Response is always sent, even when the icid is unknown.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; the lock is
 * released before returning.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit to the destination controller */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Roll the move back */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5149
/* Handle an incoming Move Channel Confirm Response, the final step of
 * a move: commit to the destination controller and release the AMP
 * logical link when returning to BR/EDR.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; the lock is
 * released before returning.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		/* Move is complete: adopt the new controller id */
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5184
5185 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5186 u16 to_multiplier)
5187 {
5188 u16 max_latency;
5189
5190 if (min > max || min < 6 || max > 3200)
5191 return -EINVAL;
5192
5193 if (to_multiplier < 10 || to_multiplier > 3200)
5194 return -EINVAL;
5195
5196 if (max >= to_multiplier * 8)
5197 return -EINVAL;
5198
5199 max_latency = (to_multiplier * 8 / max) - 1;
5200 if (latency > 499 || latency > max_latency)
5201 return -EINVAL;
5202
5203 return 0;
5204 }
5205
/* Handle an LE Connection Parameter Update Request: validate the
 * requested parameters, send an accept/reject response, and apply
 * accepted parameters via the controller.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master can be asked to update the parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Accepted: ask the controller to apply the new parameters */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5248
/* Dispatch a single BR/EDR signaling command to its handler.  Only
 * handlers whose failures should trigger a Command Reject propagate
 * their return value; the rest are called fire-and-forget.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5328
5329 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5330 struct l2cap_cmd_hdr *cmd, u8 *data)
5331 {
5332 switch (cmd->code) {
5333 case L2CAP_COMMAND_REJ:
5334 return 0;
5335
5336 case L2CAP_CONN_PARAM_UPDATE_REQ:
5337 return l2cap_conn_param_update_req(conn, cmd, data);
5338
5339 case L2CAP_CONN_PARAM_UPDATE_RSP:
5340 return 0;
5341
5342 default:
5343 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5344 return -EINVAL;
5345 }
5346 }
5347
5348 static __le16 l2cap_err_to_reason(int err)
5349 {
5350 switch (err) {
5351 case -EBADSLT:
5352 return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
5353 case -EMSGSIZE:
5354 return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED);
5355 case -EINVAL:
5356 case -EPROTO:
5357 default:
5358 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5359 }
5360 }
5361
5362 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5363 struct sk_buff *skb)
5364 {
5365 struct hci_conn *hcon = conn->hcon;
5366 struct l2cap_cmd_hdr *cmd;
5367 u16 len;
5368 int err;
5369
5370 if (hcon->type != LE_LINK)
5371 goto drop;
5372
5373 if (skb->len < L2CAP_CMD_HDR_SIZE)
5374 goto drop;
5375
5376 cmd = (void *) skb->data;
5377 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5378
5379 len = le16_to_cpu(cmd->len);
5380
5381 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5382
5383 if (len != skb->len || !cmd->ident) {
5384 BT_DBG("corrupted command");
5385 goto drop;
5386 }
5387
5388 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5389 if (err) {
5390 struct l2cap_cmd_rej_unk rej;
5391
5392 BT_ERR("Wrong link type (%d)", err);
5393
5394 rej.reason = l2cap_err_to_reason(err);
5395 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5396 sizeof(rej), &rej);
5397 }
5398
5399 drop:
5400 kfree_skb(skb);
5401 }
5402
5403 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5404 struct sk_buff *skb)
5405 {
5406 struct hci_conn *hcon = conn->hcon;
5407 u8 *data = skb->data;
5408 int len = skb->len;
5409 struct l2cap_cmd_hdr cmd;
5410 int err;
5411
5412 l2cap_raw_recv(conn, skb);
5413
5414 if (hcon->type != ACL_LINK)
5415 goto drop;
5416
5417 while (len >= L2CAP_CMD_HDR_SIZE) {
5418 u16 cmd_len;
5419 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5420 data += L2CAP_CMD_HDR_SIZE;
5421 len -= L2CAP_CMD_HDR_SIZE;
5422
5423 cmd_len = le16_to_cpu(cmd.len);
5424
5425 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5426 cmd.ident);
5427
5428 if (cmd_len > len || !cmd.ident) {
5429 BT_DBG("corrupted command");
5430 break;
5431 }
5432
5433 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5434 if (err) {
5435 struct l2cap_cmd_rej_unk rej;
5436
5437 BT_ERR("Wrong link type (%d)", err);
5438
5439 rej.reason = l2cap_err_to_reason(err);
5440 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5441 sizeof(rej), &rej);
5442 }
5443
5444 data += cmd_len;
5445 len -= cmd_len;
5446 }
5447
5448 drop:
5449 kfree_skb(skb);
5450 }
5451
/* Verify the CRC16 FCS on a received ERTM/streaming frame.  Returns 0
 * when the FCS matches (or FCS is not in use), -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off skb->len; the bytes are still in the
		 * buffer, so reading at skb->data + skb->len picks up
		 * the received FCS just trimmed away.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The CRC covers the header preceding skb->data too */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5472
/* Answer a poll (P-bit) by sending a frame carrying the F-bit: an RNR
 * when locally busy, otherwise pending I-frames, or a plain RR if no
 * I-frame went out carrying the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	/* CONN_SEND_FBIT is cleared by whichever frame carries the
	 * F-bit; if it is still set here, nothing carried it yet.
	 */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5506
/* Append new_frag to skb's frag_list, tracking the tail in *last_frag
 * so appends stay O(1).  For the first fragment *last_frag is skb
 * itself; for later fragments it is the previous fragment.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Keep the head skb's accounting in sync with the new data */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5525
/* Reassemble an SDU from I-frames according to the SAR bits in
 * control.  Ownership: once an skb is consumed (delivered or linked
 * into chan->sdu) the local skb pointer is set to NULL so the error
 * path below does not free it twice.  Any SAR sequence violation
 * leaves err as -EINVAL and discards the partial SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must leave the SDU incomplete */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Discard this frame (if still owned) and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5607
/* Re-segment queued data after a channel move changes the MPS.
 * Placeholder: not yet implemented, so simply report success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5613
5614 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5615 {
5616 u8 event;
5617
5618 if (chan->mode != L2CAP_MODE_ERTM)
5619 return;
5620
5621 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5622 l2cap_tx(chan, NULL, NULL, event);
5623 }
5624
/* Drain in-sequence frames from the SREJ queue after missing frames
 * have been retransmitted, delivering them for reassembly.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: stop until the missing frame arrives */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	/* Queue fully drained: back to normal receive state */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5658
/* Handle a received SREJ S-frame: retransmit the single requested
 * frame, taking the P/F bits and the SREJ-ACT bookkeeping into
 * account.  Protocol violations disconnect the channel.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for a sequence number never sent is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll set: the retransmission must carry the F-bit */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this SREJ answers one
			 * already acted on (SREJ-ACT for this reqseq).
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5716
/* Handle a received REJ S-frame: retransmit all unacked frames
 * starting from reqseq, honoring the F-bit/REJ-ACT handshake.
 * Protocol violations disconnect the channel.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for a sequence number never sent is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this REJ was not already acted on */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5753
/* Classify the tx sequence number of a received I-frame relative to
 * the expected sequence, the ack state, and any outstanding SREJs.
 * Returns one of the L2CAP_TXSEQ_* classifications that drive the
 * receive state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the frame we asked for next */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5839
/* ERTM receive state machine, normal RECV state.  Processes one
 * received event (I-frame or supervisory frame).  skb_in_use tracks
 * whether the skb has been handed off (delivered or queued); if not,
 * it is freed on the way out.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* The ack info still feeds the tx state machine */
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Drop the payload, but use the ack info */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Don't retransmit during a channel move */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Free the skb unless it was delivered or queued above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5973
5974 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5975 struct l2cap_ctrl *control,
5976 struct sk_buff *skb, u8 event)
5977 {
5978 int err = 0;
5979 u16 txseq = control->txseq;
5980 bool skb_in_use = false;
5981
5982 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5983 event);
5984
5985 switch (event) {
5986 case L2CAP_EV_RECV_IFRAME:
5987 switch (l2cap_classify_txseq(chan, txseq)) {
5988 case L2CAP_TXSEQ_EXPECTED:
5989 /* Keep frame for reassembly later */
5990 l2cap_pass_to_tx(chan, control);
5991 skb_queue_tail(&chan->srej_q, skb);
5992 skb_in_use = true;
5993 BT_DBG("Queued %p (queue len %d)", skb,
5994 skb_queue_len(&chan->srej_q));
5995
5996 chan->expected_tx_seq = __next_seq(chan, txseq);
5997 break;
5998 case L2CAP_TXSEQ_EXPECTED_SREJ:
5999 l2cap_seq_list_pop(&chan->srej_list);
6000
6001 l2cap_pass_to_tx(chan, control);
6002 skb_queue_tail(&chan->srej_q, skb);
6003 skb_in_use = true;
6004 BT_DBG("Queued %p (queue len %d)", skb,
6005 skb_queue_len(&chan->srej_q));
6006
6007 err = l2cap_rx_queued_iframes(chan);
6008 if (err)
6009 break;
6010
6011 break;
6012 case L2CAP_TXSEQ_UNEXPECTED:
6013 /* Got a frame that can't be reassembled yet.
6014 * Save it for later, and send SREJs to cover
6015 * the missing frames.
6016 */
6017 skb_queue_tail(&chan->srej_q, skb);
6018 skb_in_use = true;
6019 BT_DBG("Queued %p (queue len %d)", skb,
6020 skb_queue_len(&chan->srej_q));
6021
6022 l2cap_pass_to_tx(chan, control);
6023 l2cap_send_srej(chan, control->txseq);
6024 break;
6025 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6026 /* This frame was requested with an SREJ, but
6027 * some expected retransmitted frames are
6028 * missing. Request retransmission of missing
6029 * SREJ'd frames.
6030 */
6031 skb_queue_tail(&chan->srej_q, skb);
6032 skb_in_use = true;
6033 BT_DBG("Queued %p (queue len %d)", skb,
6034 skb_queue_len(&chan->srej_q));
6035
6036 l2cap_pass_to_tx(chan, control);
6037 l2cap_send_srej_list(chan, control->txseq);
6038 break;
6039 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6040 /* We've already queued this frame. Drop this copy. */
6041 l2cap_pass_to_tx(chan, control);
6042 break;
6043 case L2CAP_TXSEQ_DUPLICATE:
6044 /* Expecting a later sequence number, so this frame
6045 * was already received. Ignore it completely.
6046 */
6047 break;
6048 case L2CAP_TXSEQ_INVALID_IGNORE:
6049 break;
6050 case L2CAP_TXSEQ_INVALID:
6051 default:
6052 l2cap_send_disconn_req(chan, ECONNRESET);
6053 break;
6054 }
6055 break;
6056 case L2CAP_EV_RECV_RR:
6057 l2cap_pass_to_tx(chan, control);
6058 if (control->final) {
6059 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6060
6061 if (!test_and_clear_bit(CONN_REJ_ACT,
6062 &chan->conn_state)) {
6063 control->final = 0;
6064 l2cap_retransmit_all(chan, control);
6065 }
6066
6067 l2cap_ertm_send(chan);
6068 } else if (control->poll) {
6069 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6070 &chan->conn_state) &&
6071 chan->unacked_frames) {
6072 __set_retrans_timer(chan);
6073 }
6074
6075 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6076 l2cap_send_srej_tail(chan);
6077 } else {
6078 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6079 &chan->conn_state) &&
6080 chan->unacked_frames)
6081 __set_retrans_timer(chan);
6082
6083 l2cap_send_ack(chan);
6084 }
6085 break;
6086 case L2CAP_EV_RECV_RNR:
6087 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6088 l2cap_pass_to_tx(chan, control);
6089 if (control->poll) {
6090 l2cap_send_srej_tail(chan);
6091 } else {
6092 struct l2cap_ctrl rr_control;
6093 memset(&rr_control, 0, sizeof(rr_control));
6094 rr_control.sframe = 1;
6095 rr_control.super = L2CAP_SUPER_RR;
6096 rr_control.reqseq = chan->buffer_seq;
6097 l2cap_send_sframe(chan, &rr_control);
6098 }
6099
6100 break;
6101 case L2CAP_EV_RECV_REJ:
6102 l2cap_handle_rej(chan, control);
6103 break;
6104 case L2CAP_EV_RECV_SREJ:
6105 l2cap_handle_srej(chan, control);
6106 break;
6107 }
6108
6109 if (skb && !skb_in_use) {
6110 BT_DBG("Freeing %p", skb);
6111 kfree_skb(skb);
6112 }
6113
6114 return err;
6115 }
6116
6117 static int l2cap_finish_move(struct l2cap_chan *chan)
6118 {
6119 BT_DBG("chan %p", chan);
6120
6121 chan->rx_state = L2CAP_RX_STATE_RECV;
6122
6123 if (chan->hs_hcon)
6124 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6125 else
6126 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6127
6128 return l2cap_resegment(chan);
6129 }
6130
6131 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6132 struct l2cap_ctrl *control,
6133 struct sk_buff *skb, u8 event)
6134 {
6135 int err;
6136
6137 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6138 event);
6139
6140 if (!control->poll)
6141 return -EPROTO;
6142
6143 l2cap_process_reqseq(chan, control->reqseq);
6144
6145 if (!skb_queue_empty(&chan->tx_q))
6146 chan->tx_send_head = skb_peek(&chan->tx_q);
6147 else
6148 chan->tx_send_head = NULL;
6149
6150 /* Rewind next_tx_seq to the point expected
6151 * by the receiver.
6152 */
6153 chan->next_tx_seq = control->reqseq;
6154 chan->unacked_frames = 0;
6155
6156 err = l2cap_finish_move(chan);
6157 if (err)
6158 return err;
6159
6160 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6161 l2cap_send_i_or_rr_or_rnr(chan);
6162
6163 if (event == L2CAP_EV_RECV_IFRAME)
6164 return -EPROTO;
6165
6166 return l2cap_rx_state_recv(chan, control, NULL, event);
6167 }
6168
6169 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6170 struct l2cap_ctrl *control,
6171 struct sk_buff *skb, u8 event)
6172 {
6173 int err;
6174
6175 if (!control->final)
6176 return -EPROTO;
6177
6178 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6179
6180 chan->rx_state = L2CAP_RX_STATE_RECV;
6181 l2cap_process_reqseq(chan, control->reqseq);
6182
6183 if (!skb_queue_empty(&chan->tx_q))
6184 chan->tx_send_head = skb_peek(&chan->tx_q);
6185 else
6186 chan->tx_send_head = NULL;
6187
6188 /* Rewind next_tx_seq to the point expected
6189 * by the receiver.
6190 */
6191 chan->next_tx_seq = control->reqseq;
6192 chan->unacked_frames = 0;
6193
6194 if (chan->hs_hcon)
6195 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6196 else
6197 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6198
6199 err = l2cap_resegment(chan);
6200
6201 if (!err)
6202 err = l2cap_rx_state_recv(chan, control, skb, event);
6203
6204 return err;
6205 }
6206
6207 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6208 {
6209 /* Make sure reqseq is for a packet that has been sent but not acked */
6210 u16 unacked;
6211
6212 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6213 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6214 }
6215
6216 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6217 struct sk_buff *skb, u8 event)
6218 {
6219 int err = 0;
6220
6221 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6222 control, skb, event, chan->rx_state);
6223
6224 if (__valid_reqseq(chan, control->reqseq)) {
6225 switch (chan->rx_state) {
6226 case L2CAP_RX_STATE_RECV:
6227 err = l2cap_rx_state_recv(chan, control, skb, event);
6228 break;
6229 case L2CAP_RX_STATE_SREJ_SENT:
6230 err = l2cap_rx_state_srej_sent(chan, control, skb,
6231 event);
6232 break;
6233 case L2CAP_RX_STATE_WAIT_P:
6234 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6235 break;
6236 case L2CAP_RX_STATE_WAIT_F:
6237 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6238 break;
6239 default:
6240 /* shut it down */
6241 break;
6242 }
6243 } else {
6244 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6245 control->reqseq, chan->next_tx_seq,
6246 chan->expected_ack_seq);
6247 l2cap_send_disconn_req(chan, ECONNRESET);
6248 }
6249
6250 return err;
6251 }
6252
6253 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6254 struct sk_buff *skb)
6255 {
6256 int err = 0;
6257
6258 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6259 chan->rx_state);
6260
6261 if (l2cap_classify_txseq(chan, control->txseq) ==
6262 L2CAP_TXSEQ_EXPECTED) {
6263 l2cap_pass_to_tx(chan, control);
6264
6265 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6266 __next_seq(chan, chan->buffer_seq));
6267
6268 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6269
6270 l2cap_reassemble_sdu(chan, skb, control);
6271 } else {
6272 if (chan->sdu) {
6273 kfree_skb(chan->sdu);
6274 chan->sdu = NULL;
6275 }
6276 chan->sdu_last_frag = NULL;
6277 chan->sdu_len = 0;
6278
6279 if (skb) {
6280 BT_DBG("Freeing %p", skb);
6281 kfree_skb(skb);
6282 }
6283 }
6284
6285 chan->last_acked_seq = control->txseq;
6286 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6287
6288 return err;
6289 }
6290
6291 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6292 {
6293 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6294 u16 len;
6295 u8 event;
6296
6297 __unpack_control(chan, skb);
6298
6299 len = skb->len;
6300
6301 /*
6302 * We can just drop the corrupted I-frame here.
6303 * Receiver will miss it and start proper recovery
6304 * procedures and ask for retransmission.
6305 */
6306 if (l2cap_check_fcs(chan, skb))
6307 goto drop;
6308
6309 if (!control->sframe && control->sar == L2CAP_SAR_START)
6310 len -= L2CAP_SDULEN_SIZE;
6311
6312 if (chan->fcs == L2CAP_FCS_CRC16)
6313 len -= L2CAP_FCS_SIZE;
6314
6315 if (len > chan->mps) {
6316 l2cap_send_disconn_req(chan, ECONNRESET);
6317 goto drop;
6318 }
6319
6320 if (!control->sframe) {
6321 int err;
6322
6323 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6324 control->sar, control->reqseq, control->final,
6325 control->txseq);
6326
6327 /* Validate F-bit - F=0 always valid, F=1 only
6328 * valid in TX WAIT_F
6329 */
6330 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6331 goto drop;
6332
6333 if (chan->mode != L2CAP_MODE_STREAMING) {
6334 event = L2CAP_EV_RECV_IFRAME;
6335 err = l2cap_rx(chan, control, skb, event);
6336 } else {
6337 err = l2cap_stream_rx(chan, control, skb);
6338 }
6339
6340 if (err)
6341 l2cap_send_disconn_req(chan, ECONNRESET);
6342 } else {
6343 const u8 rx_func_to_event[4] = {
6344 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6345 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6346 };
6347
6348 /* Only I-frames are expected in streaming mode */
6349 if (chan->mode == L2CAP_MODE_STREAMING)
6350 goto drop;
6351
6352 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6353 control->reqseq, control->final, control->poll,
6354 control->super);
6355
6356 if (len != 0) {
6357 BT_ERR("Trailing bytes: %d in sframe", len);
6358 l2cap_send_disconn_req(chan, ECONNRESET);
6359 goto drop;
6360 }
6361
6362 /* Validate F and P bits */
6363 if (control->final && (control->poll ||
6364 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6365 goto drop;
6366
6367 event = rx_func_to_event[control->super];
6368 if (l2cap_rx(chan, control, skb, event))
6369 l2cap_send_disconn_req(chan, ECONNRESET);
6370 }
6371
6372 return 0;
6373
6374 drop:
6375 kfree_skb(skb);
6376 return 0;
6377 }
6378
6379 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6380 struct sk_buff *skb)
6381 {
6382 struct l2cap_chan *chan;
6383
6384 chan = l2cap_get_chan_by_scid(conn, cid);
6385 if (!chan) {
6386 if (cid == L2CAP_CID_A2MP) {
6387 chan = a2mp_channel_create(conn, skb);
6388 if (!chan) {
6389 kfree_skb(skb);
6390 return;
6391 }
6392
6393 l2cap_chan_lock(chan);
6394 } else {
6395 BT_DBG("unknown cid 0x%4.4x", cid);
6396 /* Drop packet and return */
6397 kfree_skb(skb);
6398 return;
6399 }
6400 }
6401
6402 BT_DBG("chan %p, len %d", chan, skb->len);
6403
6404 if (chan->state != BT_CONNECTED)
6405 goto drop;
6406
6407 switch (chan->mode) {
6408 case L2CAP_MODE_BASIC:
6409 /* If socket recv buffers overflows we drop data here
6410 * which is *bad* because L2CAP has to be reliable.
6411 * But we don't have any other choice. L2CAP doesn't
6412 * provide flow control mechanism. */
6413
6414 if (chan->imtu < skb->len)
6415 goto drop;
6416
6417 if (!chan->ops->recv(chan, skb))
6418 goto done;
6419 break;
6420
6421 case L2CAP_MODE_ERTM:
6422 case L2CAP_MODE_STREAMING:
6423 l2cap_data_rcv(chan, skb);
6424 goto done;
6425
6426 default:
6427 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6428 break;
6429 }
6430
6431 drop:
6432 kfree_skb(skb);
6433
6434 done:
6435 l2cap_chan_unlock(chan);
6436 }
6437
6438 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6439 struct sk_buff *skb)
6440 {
6441 struct hci_conn *hcon = conn->hcon;
6442 struct l2cap_chan *chan;
6443
6444 if (hcon->type != ACL_LINK)
6445 goto drop;
6446
6447 chan = l2cap_global_chan_by_psm(0, psm, &conn->hcon->src,
6448 &conn->hcon->dst);
6449 if (!chan)
6450 goto drop;
6451
6452 BT_DBG("chan %p, len %d", chan, skb->len);
6453
6454 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6455 goto drop;
6456
6457 if (chan->imtu < skb->len)
6458 goto drop;
6459
6460 /* Store remote BD_ADDR and PSM for msg_name */
6461 bacpy(&bt_cb(skb)->bdaddr, &conn->hcon->dst);
6462 bt_cb(skb)->psm = psm;
6463
6464 if (!chan->ops->recv(chan, skb))
6465 return;
6466
6467 drop:
6468 kfree_skb(skb);
6469 }
6470
6471 static void l2cap_att_channel(struct l2cap_conn *conn,
6472 struct sk_buff *skb)
6473 {
6474 struct hci_conn *hcon = conn->hcon;
6475 struct l2cap_chan *chan;
6476
6477 if (hcon->type != LE_LINK)
6478 goto drop;
6479
6480 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6481 &conn->hcon->src, &conn->hcon->dst);
6482 if (!chan)
6483 goto drop;
6484
6485 BT_DBG("chan %p, len %d", chan, skb->len);
6486
6487 if (chan->imtu < skb->len)
6488 goto drop;
6489
6490 if (!chan->ops->recv(chan, skb))
6491 return;
6492
6493 drop:
6494 kfree_skb(skb);
6495 }
6496
6497 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6498 {
6499 struct l2cap_hdr *lh = (void *) skb->data;
6500 u16 cid, len;
6501 __le16 psm;
6502
6503 skb_pull(skb, L2CAP_HDR_SIZE);
6504 cid = __le16_to_cpu(lh->cid);
6505 len = __le16_to_cpu(lh->len);
6506
6507 if (len != skb->len) {
6508 kfree_skb(skb);
6509 return;
6510 }
6511
6512 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6513
6514 switch (cid) {
6515 case L2CAP_CID_SIGNALING:
6516 l2cap_sig_channel(conn, skb);
6517 break;
6518
6519 case L2CAP_CID_CONN_LESS:
6520 psm = get_unaligned((__le16 *) skb->data);
6521 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6522 l2cap_conless_channel(conn, psm, skb);
6523 break;
6524
6525 case L2CAP_CID_ATT:
6526 l2cap_att_channel(conn, skb);
6527 break;
6528
6529 case L2CAP_CID_LE_SIGNALING:
6530 l2cap_le_sig_channel(conn, skb);
6531 break;
6532
6533 case L2CAP_CID_SMP:
6534 if (smp_sig_channel(conn, skb))
6535 l2cap_conn_del(conn->hcon, EACCES);
6536 break;
6537
6538 default:
6539 l2cap_data_channel(conn, cid, skb);
6540 break;
6541 }
6542 }
6543
6544 /* ---- L2CAP interface with lower layer (HCI) ---- */
6545
6546 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6547 {
6548 int exact = 0, lm1 = 0, lm2 = 0;
6549 struct l2cap_chan *c;
6550
6551 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6552
6553 /* Find listening sockets and check their link_mode */
6554 read_lock(&chan_list_lock);
6555 list_for_each_entry(c, &chan_list, global_l) {
6556 if (c->state != BT_LISTEN)
6557 continue;
6558
6559 if (!bacmp(&c->src, &hdev->bdaddr)) {
6560 lm1 |= HCI_LM_ACCEPT;
6561 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6562 lm1 |= HCI_LM_MASTER;
6563 exact++;
6564 } else if (!bacmp(&c->src, BDADDR_ANY)) {
6565 lm2 |= HCI_LM_ACCEPT;
6566 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6567 lm2 |= HCI_LM_MASTER;
6568 }
6569 }
6570 read_unlock(&chan_list_lock);
6571
6572 return exact ? lm1 : lm2;
6573 }
6574
6575 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6576 {
6577 struct l2cap_conn *conn;
6578
6579 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6580
6581 if (!status) {
6582 conn = l2cap_conn_add(hcon);
6583 if (conn)
6584 l2cap_conn_ready(conn);
6585 } else {
6586 l2cap_conn_del(hcon, bt_to_errno(status));
6587 }
6588 }
6589
6590 int l2cap_disconn_ind(struct hci_conn *hcon)
6591 {
6592 struct l2cap_conn *conn = hcon->l2cap_data;
6593
6594 BT_DBG("hcon %p", hcon);
6595
6596 if (!conn)
6597 return HCI_ERROR_REMOTE_USER_TERM;
6598 return conn->disc_reason;
6599 }
6600
6601 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6602 {
6603 BT_DBG("hcon %p reason %d", hcon, reason);
6604
6605 l2cap_conn_del(hcon, bt_to_errno(reason));
6606 }
6607
6608 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6609 {
6610 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6611 return;
6612
6613 if (encrypt == 0x00) {
6614 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6615 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6616 } else if (chan->sec_level == BT_SECURITY_HIGH)
6617 l2cap_chan_close(chan, ECONNREFUSED);
6618 } else {
6619 if (chan->sec_level == BT_SECURITY_MEDIUM)
6620 __clear_chan_timer(chan);
6621 }
6622 }
6623
6624 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6625 {
6626 struct l2cap_conn *conn = hcon->l2cap_data;
6627 struct l2cap_chan *chan;
6628
6629 if (!conn)
6630 return 0;
6631
6632 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6633
6634 if (hcon->type == LE_LINK) {
6635 if (!status && encrypt)
6636 smp_distribute_keys(conn, 0);
6637 cancel_delayed_work(&conn->security_timer);
6638 }
6639
6640 mutex_lock(&conn->chan_lock);
6641
6642 list_for_each_entry(chan, &conn->chan_l, list) {
6643 l2cap_chan_lock(chan);
6644
6645 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6646 state_to_string(chan->state));
6647
6648 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6649 l2cap_chan_unlock(chan);
6650 continue;
6651 }
6652
6653 if (chan->scid == L2CAP_CID_ATT) {
6654 if (!status && encrypt) {
6655 chan->sec_level = hcon->sec_level;
6656 l2cap_chan_ready(chan);
6657 }
6658
6659 l2cap_chan_unlock(chan);
6660 continue;
6661 }
6662
6663 if (!__l2cap_no_conn_pending(chan)) {
6664 l2cap_chan_unlock(chan);
6665 continue;
6666 }
6667
6668 if (!status && (chan->state == BT_CONNECTED ||
6669 chan->state == BT_CONFIG)) {
6670 chan->ops->resume(chan);
6671 l2cap_check_encryption(chan, encrypt);
6672 l2cap_chan_unlock(chan);
6673 continue;
6674 }
6675
6676 if (chan->state == BT_CONNECT) {
6677 if (!status) {
6678 l2cap_start_connection(chan);
6679 } else {
6680 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6681 }
6682 } else if (chan->state == BT_CONNECT2) {
6683 struct sock *sk = chan->sk;
6684 struct l2cap_conn_rsp rsp;
6685 __u16 res, stat;
6686
6687 lock_sock(sk);
6688
6689 if (!status) {
6690 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6691 res = L2CAP_CR_PEND;
6692 stat = L2CAP_CS_AUTHOR_PEND;
6693 chan->ops->defer(chan);
6694 } else {
6695 __l2cap_state_change(chan, BT_CONFIG);
6696 res = L2CAP_CR_SUCCESS;
6697 stat = L2CAP_CS_NO_INFO;
6698 }
6699 } else {
6700 __l2cap_state_change(chan, BT_DISCONN);
6701 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6702 res = L2CAP_CR_SEC_BLOCK;
6703 stat = L2CAP_CS_NO_INFO;
6704 }
6705
6706 release_sock(sk);
6707
6708 rsp.scid = cpu_to_le16(chan->dcid);
6709 rsp.dcid = cpu_to_le16(chan->scid);
6710 rsp.result = cpu_to_le16(res);
6711 rsp.status = cpu_to_le16(stat);
6712 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6713 sizeof(rsp), &rsp);
6714
6715 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6716 res == L2CAP_CR_SUCCESS) {
6717 char buf[128];
6718 set_bit(CONF_REQ_SENT, &chan->conf_state);
6719 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6720 L2CAP_CONF_REQ,
6721 l2cap_build_conf_req(chan, buf),
6722 buf);
6723 chan->num_conf_req++;
6724 }
6725 }
6726
6727 l2cap_chan_unlock(chan);
6728 }
6729
6730 mutex_unlock(&conn->chan_lock);
6731
6732 return 0;
6733 }
6734
6735 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6736 {
6737 struct l2cap_conn *conn = hcon->l2cap_data;
6738 struct l2cap_hdr *hdr;
6739 int len;
6740
6741 /* For AMP controller do not create l2cap conn */
6742 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6743 goto drop;
6744
6745 if (!conn)
6746 conn = l2cap_conn_add(hcon);
6747
6748 if (!conn)
6749 goto drop;
6750
6751 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6752
6753 switch (flags) {
6754 case ACL_START:
6755 case ACL_START_NO_FLUSH:
6756 case ACL_COMPLETE:
6757 if (conn->rx_len) {
6758 BT_ERR("Unexpected start frame (len %d)", skb->len);
6759 kfree_skb(conn->rx_skb);
6760 conn->rx_skb = NULL;
6761 conn->rx_len = 0;
6762 l2cap_conn_unreliable(conn, ECOMM);
6763 }
6764
6765 /* Start fragment always begin with Basic L2CAP header */
6766 if (skb->len < L2CAP_HDR_SIZE) {
6767 BT_ERR("Frame is too short (len %d)", skb->len);
6768 l2cap_conn_unreliable(conn, ECOMM);
6769 goto drop;
6770 }
6771
6772 hdr = (struct l2cap_hdr *) skb->data;
6773 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6774
6775 if (len == skb->len) {
6776 /* Complete frame received */
6777 l2cap_recv_frame(conn, skb);
6778 return 0;
6779 }
6780
6781 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6782
6783 if (skb->len > len) {
6784 BT_ERR("Frame is too long (len %d, expected len %d)",
6785 skb->len, len);
6786 l2cap_conn_unreliable(conn, ECOMM);
6787 goto drop;
6788 }
6789
6790 /* Allocate skb for the complete frame (with header) */
6791 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6792 if (!conn->rx_skb)
6793 goto drop;
6794
6795 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6796 skb->len);
6797 conn->rx_len = len - skb->len;
6798 break;
6799
6800 case ACL_CONT:
6801 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6802
6803 if (!conn->rx_len) {
6804 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6805 l2cap_conn_unreliable(conn, ECOMM);
6806 goto drop;
6807 }
6808
6809 if (skb->len > conn->rx_len) {
6810 BT_ERR("Fragment is too long (len %d, expected %d)",
6811 skb->len, conn->rx_len);
6812 kfree_skb(conn->rx_skb);
6813 conn->rx_skb = NULL;
6814 conn->rx_len = 0;
6815 l2cap_conn_unreliable(conn, ECOMM);
6816 goto drop;
6817 }
6818
6819 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6820 skb->len);
6821 conn->rx_len -= skb->len;
6822
6823 if (!conn->rx_len) {
6824 /* Complete frame received. l2cap_recv_frame
6825 * takes ownership of the skb so set the global
6826 * rx_skb pointer to NULL first.
6827 */
6828 struct sk_buff *rx_skb = conn->rx_skb;
6829 conn->rx_skb = NULL;
6830 l2cap_recv_frame(conn, rx_skb);
6831 }
6832 break;
6833 }
6834
6835 drop:
6836 kfree_skb(skb);
6837 return 0;
6838 }
6839
6840 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6841 {
6842 struct l2cap_chan *c;
6843
6844 read_lock(&chan_list_lock);
6845
6846 list_for_each_entry(c, &chan_list, global_l) {
6847 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6848 &c->src, &c->dst,
6849 c->state, __le16_to_cpu(c->psm),
6850 c->scid, c->dcid, c->imtu, c->omtu,
6851 c->sec_level, c->mode);
6852 }
6853
6854 read_unlock(&chan_list_lock);
6855
6856 return 0;
6857 }
6858
6859 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6860 {
6861 return single_open(file, l2cap_debugfs_show, inode->i_private);
6862 }
6863
6864 static const struct file_operations l2cap_debugfs_fops = {
6865 .open = l2cap_debugfs_open,
6866 .read = seq_read,
6867 .llseek = seq_lseek,
6868 .release = single_release,
6869 };
6870
/* Dentry of the debugfs entry created by l2cap_init(). */
static struct dentry *l2cap_debugfs;
6872
6873 int __init l2cap_init(void)
6874 {
6875 int err;
6876
6877 err = l2cap_init_sockets();
6878 if (err < 0)
6879 return err;
6880
6881 if (bt_debugfs) {
6882 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6883 NULL, &l2cap_debugfs_fops);
6884 if (!l2cap_debugfs)
6885 BT_ERR("Failed to create L2CAP debug file");
6886 }
6887
6888 return 0;
6889 }
6890
6891 void l2cap_exit(void)
6892 {
6893 debugfs_remove(l2cap_debugfs);
6894 l2cap_cleanup_sockets();
6895 }
6896
6897 module_param(disable_ertm, bool, 0644);
6898 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.221828 seconds and 5 git commands to generate.