Bluetooth: Add debugfs controls for LE CoC MPS and Credits
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
/* Set to true to administratively disable ERTM support. */
bool disable_ertm;

/* Feature mask and fixed-channel map advertised in Information Responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };

/* Global list of every L2CAP channel, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Defaults for LE credit-based (CoC) channels.  NOTE(review): kept as
 * variables (not constants) so they can be tuned at runtime — presumably
 * via debugfs; confirm where they are hooked up later in this file.
 */
static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
65 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
66 {
67 if (hcon->type == LE_LINK) {
68 if (type == ADDR_LE_DEV_PUBLIC)
69 return BDADDR_LE_PUBLIC;
70 else
71 return BDADDR_LE_RANDOM;
72 }
73
74 return BDADDR_BREDR;
75 }
76
77 /* ---- L2CAP channels ---- */
78
79 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
80 u16 cid)
81 {
82 struct l2cap_chan *c;
83
84 list_for_each_entry(c, &conn->chan_l, list) {
85 if (c->dcid == cid)
86 return c;
87 }
88 return NULL;
89 }
90
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
92 u16 cid)
93 {
94 struct l2cap_chan *c;
95
96 list_for_each_entry(c, &conn->chan_l, list) {
97 if (c->scid == cid)
98 return c;
99 }
100 return NULL;
101 }
102
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
106 u16 cid)
107 {
108 struct l2cap_chan *c;
109
110 mutex_lock(&conn->chan_lock);
111 c = __l2cap_get_chan_by_scid(conn, cid);
112 if (c)
113 l2cap_chan_lock(c);
114 mutex_unlock(&conn->chan_lock);
115
116 return c;
117 }
118
119 /* Find channel with given DCID.
120 * Returns locked channel.
121 */
122 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
123 u16 cid)
124 {
125 struct l2cap_chan *c;
126
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_dcid(conn, cid);
129 if (c)
130 l2cap_chan_lock(c);
131 mutex_unlock(&conn->chan_lock);
132
133 return c;
134 }
135
136 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
137 u8 ident)
138 {
139 struct l2cap_chan *c;
140
141 list_for_each_entry(c, &conn->chan_l, list) {
142 if (c->ident == ident)
143 return c;
144 }
145 return NULL;
146 }
147
148 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
149 u8 ident)
150 {
151 struct l2cap_chan *c;
152
153 mutex_lock(&conn->chan_lock);
154 c = __l2cap_get_chan_by_ident(conn, ident);
155 if (c)
156 l2cap_chan_lock(c);
157 mutex_unlock(&conn->chan_lock);
158
159 return c;
160 }
161
162 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
163 {
164 struct l2cap_chan *c;
165
166 list_for_each_entry(c, &chan_list, global_l) {
167 if (c->sport == psm && !bacmp(&c->src, src))
168 return c;
169 }
170 return NULL;
171 }
172
/* Bind @chan to @psm on source address @src.
 *
 * With @psm == 0 a free dynamic PSM is allocated instead: only odd
 * values in 0x1001..0x10ff are tried (a valid PSM has an odd least
 * significant octet).  Returns 0 on success, -EADDRINUSE when the
 * requested PSM/address pair is already bound, or -EINVAL when no
 * dynamic PSM is free.  chan_list_lock serializes concurrent binds.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
205
206 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
207 {
208 write_lock(&chan_list_lock);
209
210 chan->scid = scid;
211
212 write_unlock(&chan_list_lock);
213
214 return 0;
215 }
216
217 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
218 {
219 u16 cid, dyn_end;
220
221 if (conn->hcon->type == LE_LINK)
222 dyn_end = L2CAP_CID_LE_DYN_END;
223 else
224 dyn_end = L2CAP_CID_DYN_END;
225
226 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
227 if (!__l2cap_get_chan_by_scid(conn, cid))
228 return cid;
229 }
230
231 return 0;
232 }
233
/* Move @chan to @state and notify the channel owner (with no error). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
242
243 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
244 int state, int err)
245 {
246 chan->state = state;
247 chan->ops->state_change(chan, chan->state, err);
248 }
249
/* Report @err to the channel owner without changing the channel state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
254
255 static void __set_retrans_timer(struct l2cap_chan *chan)
256 {
257 if (!delayed_work_pending(&chan->monitor_timer) &&
258 chan->retrans_timeout) {
259 l2cap_set_timer(chan, &chan->retrans_timer,
260 msecs_to_jiffies(chan->retrans_timeout));
261 }
262 }
263
264 static void __set_monitor_timer(struct l2cap_chan *chan)
265 {
266 __clear_retrans_timer(chan);
267 if (chan->monitor_timeout) {
268 l2cap_set_timer(chan, &chan->monitor_timer,
269 msecs_to_jiffies(chan->monitor_timeout));
270 }
271 }
272
273 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
274 u16 seq)
275 {
276 struct sk_buff *skb;
277
278 skb_queue_walk(head, skb) {
279 if (bt_cb(skb)->control.txseq == seq)
280 return skb;
281 }
282
283 return NULL;
284 }
285
286 /* ---- L2CAP sequence number lists ---- */
287
288 /* For ERTM, ordered lists of sequence numbers must be tracked for
289 * SREJ requests that are received and for frames that are to be
290 * retransmitted. These seq_list functions implement a singly-linked
291 * list in an array, where membership in the list can also be checked
292 * in constant time. Items can also be added to the tail of the list
293 * and removed from the head in constant time, without further memory
294 * allocs or frees.
295 */
296
/* Allocate backing storage for a sequence-number list sized for @size
 * entries.  Returns 0 or -ENOMEM; release with l2cap_seq_list_free().
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size makes "seq & mask" the array index. */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
319
/* Release the storage allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
324
/* Test whether @seq is currently on the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
331
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or @seq is not found.  Each array slot stores the
 * next sequence number in the chain (L2CAP_SEQ_LIST_TAIL terminates it),
 * so removing the head is O(1) and an interior removal walks the chain.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the only element empties the list entirely */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
365
/* Pop and return the oldest queued sequence number (the list head). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
371
372 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
373 {
374 u16 i;
375
376 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
377 return;
378
379 for (i = 0; i <= seq_list->mask; i++)
380 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
381
382 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
383 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
384 }
385
/* Append @seq to the tail of the list; duplicates are ignored so a
 * sequence number can be queued at most once.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* A non-CLEAR slot means @seq is already queued. */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
403
/* Deferred work run when a channel's chan_timer expires.
 *
 * Lock order is conn->chan_lock before the channel lock.  Drops the
 * reference the timer held on the channel before returning.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Map the state at expiry to the error reported to the owner. */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* NOTE(review): ops->close() is deliberately invoked after the
	 * channel lock is released — presumably to avoid recursive
	 * locking in the socket layer; confirm against the ops users.
	 */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
433
/* Allocate a new channel, link it on the global channel list and return
 * it holding one reference.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	/* Initial reference; released via l2cap_chan_put() */
	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
461
462 static void l2cap_chan_destroy(struct kref *kref)
463 {
464 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
465
466 BT_DBG("chan %p", chan);
467
468 write_lock(&chan_list_lock);
469 list_del(&chan->global_l);
470 write_unlock(&chan_list_lock);
471
472 kfree(chan);
473 }
474
/* Take a reference on @c (paired with l2cap_chan_put()). */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
481
/* Drop a reference on @c; frees it via l2cap_chan_destroy() at zero. */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
488
489 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
490 {
491 chan->fcs = L2CAP_FCS_CRC16;
492 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
493 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
494 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
495 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
496 chan->sec_level = BT_SECURITY_LOW;
497
498 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
499 }
500
501 void l2cap_le_flowctl_init(struct l2cap_chan *chan)
502 {
503 chan->imtu = L2CAP_DEFAULT_MTU;
504 chan->omtu = L2CAP_LE_MIN_MTU;
505 chan->mode = L2CAP_MODE_LE_FLOWCTL;
506 chan->tx_credits = 0;
507 chan->rx_credits = le_max_credits;
508
509 if (chan->imtu < L2CAP_LE_DEFAULT_MPS)
510 chan->mps = chan->imtu;
511 else
512 chan->mps = L2CAP_LE_DEFAULT_MPS;
513 }
514
/* Attach @chan to @conn, assigning CIDs and default MTUs according to
 * the channel type.  Caller holds conn->chan_lock (see l2cap_chan_add).
 * Takes a channel reference and an hci_conn reference, both dropped in
 * l2cap_chan_del().
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until a more specific one is known */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			if (chan->dcid == L2CAP_CID_ATT) {
				/* ATT fixed channel: same CID both ways */
				chan->omtu = L2CAP_DEFAULT_MTU;
				chan->scid = L2CAP_CID_ATT;
			} else {
				chan->scid = l2cap_alloc_cid(conn);
			}
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		/* A2MP fixed channel with its own default MTUs */
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort flow spec for the local side */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
574
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
581
/* Detach @chan from its connection and tear it down, reporting @err to
 * the channel owner.  NOTE(review): callers appear to hold the channel
 * lock and conn->chan_lock — confirm at the call sites.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* A2MP fixed channels never took an hci_conn reference */
		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	/* If configuration never completed, no mode-specific state
	 * (queues, timers, seq lists) was ever set up.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
645
646 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
647 {
648 struct l2cap_conn *conn = chan->conn;
649 struct l2cap_le_conn_rsp rsp;
650 u16 result;
651
652 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
653 result = L2CAP_CR_AUTHORIZATION;
654 else
655 result = L2CAP_CR_BAD_PSM;
656
657 l2cap_state_change(chan, BT_DISCONN);
658
659 rsp.dcid = cpu_to_le16(chan->scid);
660 rsp.mtu = cpu_to_le16(chan->imtu);
661 rsp.mps = cpu_to_le16(chan->mps);
662 rsp.credits = cpu_to_le16(chan->rx_credits);
663 rsp.result = cpu_to_le16(result);
664
665 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
666 &rsp);
667 }
668
669 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
670 {
671 struct l2cap_conn *conn = chan->conn;
672 struct l2cap_conn_rsp rsp;
673 u16 result;
674
675 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
676 result = L2CAP_CR_SEC_BLOCK;
677 else
678 result = L2CAP_CR_BAD_PSM;
679
680 l2cap_state_change(chan, BT_DISCONN);
681
682 rsp.scid = cpu_to_le16(chan->dcid);
683 rsp.dcid = cpu_to_le16(chan->scid);
684 rsp.result = cpu_to_le16(result);
685 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
686
687 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
688 }
689
/* Close @chan, choosing the shutdown path appropriate for its current
 * state.  @reason is passed to l2cap_chan_del() for the owner.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
		 * check for chan->psm.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
			/* Established dynamic channel: disconnect cleanly
			 * and wait (with a timer) for the response.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request not yet accepted: reject it on the
		 * transport-specific signalling channel first.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
734
/* Derive the HCI authentication requirement for @chan from its channel
 * type and security level.  May upgrade sec_level from LOW to SDP for
 * the SDP PSM and the 3DSP connectionless PSM.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw (signalling-only) channels use dedicated bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP never requires bonding regardless of level */
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		/* Everything else maps to general bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
781
782 /* Service level security */
783 int l2cap_chan_check_security(struct l2cap_chan *chan)
784 {
785 struct l2cap_conn *conn = chan->conn;
786 __u8 auth_type;
787
788 if (conn->hcon->type == LE_LINK)
789 return smp_conn_security(conn->hcon, chan->sec_level);
790
791 auth_type = l2cap_get_auth_type(chan);
792
793 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
794 }
795
796 static u8 l2cap_get_ident(struct l2cap_conn *conn)
797 {
798 u8 id;
799
800 /* Get next available identificator.
801 * 1 - 128 are used by kernel.
802 * 129 - 199 are reserved.
803 * 200 - 254 are used by utilities like l2ping, etc.
804 */
805
806 spin_lock(&conn->lock);
807
808 if (++conn->tx_ident > 128)
809 conn->tx_ident = 1;
810
811 id = conn->tx_ident;
812
813 spin_unlock(&conn->lock);
814
815 return id;
816 }
817
818 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
819 void *data)
820 {
821 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
822 u8 flags;
823
824 BT_DBG("code 0x%2.2x", code);
825
826 if (!skb)
827 return;
828
829 if (lmp_no_flush_capable(conn->hcon->hdev))
830 flags = ACL_START_NO_FLUSH;
831 else
832 flags = ACL_START;
833
834 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
835 skb->priority = HCI_PRIO_MAX;
836
837 hci_send_acl(conn->hchan, skb, flags);
838 }
839
840 static bool __chan_is_moving(struct l2cap_chan *chan)
841 {
842 return chan->move_state != L2CAP_MOVE_STABLE &&
843 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
844 }
845
/* Transmit a single PDU for @chan on the appropriate transport. */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Channels on an AMP link send via the high-speed hci_chan; while
	 * a move is in progress traffic keeps using the BR/EDR path below.
	 */
	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);	/* no logical link: drop the frame */

		return;
	}

	/* Non-flushable only when the channel is not flagged flushable
	 * and the controller supports non-flushable boundary flags.
	 */
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
872
873 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
874 {
875 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
876 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
877
878 if (enh & L2CAP_CTRL_FRAME_TYPE) {
879 /* S-Frame */
880 control->sframe = 1;
881 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
882 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
883
884 control->sar = 0;
885 control->txseq = 0;
886 } else {
887 /* I-Frame */
888 control->sframe = 0;
889 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
890 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
891
892 control->poll = 0;
893 control->super = 0;
894 }
895 }
896
897 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
898 {
899 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
900 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
901
902 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
903 /* S-Frame */
904 control->sframe = 1;
905 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
906 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
907
908 control->sar = 0;
909 control->txseq = 0;
910 } else {
911 /* I-Frame */
912 control->sframe = 0;
913 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
914 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
915
916 control->poll = 0;
917 control->super = 0;
918 }
919 }
920
921 static inline void __unpack_control(struct l2cap_chan *chan,
922 struct sk_buff *skb)
923 {
924 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
925 __unpack_extended_control(get_unaligned_le32(skb->data),
926 &bt_cb(skb)->control);
927 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
928 } else {
929 __unpack_enhanced_control(get_unaligned_le16(skb->data),
930 &bt_cb(skb)->control);
931 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
932 }
933 }
934
935 static u32 __pack_extended_control(struct l2cap_ctrl *control)
936 {
937 u32 packed;
938
939 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
940 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
941
942 if (control->sframe) {
943 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
944 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
945 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
946 } else {
947 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
948 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
949 }
950
951 return packed;
952 }
953
954 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
955 {
956 u16 packed;
957
958 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
959 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
960
961 if (control->sframe) {
962 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
963 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
964 packed |= L2CAP_CTRL_FRAME_TYPE;
965 } else {
966 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
967 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
968 }
969
970 return packed;
971 }
972
973 static inline void __pack_control(struct l2cap_chan *chan,
974 struct l2cap_ctrl *control,
975 struct sk_buff *skb)
976 {
977 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
978 put_unaligned_le32(__pack_extended_control(control),
979 skb->data + L2CAP_HDR_SIZE);
980 } else {
981 put_unaligned_le16(__pack_enhanced_control(control),
982 skb->data + L2CAP_HDR_SIZE);
983 }
984 }
985
986 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
987 {
988 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
989 return L2CAP_EXT_HDR_SIZE;
990 else
991 return L2CAP_ENH_HDR_SIZE;
992 }
993
/* Build a complete S-frame PDU carrying the already-packed @control
 * field.  Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	/* Reserve room for the trailing FCS when CRC16 is in use */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: payload length excludes the header itself */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width follows the negotiated EXT_CTRL flag */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything built so far, including the header */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1026
/* Send the supervisory frame described by @control, updating the
 * channel's ack/RNR bookkeeping as a side effect.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* No data is exchanged while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is piggybacked on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last state report was busy (RNR) or ready */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR acknowledge reqseq, so the ack timer can be cancelled;
	 * SREJ does not acknowledge anything.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1067
1068 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1069 {
1070 struct l2cap_ctrl control;
1071
1072 BT_DBG("chan %p, poll %d", chan, poll);
1073
1074 memset(&control, 0, sizeof(control));
1075 control.sframe = 1;
1076 control.poll = poll;
1077
1078 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1079 control.super = L2CAP_SUPER_RNR;
1080 else
1081 control.super = L2CAP_SUPER_RR;
1082
1083 control.reqseq = chan->buffer_seq;
1084 l2cap_send_sframe(chan, &control);
1085 }
1086
/* True when no Connection Request is awaiting a response for @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1091
/* Decide whether @chan may be created on (or moved to) an AMP
 * controller: high-speed must be enabled, the remote must support the
 * A2MP fixed channel, at least one non-BR/EDR AMP controller must be
 * up, and the channel policy must actually prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Any powered-up controller that is not plain BR/EDR */
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1119
/* Validate the negotiated EFS parameters for @chan.
 * Currently a stub that accepts everything.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1125
1126 void l2cap_send_conn_req(struct l2cap_chan *chan)
1127 {
1128 struct l2cap_conn *conn = chan->conn;
1129 struct l2cap_conn_req req;
1130
1131 req.scid = cpu_to_le16(chan->scid);
1132 req.psm = chan->psm;
1133
1134 chan->ident = l2cap_get_ident(conn);
1135
1136 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1137
1138 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1139 }
1140
1141 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1142 {
1143 struct l2cap_create_chan_req req;
1144 req.scid = cpu_to_le16(chan->scid);
1145 req.psm = chan->psm;
1146 req.amp_id = amp_id;
1147
1148 chan->ident = l2cap_get_ident(chan->conn);
1149
1150 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1151 sizeof(req), &req);
1152 }
1153
/* Prepare an ERTM channel for an AMP move: stop all timers, mark every
 * previously-sent frame for retransmission and reset the RX/TX state
 * machines.  No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset retries on frames already transmitted; the first frame
	 * with a zero retry count marks the untransmitted tail.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1188
1189 static void l2cap_move_done(struct l2cap_chan *chan)
1190 {
1191 u8 move_role = chan->move_role;
1192 BT_DBG("chan %p", chan);
1193
1194 chan->move_state = L2CAP_MOVE_STABLE;
1195 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1196
1197 if (chan->mode != L2CAP_MODE_ERTM)
1198 return;
1199
1200 switch (move_role) {
1201 case L2CAP_MOVE_ROLE_INITIATOR:
1202 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1203 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1204 break;
1205 case L2CAP_MOVE_ROLE_RESPONDER:
1206 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1207 break;
1208 }
1209 }
1210
1211 static void l2cap_le_flowctl_start(struct l2cap_chan *chan)
1212 {
1213 chan->sdu = NULL;
1214 chan->sdu_last_frag = NULL;
1215 chan->sdu_len = 0;
1216
1217 if (chan->imtu < L2CAP_LE_DEFAULT_MPS)
1218 chan->mps = chan->imtu;
1219 else
1220 chan->mps = le_default_mps;
1221
1222 skb_queue_head_init(&chan->tx_q);
1223
1224 if (!chan->tx_credits)
1225 chan->ops->suspend(chan);
1226 }
1227
/* Move a channel to BT_CONNECTED and notify its owner via ->ready(). */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* LE CoC channels need their credit/SDU state set up first */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL)
		l2cap_le_flowctl_start(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1241
1242 static void l2cap_le_connect(struct l2cap_chan *chan)
1243 {
1244 struct l2cap_conn *conn = chan->conn;
1245 struct l2cap_le_conn_req req;
1246
1247 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1248 return;
1249
1250 req.psm = chan->psm;
1251 req.scid = cpu_to_le16(chan->scid);
1252 req.mtu = cpu_to_le16(chan->imtu);
1253 req.mps = cpu_to_le16(chan->mps);
1254 req.credits = cpu_to_le16(chan->rx_credits);
1255
1256 chan->ident = l2cap_get_ident(conn);
1257
1258 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1259 sizeof(req), &req);
1260 }
1261
1262 static void l2cap_le_start(struct l2cap_chan *chan)
1263 {
1264 struct l2cap_conn *conn = chan->conn;
1265
1266 if (!smp_conn_security(conn->hcon, chan->sec_level))
1267 return;
1268
1269 if (!chan->psm) {
1270 l2cap_chan_ready(chan);
1271 return;
1272 }
1273
1274 if (chan->state == BT_CONNECT)
1275 l2cap_le_connect(chan);
1276 }
1277
1278 static void l2cap_start_connection(struct l2cap_chan *chan)
1279 {
1280 if (__amp_capable(chan)) {
1281 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1282 a2mp_discover_amp(chan);
1283 } else if (chan->conn->hcon->type == LE_LINK) {
1284 l2cap_le_start(chan);
1285 } else {
1286 l2cap_send_conn_req(chan);
1287 }
1288 }
1289
1290 static void l2cap_do_start(struct l2cap_chan *chan)
1291 {
1292 struct l2cap_conn *conn = chan->conn;
1293
1294 if (conn->hcon->type == LE_LINK) {
1295 l2cap_le_start(chan);
1296 return;
1297 }
1298
1299 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1300 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1301 return;
1302
1303 if (l2cap_chan_check_security(chan) &&
1304 __l2cap_no_conn_pending(chan)) {
1305 l2cap_start_connection(chan);
1306 }
1307 } else {
1308 struct l2cap_info_req req;
1309 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1310
1311 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1312 conn->info_ident = l2cap_get_ident(conn);
1313
1314 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1315
1316 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1317 sizeof(req), &req);
1318 }
1319 }
1320
1321 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1322 {
1323 u32 local_feat_mask = l2cap_feat_mask;
1324 if (!disable_ertm)
1325 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1326
1327 switch (mode) {
1328 case L2CAP_MODE_ERTM:
1329 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1330 case L2CAP_MODE_STREAMING:
1331 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1332 default:
1333 return 0x00;
1334 }
1335 }
1336
/* Initiate disconnection of @chan, recording @err as the reason.
 *
 * A2MP fixed channels just change state; everything else sends an
 * L2CAP Disconnect Request and enters BT_DISCONN with the given error.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop ERTM timers: no further (re)transmissions or acks */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* A2MP channels have no CIDs to tear down on the air */
	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1363
1364 /* ---- L2CAP connections ---- */
/* Kick every connection-oriented channel on @conn forward.
 *
 * Called once the feature-mask exchange is complete (or timed out).
 * BT_CONNECT channels get their connect request sent (or are closed if
 * the configured mode is unsupported); BT_CONNECT2 channels (incoming,
 * awaiting our response) get a Connect Response and, on success, the
 * first Configure Request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe: l2cap_chan_close() below may remove list entries */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device cannot fall back to another
			 * mode; close if the remote lacks support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security pending: answer but stay put */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only start configuration once, and only after
			 * a successful response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1440
1441 /* Find socket with cid and source/destination bdaddr.
1442 * Returns closest match, locked.
1443 */
1444 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1445 bdaddr_t *src,
1446 bdaddr_t *dst)
1447 {
1448 struct l2cap_chan *c, *c1 = NULL;
1449
1450 read_lock(&chan_list_lock);
1451
1452 list_for_each_entry(c, &chan_list, global_l) {
1453 if (state && c->state != state)
1454 continue;
1455
1456 if (c->scid == cid) {
1457 int src_match, dst_match;
1458 int src_any, dst_any;
1459
1460 /* Exact match. */
1461 src_match = !bacmp(&c->src, src);
1462 dst_match = !bacmp(&c->dst, dst);
1463 if (src_match && dst_match) {
1464 read_unlock(&chan_list_lock);
1465 return c;
1466 }
1467
1468 /* Closest match */
1469 src_any = !bacmp(&c->src, BDADDR_ANY);
1470 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1471 if ((src_match && dst_any) || (src_any && dst_match) ||
1472 (src_any && dst_any))
1473 c1 = c;
1474 }
1475 }
1476
1477 read_unlock(&chan_list_lock);
1478
1479 return c1;
1480 }
1481
/* Handle a newly-ready LE link: if a server is listening on the ATT
 * fixed channel for this address pair, spawn a child channel for the
 * connection (unless a client ATT channel already exists or the peer
 * is blacklisted).
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
		return;

	l2cap_chan_lock(pchan);

	/* Derive a new (child) channel from the listening one */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
1524
/* The underlying link came up: progress every channel on @conn. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	/* LE links may have an ATT server listening for this address */
	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels manage their own state */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/raw channels are usable immediately */
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1566
1567 /* Notify sockets that we cannot guaranty reliability anymore */
1568 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1569 {
1570 struct l2cap_chan *chan;
1571
1572 BT_DBG("conn %p", conn);
1573
1574 mutex_lock(&conn->chan_lock);
1575
1576 list_for_each_entry(chan, &conn->chan_l, list) {
1577 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1578 l2cap_chan_set_err(chan, err);
1579 }
1580
1581 mutex_unlock(&conn->chan_lock);
1582 }
1583
/* Info-request timer expired: give up waiting for the remote's
 * feature-mask response, mark the exchange done and proceed with any
 * pending connections.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1594
1595 /*
1596 * l2cap_user
1597 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1598 * callback is called during registration. The ->remove callback is called
1599 * during unregistration.
1600 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, etc. are valid as long as the remove callback hasn't been
 * called.
1603 * External modules must own a reference to the l2cap_conn object if they intend
1604 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1605 * any time if they don't.
1606 */
1607
/* Attach an l2cap_user to @conn, invoking its ->probe callback.
 *
 * Returns 0 on success, -EINVAL if @user is already registered and
 * -ENODEV if the connection has already been torn down.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already linked in */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1645
1646 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1647 {
1648 struct hci_dev *hdev = conn->hcon->hdev;
1649
1650 hci_dev_lock(hdev);
1651
1652 if (!user->list.next || !user->list.prev)
1653 goto out_unlock;
1654
1655 list_del(&user->list);
1656 user->list.next = NULL;
1657 user->list.prev = NULL;
1658 user->remove(conn, user);
1659
1660 out_unlock:
1661 hci_dev_unlock(hdev);
1662 }
1663 EXPORT_SYMBOL(l2cap_unregister_user);
1664
1665 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1666 {
1667 struct l2cap_user *user;
1668
1669 while (!list_empty(&conn->users)) {
1670 user = list_first_entry(&conn->users, struct l2cap_user, list);
1671 list_del(&user->list);
1672 user->list.next = NULL;
1673 user->list.prev = NULL;
1674 user->remove(conn, user);
1675 }
1676 }
1677
/* Tear down the L2CAP connection on @hcon, killing every channel with
 * error @err and dropping the connection's references.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Extra hold keeps chan alive across l2cap_chan_del() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* NOTE: ->close is deliberately invoked after dropping
		 * the channel lock.
		 */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1723
/* SMP security timer expired: destroy the pending SMP context and
 * bring the whole connection down with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1736
/* Create (or return the existing) l2cap_conn for @hcon.
 *
 * Allocates the connection state, binds it to a new hci_chan and picks
 * the outgoing MTU from the controller (LE MTU when available on LE
 * links, ACL MTU otherwise).  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	hci_conn_get(conn->hcon);	/* released in l2cap_conn_free() */
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links time out on security, BR/EDR on the info exchange */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1796
/* kref release callback: drop the hci_conn reference and free @conn. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1804
/* Take a reference on @conn. */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1810
/* Drop a reference on @conn; the last put frees it. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1816
1817 /* ---- Socket interface ---- */
1818
1819 /* Find socket with psm and source / destination bdaddr.
1820 * Returns closest match.
1821 */
1822 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1823 bdaddr_t *src,
1824 bdaddr_t *dst,
1825 u8 link_type)
1826 {
1827 struct l2cap_chan *c, *c1 = NULL;
1828
1829 read_lock(&chan_list_lock);
1830
1831 list_for_each_entry(c, &chan_list, global_l) {
1832 if (state && c->state != state)
1833 continue;
1834
1835 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1836 continue;
1837
1838 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1839 continue;
1840
1841 if (c->psm == psm) {
1842 int src_match, dst_match;
1843 int src_any, dst_any;
1844
1845 /* Exact match. */
1846 src_match = !bacmp(&c->src, src);
1847 dst_match = !bacmp(&c->dst, dst);
1848 if (src_match && dst_match) {
1849 read_unlock(&chan_list_lock);
1850 return c;
1851 }
1852
1853 /* Closest match */
1854 src_any = !bacmp(&c->src, BDADDR_ANY);
1855 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1856 if ((src_match && dst_any) || (src_any && dst_match) ||
1857 (src_any && dst_any))
1858 c1 = c;
1859 }
1860 }
1861
1862 read_unlock(&chan_list_lock);
1863
1864 return c1;
1865 }
1866
1867 static bool is_valid_psm(u16 psm, u8 dst_type)
1868 {
1869 if (!psm)
1870 return false;
1871
1872 if (bdaddr_type_is_le(dst_type))
1873 return (psm < 0x00ff);
1874
1875 /* PSM must be odd and lsb of upper byte must be 0 */
1876 return ((psm & 0x0101) == 0x0001);
1877 }
1878
/* Connect @chan to @dst on PSM @psm and/or fixed channel @cid.
 *
 * Validates the PSM/CID combination and channel mode, establishes (or
 * reuses) the underlying ACL/LE link, binds the channel to the
 * resulting l2cap_conn and starts L2CAP-level setup.
 *
 * Returns 0 on success or when a connection attempt is already in
 * progress, a negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* Raw channels are exempt from the PSM/CID validity check */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need at least one of PSM/CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* The requested destination CID must not be in use already */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* NOTE: the channel lock is dropped around l2cap_chan_add() */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* If the link is already up, start L2CAP setup right away */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
2012
2013 static void l2cap_monitor_timeout(struct work_struct *work)
2014 {
2015 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2016 monitor_timer.work);
2017
2018 BT_DBG("chan %p", chan);
2019
2020 l2cap_chan_lock(chan);
2021
2022 if (!chan->conn) {
2023 l2cap_chan_unlock(chan);
2024 l2cap_chan_put(chan);
2025 return;
2026 }
2027
2028 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2029
2030 l2cap_chan_unlock(chan);
2031 l2cap_chan_put(chan);
2032 }
2033
2034 static void l2cap_retrans_timeout(struct work_struct *work)
2035 {
2036 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2037 retrans_timer.work);
2038
2039 BT_DBG("chan %p", chan);
2040
2041 l2cap_chan_lock(chan);
2042
2043 if (!chan->conn) {
2044 l2cap_chan_unlock(chan);
2045 l2cap_chan_put(chan);
2046 return;
2047 }
2048
2049 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2050 l2cap_chan_unlock(chan);
2051 l2cap_chan_put(chan);
2052 }
2053
/* Transmit frames in Streaming Mode.
 *
 * Appends @skbs to the tx queue and immediately sends everything
 * queued: streaming mode has no retransmission, so frames leave the
 * queue as soon as they are numbered and checksummed.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while the channel is moving between controllers */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append FCS over header + payload when enabled */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2092
/* Send as many new I-frames as the remote's tx window allows.
 *
 * Returns the number of frames transmitted, 0 when sending is blocked
 * (remote busy, channel moving, TX state not XMIT) or -ENOTCONN when
 * the channel is no longer connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggy-back the F-bit if one is owed to the remote */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance to the next unsent frame (the original stays
		 * on tx_q for possible retransmission).
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2162
/* Retransmit every frame queued on chan->retrans_list.
 *
 * Originals stay on tx_q; clones (or writable copies, if the skb is
 * already cloned) are sent so the originals remain available for
 * further retransmission.  Exceeding max_tx retries disconnects the
 * channel.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* Give up and disconnect once the retry limit is hit */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit in the local control copy */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2241
/* Queue a single frame (identified by control->reqseq) for
 * retransmission and send it.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2250
/* Retransmit all unacknowledged I-frames starting at control->reqseq.
 *
 * Rebuilds the retransmission list from tx_q (from the requested
 * sequence number up to the first unsent frame) and resends it, unless
 * the remote is busy.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll requires our next frame to carry the F-bit */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to the first unsent frame */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2284
/* Acknowledge received I-frames, either by piggy-backing on outgoing
 * data or with an explicit S-frame.
 *
 * Sends RNR when we are locally busy, RR when the ack backlog reaches
 * 3/4 of the window, and otherwise (re)arms the ack timer to batch
 * further acknowledgements.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local busy: tell the remote to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked gets batched by the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2334
/* Copy @len bytes of user data from @msg into @skb, allocating
 * continuation fragments of at most conn->mtu bytes as needed.
 *
 * @count bytes go into @skb itself (already sized by the caller); the
 * remainder is chained onto skb's frag_list.  Returns the number of
 * bytes copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the parent skb's accounting in sync */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2379
/* Build a connectionless PDU: basic L2CAP header plus the 2-byte PSM,
 * followed by the user data from @msg (fragmented to conn->mtu).
 *
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	/* First fragment is bounded by the connection MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2414
/* Build a single basic-mode (B-frame) PDU: basic L2CAP header plus
 * the payload from the user's iovec, with overflow chained onto the
 * skb's frag_list by l2cap_skbuff_fromiovec().
 *
 * Returns the skb on success or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Payload that fits in the first HCI fragment alongside the header */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2447
/* Build a single ERTM/streaming I-frame PDU.  Header space covers the
 * basic L2CAP header, the enhanced or extended control field, an
 * optional SDU length field (present only in the first fragment of a
 * segmented SDU) and, when FCS is enabled, the trailing checksum.
 * The control field is zeroed here and populated at transmit time.
 *
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* lh->len counts everything after the basic header, including
	 * control/SDU-length fields (FCS is appended later).
	 */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2501
/* Split an outgoing SDU into I-frame PDUs and append them to
 * @seg_queue.  A single-PDU SDU is marked SAR "unsegmented"; otherwise
 * the first PDU carries SAR "start" plus the total SDU length, the
 * middle ones "continue", and the last "end".  On PDU allocation
 * failure the partially built queue is purged and an errno returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First PDU of a segmented SDU carries the SDU length,
		 * which costs L2CAP_SDULEN_SIZE bytes of payload space.
		 */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU carries the SDU length;
			 * later PDUs regain that payload space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2571
/* Build a single LE Credit Based Flow Control mode PDU: basic L2CAP
 * header, an optional 2-byte SDU length (first PDU of an SDU only),
 * then payload from the user's iovec.  LE CoC has no control field,
 * SAR bits or FCS.
 *
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2614
/* Split an outgoing SDU into LE flow-control PDUs and append them to
 * @seg_queue.  Only the first PDU carries the SDU length field; the
 * PDU payload size is bounded by both the HCI MTU and the peer's MPS.
 * On failure the partially built queue is purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	/* First PDU carries the total SDU length */
	sdu_len = len;
	pdu_len -= L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Subsequent PDUs have no SDU length field and so
			 * regain those two bytes of payload space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2654
2655 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2656 u32 priority)
2657 {
2658 struct sk_buff *skb;
2659 int err;
2660 struct sk_buff_head seg_queue;
2661
2662 if (!chan->conn)
2663 return -ENOTCONN;
2664
2665 /* Connectionless channel */
2666 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2667 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2668 if (IS_ERR(skb))
2669 return PTR_ERR(skb);
2670
2671 l2cap_do_send(chan, skb);
2672 return len;
2673 }
2674
2675 switch (chan->mode) {
2676 case L2CAP_MODE_LE_FLOWCTL:
2677 /* Check outgoing MTU */
2678 if (len > chan->omtu)
2679 return -EMSGSIZE;
2680
2681 if (!chan->tx_credits)
2682 return -EAGAIN;
2683
2684 __skb_queue_head_init(&seg_queue);
2685
2686 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2687
2688 if (chan->state != BT_CONNECTED) {
2689 __skb_queue_purge(&seg_queue);
2690 err = -ENOTCONN;
2691 }
2692
2693 if (err)
2694 return err;
2695
2696 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2697
2698 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2699 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2700 chan->tx_credits--;
2701 }
2702
2703 if (!chan->tx_credits)
2704 chan->ops->suspend(chan);
2705
2706 err = len;
2707
2708 break;
2709
2710 case L2CAP_MODE_BASIC:
2711 /* Check outgoing MTU */
2712 if (len > chan->omtu)
2713 return -EMSGSIZE;
2714
2715 /* Create a basic PDU */
2716 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2717 if (IS_ERR(skb))
2718 return PTR_ERR(skb);
2719
2720 l2cap_do_send(chan, skb);
2721 err = len;
2722 break;
2723
2724 case L2CAP_MODE_ERTM:
2725 case L2CAP_MODE_STREAMING:
2726 /* Check outgoing MTU */
2727 if (len > chan->omtu) {
2728 err = -EMSGSIZE;
2729 break;
2730 }
2731
2732 __skb_queue_head_init(&seg_queue);
2733
2734 /* Do segmentation before calling in to the state machine,
2735 * since it's possible to block while waiting for memory
2736 * allocation.
2737 */
2738 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2739
2740 /* The channel could have been closed while segmenting,
2741 * check that it is still connected.
2742 */
2743 if (chan->state != BT_CONNECTED) {
2744 __skb_queue_purge(&seg_queue);
2745 err = -ENOTCONN;
2746 }
2747
2748 if (err)
2749 break;
2750
2751 if (chan->mode == L2CAP_MODE_ERTM)
2752 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2753 else
2754 l2cap_streaming_send(chan, &seg_queue);
2755
2756 err = len;
2757
2758 /* If the skbs were not queued for sending, they'll still be in
2759 * seg_queue and need to be purged.
2760 */
2761 __skb_queue_purge(&seg_queue);
2762 break;
2763
2764 default:
2765 BT_DBG("bad state %1.1x", chan->mode);
2766 err = -EBADFD;
2767 }
2768
2769 return err;
2770 }
2771
/* Send an SREJ S-frame for every missing tx_seq between the next
 * expected sequence number and @txseq, skipping frames already held
 * out-of-order in srej_q.  Each requested seq is recorded in
 * srej_list so the retransmission can be matched on arrival, and
 * expected_tx_seq is advanced past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2794
/* Re-send an SREJ for the most recently requested missing frame (the
 * tail of srej_list).  No-op if there are no outstanding SREJs.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2810
/* Re-send SREJ S-frames for all outstanding missing frames, stopping
 * if @txseq (or the end of the list) is reached.  Each popped seq is
 * appended back onto srej_list, so the initial head is captured to
 * guarantee exactly one pass over the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2836
/* Handle the reqseq (acknowledgement) field of a received frame:
 * free every acknowledged I-frame from the tx queue and advance
 * expected_ack_seq to @reqseq.  The retransmission timer is stopped
 * once no frames remain unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or nothing newly acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* The frame may already have been freed (e.g. not in the
		 * queue any more), so look it up before releasing it.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2868
/* Abandon the SREJ_SENT receive state: drop all out-of-order frames,
 * forget outstanding SREJs, and rewind expected_tx_seq to buffer_seq
 * so reception restarts in the plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2878
/* ERTM transmit state machine, XMIT state: the normal transmitting
 * state.  Data requests are queued and sent immediately; explicit
 * polls and retransmission timeouts send a P=1 supervisory frame and
 * move the machine into WAIT_F until the peer answers with F=1.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new segments and transmit what the window allows */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* RNR was sent while busy: poll the peer with RR
			 * so both sides can resynchronize.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and wait for F=1 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2950
2951 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2952 struct l2cap_ctrl *control,
2953 struct sk_buff_head *skbs, u8 event)
2954 {
2955 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2956 event);
2957
2958 switch (event) {
2959 case L2CAP_EV_DATA_REQUEST:
2960 if (chan->tx_send_head == NULL)
2961 chan->tx_send_head = skb_peek(skbs);
2962 /* Queue data, but don't send. */
2963 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2964 break;
2965 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2966 BT_DBG("Enter LOCAL_BUSY");
2967 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2968
2969 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2970 /* The SREJ_SENT state must be aborted if we are to
2971 * enter the LOCAL_BUSY state.
2972 */
2973 l2cap_abort_rx_srej_sent(chan);
2974 }
2975
2976 l2cap_send_ack(chan);
2977
2978 break;
2979 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2980 BT_DBG("Exit LOCAL_BUSY");
2981 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2982
2983 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2984 struct l2cap_ctrl local_control;
2985 memset(&local_control, 0, sizeof(local_control));
2986 local_control.sframe = 1;
2987 local_control.super = L2CAP_SUPER_RR;
2988 local_control.poll = 1;
2989 local_control.reqseq = chan->buffer_seq;
2990 l2cap_send_sframe(chan, &local_control);
2991
2992 chan->retry_count = 1;
2993 __set_monitor_timer(chan);
2994 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2995 }
2996 break;
2997 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2998 l2cap_process_reqseq(chan, control->reqseq);
2999
3000 /* Fall through */
3001
3002 case L2CAP_EV_RECV_FBIT:
3003 if (control && control->final) {
3004 __clear_monitor_timer(chan);
3005 if (chan->unacked_frames > 0)
3006 __set_retrans_timer(chan);
3007 chan->retry_count = 0;
3008 chan->tx_state = L2CAP_TX_STATE_XMIT;
3009 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3010 }
3011 break;
3012 case L2CAP_EV_EXPLICIT_POLL:
3013 /* Ignore */
3014 break;
3015 case L2CAP_EV_MONITOR_TO:
3016 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3017 l2cap_send_rr_or_rnr(chan, 1);
3018 __set_monitor_timer(chan);
3019 chan->retry_count++;
3020 } else {
3021 l2cap_send_disconn_req(chan, ECONNABORTED);
3022 }
3023 break;
3024 default:
3025 break;
3026 }
3027 }
3028
3029 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3030 struct sk_buff_head *skbs, u8 event)
3031 {
3032 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3033 chan, control, skbs, event, chan->tx_state);
3034
3035 switch (chan->tx_state) {
3036 case L2CAP_TX_STATE_XMIT:
3037 l2cap_tx_state_xmit(chan, control, skbs, event);
3038 break;
3039 case L2CAP_TX_STATE_WAIT_F:
3040 l2cap_tx_state_wait_f(chan, control, skbs, event);
3041 break;
3042 default:
3043 /* Ignore event */
3044 break;
3045 }
3046 }
3047
/* Feed a received frame's reqseq and F bit into the tx state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3054
/* Feed only a received frame's F bit into the tx state machine */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3061
/* Copy frame to all raw sockets on that connection.  A clone of @skb
 * is delivered to every raw channel except the one the frame came
 * from; clone failures silently skip that channel.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)
			continue;

		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv() takes ownership on success; free on rejection */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
3089
3090 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: L2CAP header on the signalling
 * CID (LE or BR/EDR as appropriate), command header, then @dlen bytes
 * of command data.  Data that does not fit in the first HCI fragment
 * is chained as continuation fragments without further L2CAP headers.
 *
 * Returns the skb, or NULL on allocation failure or when the
 * connection MTU cannot even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first fragment with data */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and all fragments chained so far */
	kfree_skb(skb);
	return NULL;
}
3156
/* Parse one configuration option at *ptr, returning its type, length
 * and value, and advance *ptr past it.  Values of 1, 2 or 4 bytes are
 * returned inline (little endian); anything else is returned as a
 * pointer into the request buffer.  Returns the total size consumed.
 *
 * NOTE(review): opt->len comes from the peer and is not checked here
 * against the remaining buffer; the caller's loop only guarantees
 * L2CAP_CONF_OPT_SIZE bytes remain, not the option payload itself —
 * verify that callers bound the walk correctly.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Odd-sized option: hand back a pointer to the raw bytes */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3190
/* Append one configuration option (type, len, value) at *ptr and
 * advance the pointer past it.  1/2/4-byte values are stored inline
 * (little endian); larger values are copied from the buffer that
 * @val points to.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* @val is a pointer to the option payload in this case */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3220
/* Append an EFS (extended flow specification) option appropriate for
 * the channel's mode: locally configured values for ERTM, best-effort
 * defaults for streaming.  Other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3251
/* Delayed-work handler for the ERTM ack timer: if any received
 * I-frames are still unacknowledged, send an RR (or RNR when busy)
 * to acknowledge them.  The final l2cap_chan_put() drops the channel
 * reference taken when the timer was armed (presumably by
 * __set_ack_timer — confirm against its definition).
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3271
/* Reset per-channel ERTM/streaming state.  Sequence counters, SDU
 * reassembly state and the tx queue are cleared for both modes; the
 * timers, SREJ machinery and sequence lists are only set up for ERTM
 * proper.  Returns 0, or a negative errno from sequence-list
 * allocation (in which case any partially allocated list is freed).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Channel-move state starts out stable on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3316
3317 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3318 {
3319 switch (mode) {
3320 case L2CAP_MODE_STREAMING:
3321 case L2CAP_MODE_ERTM:
3322 if (l2cap_mode_supported(mode, remote_feat_mask))
3323 return mode;
3324 /* fall through */
3325 default:
3326 return L2CAP_MODE_BASIC;
3327 }
3328 }
3329
3330 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3331 {
3332 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3333 }
3334
3335 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3336 {
3337 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3338 }
3339
/* Fill in the RFC option's retransmission and monitor timeouts.  On
 * an AMP link they are derived from the controller's best-effort
 * flush timeout, clamped to the 16-bit field; on BR/EDR the spec
 * defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3377
/* Choose the transmit window for an ERTM/streaming channel.  A window
 * larger than the default is only allowed when the extended window
 * size option is usable, which also forces the extended control
 * field; otherwise the window is capped at the default.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
3392
/* Build a Configure Request for @chan into @data: select the channel
 * mode (downgrading ERTM/streaming to basic if the peer lacks
 * support), then add MTU, RFC, and optionally EFS, extended window
 * size and FCS options.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the first request/response */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only needed when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC option is only sent when the
		 * peer advertises ERTM or streaming support.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS bounded by the HCI MTU minus the worst-case header */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Windows beyond the default are carried in the EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3510
3511 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3512 {
3513 struct l2cap_conf_rsp *rsp = data;
3514 void *ptr = rsp->data;
3515 void *req = chan->conf_req;
3516 int len = chan->conf_len;
3517 int type, hint, olen;
3518 unsigned long val;
3519 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3520 struct l2cap_conf_efs efs;
3521 u8 remote_efs = 0;
3522 u16 mtu = L2CAP_DEFAULT_MTU;
3523 u16 result = L2CAP_CONF_SUCCESS;
3524 u16 size;
3525
3526 BT_DBG("chan %p", chan);
3527
3528 while (len >= L2CAP_CONF_OPT_SIZE) {
3529 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3530
3531 hint = type & L2CAP_CONF_HINT;
3532 type &= L2CAP_CONF_MASK;
3533
3534 switch (type) {
3535 case L2CAP_CONF_MTU:
3536 mtu = val;
3537 break;
3538
3539 case L2CAP_CONF_FLUSH_TO:
3540 chan->flush_to = val;
3541 break;
3542
3543 case L2CAP_CONF_QOS:
3544 break;
3545
3546 case L2CAP_CONF_RFC:
3547 if (olen == sizeof(rfc))
3548 memcpy(&rfc, (void *) val, olen);
3549 break;
3550
3551 case L2CAP_CONF_FCS:
3552 if (val == L2CAP_FCS_NONE)
3553 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3554 break;
3555
3556 case L2CAP_CONF_EFS:
3557 remote_efs = 1;
3558 if (olen == sizeof(efs))
3559 memcpy(&efs, (void *) val, olen);
3560 break;
3561
3562 case L2CAP_CONF_EWS:
3563 if (!chan->conn->hs_enabled)
3564 return -ECONNREFUSED;
3565
3566 set_bit(FLAG_EXT_CTRL, &chan->flags);
3567 set_bit(CONF_EWS_RECV, &chan->conf_state);
3568 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3569 chan->remote_tx_win = val;
3570 break;
3571
3572 default:
3573 if (hint)
3574 break;
3575
3576 result = L2CAP_CONF_UNKNOWN;
3577 *((u8 *) ptr++) = type;
3578 break;
3579 }
3580 }
3581
3582 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3583 goto done;
3584
3585 switch (chan->mode) {
3586 case L2CAP_MODE_STREAMING:
3587 case L2CAP_MODE_ERTM:
3588 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3589 chan->mode = l2cap_select_mode(rfc.mode,
3590 chan->conn->feat_mask);
3591 break;
3592 }
3593
3594 if (remote_efs) {
3595 if (__l2cap_efs_supported(chan->conn))
3596 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3597 else
3598 return -ECONNREFUSED;
3599 }
3600
3601 if (chan->mode != rfc.mode)
3602 return -ECONNREFUSED;
3603
3604 break;
3605 }
3606
3607 done:
3608 if (chan->mode != rfc.mode) {
3609 result = L2CAP_CONF_UNACCEPT;
3610 rfc.mode = chan->mode;
3611
3612 if (chan->num_conf_rsp == 1)
3613 return -ECONNREFUSED;
3614
3615 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3616 (unsigned long) &rfc);
3617 }
3618
3619 if (result == L2CAP_CONF_SUCCESS) {
3620 /* Configure output options and let the other side know
3621 * which ones we don't like. */
3622
3623 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3624 result = L2CAP_CONF_UNACCEPT;
3625 else {
3626 chan->omtu = mtu;
3627 set_bit(CONF_MTU_DONE, &chan->conf_state);
3628 }
3629 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3630
3631 if (remote_efs) {
3632 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3633 efs.stype != L2CAP_SERV_NOTRAFIC &&
3634 efs.stype != chan->local_stype) {
3635
3636 result = L2CAP_CONF_UNACCEPT;
3637
3638 if (chan->num_conf_req >= 1)
3639 return -ECONNREFUSED;
3640
3641 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3642 sizeof(efs),
3643 (unsigned long) &efs);
3644 } else {
3645 /* Send PENDING Conf Rsp */
3646 result = L2CAP_CONF_PENDING;
3647 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3648 }
3649 }
3650
3651 switch (rfc.mode) {
3652 case L2CAP_MODE_BASIC:
3653 chan->fcs = L2CAP_FCS_NONE;
3654 set_bit(CONF_MODE_DONE, &chan->conf_state);
3655 break;
3656
3657 case L2CAP_MODE_ERTM:
3658 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3659 chan->remote_tx_win = rfc.txwin_size;
3660 else
3661 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3662
3663 chan->remote_max_tx = rfc.max_transmit;
3664
3665 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3666 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3667 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3668 rfc.max_pdu_size = cpu_to_le16(size);
3669 chan->remote_mps = size;
3670
3671 __l2cap_set_ertm_timeouts(chan, &rfc);
3672
3673 set_bit(CONF_MODE_DONE, &chan->conf_state);
3674
3675 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3676 sizeof(rfc), (unsigned long) &rfc);
3677
3678 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3679 chan->remote_id = efs.id;
3680 chan->remote_stype = efs.stype;
3681 chan->remote_msdu = le16_to_cpu(efs.msdu);
3682 chan->remote_flush_to =
3683 le32_to_cpu(efs.flush_to);
3684 chan->remote_acc_lat =
3685 le32_to_cpu(efs.acc_lat);
3686 chan->remote_sdu_itime =
3687 le32_to_cpu(efs.sdu_itime);
3688 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3689 sizeof(efs),
3690 (unsigned long) &efs);
3691 }
3692 break;
3693
3694 case L2CAP_MODE_STREAMING:
3695 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3696 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3697 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3698 rfc.max_pdu_size = cpu_to_le16(size);
3699 chan->remote_mps = size;
3700
3701 set_bit(CONF_MODE_DONE, &chan->conf_state);
3702
3703 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3704 (unsigned long) &rfc);
3705
3706 break;
3707
3708 default:
3709 result = L2CAP_CONF_UNACCEPT;
3710
3711 memset(&rfc, 0, sizeof(rfc));
3712 rfc.mode = chan->mode;
3713 }
3714
3715 if (result == L2CAP_CONF_SUCCESS)
3716 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3717 }
3718 rsp->scid = cpu_to_le16(chan->dcid);
3719 rsp->result = cpu_to_le16(result);
3720 rsp->flags = __constant_cpu_to_le16(0);
3721
3722 return ptr - data;
3723 }
3724
3725 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3726 void *data, u16 *result)
3727 {
3728 struct l2cap_conf_req *req = data;
3729 void *ptr = req->data;
3730 int type, olen;
3731 unsigned long val;
3732 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3733 struct l2cap_conf_efs efs;
3734
3735 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3736
3737 while (len >= L2CAP_CONF_OPT_SIZE) {
3738 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3739
3740 switch (type) {
3741 case L2CAP_CONF_MTU:
3742 if (val < L2CAP_DEFAULT_MIN_MTU) {
3743 *result = L2CAP_CONF_UNACCEPT;
3744 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3745 } else
3746 chan->imtu = val;
3747 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3748 break;
3749
3750 case L2CAP_CONF_FLUSH_TO:
3751 chan->flush_to = val;
3752 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3753 2, chan->flush_to);
3754 break;
3755
3756 case L2CAP_CONF_RFC:
3757 if (olen == sizeof(rfc))
3758 memcpy(&rfc, (void *)val, olen);
3759
3760 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3761 rfc.mode != chan->mode)
3762 return -ECONNREFUSED;
3763
3764 chan->fcs = 0;
3765
3766 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3767 sizeof(rfc), (unsigned long) &rfc);
3768 break;
3769
3770 case L2CAP_CONF_EWS:
3771 chan->ack_win = min_t(u16, val, chan->ack_win);
3772 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3773 chan->tx_win);
3774 break;
3775
3776 case L2CAP_CONF_EFS:
3777 if (olen == sizeof(efs))
3778 memcpy(&efs, (void *)val, olen);
3779
3780 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3781 efs.stype != L2CAP_SERV_NOTRAFIC &&
3782 efs.stype != chan->local_stype)
3783 return -ECONNREFUSED;
3784
3785 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3786 (unsigned long) &efs);
3787 break;
3788
3789 case L2CAP_CONF_FCS:
3790 if (*result == L2CAP_CONF_PENDING)
3791 if (val == L2CAP_FCS_NONE)
3792 set_bit(CONF_RECV_NO_FCS,
3793 &chan->conf_state);
3794 break;
3795 }
3796 }
3797
3798 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3799 return -ECONNREFUSED;
3800
3801 chan->mode = rfc.mode;
3802
3803 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3804 switch (rfc.mode) {
3805 case L2CAP_MODE_ERTM:
3806 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3807 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3808 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3809 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3810 chan->ack_win = min_t(u16, chan->ack_win,
3811 rfc.txwin_size);
3812
3813 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3814 chan->local_msdu = le16_to_cpu(efs.msdu);
3815 chan->local_sdu_itime =
3816 le32_to_cpu(efs.sdu_itime);
3817 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3818 chan->local_flush_to =
3819 le32_to_cpu(efs.flush_to);
3820 }
3821 break;
3822
3823 case L2CAP_MODE_STREAMING:
3824 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3825 }
3826 }
3827
3828 req->dcid = cpu_to_le16(chan->dcid);
3829 req->flags = __constant_cpu_to_le16(0);
3830
3831 return ptr - data;
3832 }
3833
3834 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3835 u16 result, u16 flags)
3836 {
3837 struct l2cap_conf_rsp *rsp = data;
3838 void *ptr = rsp->data;
3839
3840 BT_DBG("chan %p", chan);
3841
3842 rsp->scid = cpu_to_le16(chan->dcid);
3843 rsp->result = cpu_to_le16(result);
3844 rsp->flags = cpu_to_le16(flags);
3845
3846 return ptr - data;
3847 }
3848
3849 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3850 {
3851 struct l2cap_le_conn_rsp rsp;
3852 struct l2cap_conn *conn = chan->conn;
3853
3854 BT_DBG("chan %p", chan);
3855
3856 rsp.dcid = cpu_to_le16(chan->scid);
3857 rsp.mtu = cpu_to_le16(chan->imtu);
3858 rsp.mps = cpu_to_le16(chan->mps);
3859 rsp.credits = cpu_to_le16(chan->rx_credits);
3860 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3861
3862 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3863 &rsp);
3864 }
3865
3866 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3867 {
3868 struct l2cap_conn_rsp rsp;
3869 struct l2cap_conn *conn = chan->conn;
3870 u8 buf[128];
3871 u8 rsp_code;
3872
3873 rsp.scid = cpu_to_le16(chan->dcid);
3874 rsp.dcid = cpu_to_le16(chan->scid);
3875 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3876 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3877
3878 if (chan->hs_hcon)
3879 rsp_code = L2CAP_CREATE_CHAN_RSP;
3880 else
3881 rsp_code = L2CAP_CONN_RSP;
3882
3883 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3884
3885 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3886
3887 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3888 return;
3889
3890 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3891 l2cap_build_conf_req(chan, buf), buf);
3892 chan->num_conf_req++;
3893 }
3894
3895 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3896 {
3897 int type, olen;
3898 unsigned long val;
3899 /* Use sane default values in case a misbehaving remote device
3900 * did not send an RFC or extended window size option.
3901 */
3902 u16 txwin_ext = chan->ack_win;
3903 struct l2cap_conf_rfc rfc = {
3904 .mode = chan->mode,
3905 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3906 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3907 .max_pdu_size = cpu_to_le16(chan->imtu),
3908 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3909 };
3910
3911 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3912
3913 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3914 return;
3915
3916 while (len >= L2CAP_CONF_OPT_SIZE) {
3917 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3918
3919 switch (type) {
3920 case L2CAP_CONF_RFC:
3921 if (olen == sizeof(rfc))
3922 memcpy(&rfc, (void *)val, olen);
3923 break;
3924 case L2CAP_CONF_EWS:
3925 txwin_ext = val;
3926 break;
3927 }
3928 }
3929
3930 switch (rfc.mode) {
3931 case L2CAP_MODE_ERTM:
3932 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3933 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3934 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3935 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3936 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3937 else
3938 chan->ack_win = min_t(u16, chan->ack_win,
3939 rfc.txwin_size);
3940 break;
3941 case L2CAP_MODE_STREAMING:
3942 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3943 }
3944 }
3945
3946 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3947 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3948 u8 *data)
3949 {
3950 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3951
3952 if (cmd_len < sizeof(*rej))
3953 return -EPROTO;
3954
3955 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3956 return 0;
3957
3958 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3959 cmd->ident == conn->info_ident) {
3960 cancel_delayed_work(&conn->info_timer);
3961
3962 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3963 conn->info_ident = 0;
3964
3965 l2cap_conn_start(conn);
3966 }
3967
3968 return 0;
3969 }
3970
/* Core handler for an incoming Connection Request / Create Channel
 * Request.
 *
 * Looks up a listening channel on the requested PSM, performs security
 * and duplicate-CID checks, creates the child channel and transmits the
 * response using @rsp_code (L2CAP_CONN_RSP or L2CAP_CREATE_CHAN_RSP).
 * @amp_id identifies the target controller; AMP_ID_BREDR means a plain
 * BR/EDR channel, anything else forces a PENDING result until the AMP
 * physical link is up.
 *
 * Returns the newly created channel, or NULL when the request was
 * rejected (bad PSM, security block, out of resources, duplicate CID).
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: connection channel-list mutex, then the parent
	 * (listening) channel.
	 */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	/* Until the remote's feature mask is known, only a PENDING
	 * result may be reported.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Pending with no info: we still need the remote feature mask,
	 * so fire off an Information Request now.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Channel accepted immediately: start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4104
/* Handle an incoming L2CAP Connection Request command.
 *
 * Notifies the management interface of the underlying ACL connection
 * the first time it is seen on this link, then delegates channel setup
 * to l2cap_connect() with a plain Connection Response code (amp_id 0,
 * i.e. BR/EDR).
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	/* Reject truncated commands before touching the payload */
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
4125
/* Handle a Connection Response / Create Channel Response from the peer.
 *
 * The responding channel is found either by the source CID the peer
 * assigned (scid != 0) or, while still pending, by the command ident we
 * sent the request with.  On success the channel moves to BT_CONFIG and
 * the first Configure Request is sent; on a pending result we just mark
 * the state; any other result tears the channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A non-zero scid identifies an established lookup; otherwise the
	 * channel is still only known by the request ident.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Don't send a second Configure Request if one is out */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a rejection: drop the channel */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4198
4199 static inline void set_default_fcs(struct l2cap_chan *chan)
4200 {
4201 /* FCS is enabled only in ERTM or streaming mode, if one or both
4202 * sides request it.
4203 */
4204 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4205 chan->fcs = L2CAP_FCS_NONE;
4206 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4207 chan->fcs = L2CAP_FCS_CRC16;
4208 }
4209
4210 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4211 u8 ident, u16 flags)
4212 {
4213 struct l2cap_conn *conn = chan->conn;
4214
4215 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4216 flags);
4217
4218 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4219 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4220
4221 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4222 l2cap_build_conf_rsp(chan, data,
4223 L2CAP_CONF_SUCCESS, flags), data);
4224 }
4225
4226 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4227 u16 scid, u16 dcid)
4228 {
4229 struct l2cap_cmd_rej_cid rej;
4230
4231 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4232 rej.scid = __cpu_to_le16(scid);
4233 rej.dcid = __cpu_to_le16(dcid);
4234
4235 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4236 }
4237
/* Handle an incoming Configure Request for one of our channels.
 *
 * Request fragments (the continuation flag) are accumulated in
 * chan->conf_req until the final fragment arrives, then the complete
 * option set is parsed and a Configure Response is built into @rsp and
 * sent.  Once both directions have finished configuring, the channel is
 * initialized for its mode and marked ready.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unrecoverable parameters: tear the channel down */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both sides done configuring: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We have not sent our own Configure Request yet: do it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4346
/* Handle a Configure Response from the peer.
 *
 * A SUCCESS response records the negotiated RFC parameters; PENDING
 * triggers the EFS handshake (or AMP logical link creation); UNACCEPT
 * retries with adjusted options until L2CAP_CONF_MAX_CONF_RSP attempts
 * are exhausted, after which the channel is disconnected like any other
 * failure result.  When both directions finish, the channel is
 * initialized for its mode and marked ready.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR answers directly; a high-speed channel
			 * must bring up its logical link first.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - retry limit exhausted, treat as failure */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More response fragments are coming; wait for the last one */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4458
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, then tear the channel down.
 *
 * The channel is held across l2cap_chan_del() so it stays valid for the
 * ops->close() callback, which runs after the channel lock is dropped
 * to avoid lock inversion with the socket layer.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold so the channel survives until close() below */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4505
/* Handle a Disconnection Response confirming our earlier request:
 * remove the channel and invoke its close callback.
 *
 * As in l2cap_disconnect_req(), the channel is held across
 * l2cap_chan_del() so ops->close() can run safely after the channel
 * lock is released.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	/* Unknown scid: nothing to do, the channel is already gone */
	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4544
/* Handle an incoming Information Request and answer with the matching
 * Information Response.
 *
 * Supported queries are the feature mask (extended with ERTM/streaming
 * and high-speed bits depending on local settings) and the fixed
 * channel map; anything else gets a NOTSUPP response.  Note that the
 * A2MP bit of the global l2cap_fixed_chan[] table is toggled here based
 * on this connection's hs_enabled flag.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless disabled via the
		 * module parameter.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4599
/* Handle an Information Response to our discovery requests.
 *
 * After receiving the feature mask, a second request is chained to
 * fetch the fixed channel map if the peer supports it.  Once discovery
 * completes (or fails), the info timer is stopped and any channels that
 * were waiting on the feature mask proceed with connection setup.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Chain a fixed channel query if the peer supports it */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4662
/* Handle an incoming Create Channel Request (AMP channel setup).
 *
 * Controller id AMP_ID_BREDR falls back to a plain BR/EDR connect; any
 * other id must name a running AMP controller.  On success, the new
 * channel is tied to the AMP physical link (hs_hcon), FCS is disabled
 * for the high-speed path and the connection MTU switches to the AMP
 * block MTU.  Invalid controller ids are answered with a BAD_AMP
 * response.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP link to the same peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4739
4740 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4741 {
4742 struct l2cap_move_chan_req req;
4743 u8 ident;
4744
4745 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4746
4747 ident = l2cap_get_ident(chan->conn);
4748 chan->ident = ident;
4749
4750 req.icid = cpu_to_le16(chan->scid);
4751 req.dest_amp_id = dest_amp_id;
4752
4753 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4754 &req);
4755
4756 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4757 }
4758
4759 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4760 {
4761 struct l2cap_move_chan_rsp rsp;
4762
4763 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4764
4765 rsp.icid = cpu_to_le16(chan->dcid);
4766 rsp.result = cpu_to_le16(result);
4767
4768 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4769 sizeof(rsp), &rsp);
4770 }
4771
4772 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4773 {
4774 struct l2cap_move_chan_cfm cfm;
4775
4776 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4777
4778 chan->ident = l2cap_get_ident(chan->conn);
4779
4780 cfm.icid = cpu_to_le16(chan->scid);
4781 cfm.result = cpu_to_le16(result);
4782
4783 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4784 sizeof(cfm), &cfm);
4785
4786 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4787 }
4788
4789 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4790 {
4791 struct l2cap_move_chan_cfm cfm;
4792
4793 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4794
4795 cfm.icid = cpu_to_le16(icid);
4796 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4797
4798 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4799 sizeof(cfm), &cfm);
4800 }
4801
4802 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4803 u16 icid)
4804 {
4805 struct l2cap_move_chan_cfm_rsp rsp;
4806
4807 BT_DBG("icid 0x%4.4x", icid);
4808
4809 rsp.icid = cpu_to_le16(icid);
4810 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4811 }
4812
/* Drop the channel's references to its AMP logical link.
 *
 * Currently only clears the pointers; tearing down the underlying
 * logical link is still a placeholder (see comment below).
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4820
/* Handle failure to bring up an AMP logical link for @chan.
 *
 * If the channel is not yet connected the failure happened while
 * creating the channel on the AMP, so the channel is torn down.
 * Otherwise a channel move was in flight and the appropriate
 * move-failure signaling is sent depending on our move role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4851
/* Finish bringing up a channel created on an AMP once its logical link
 * is ready: record the hci_chan, send the deferred EFS config response,
 * and if both configuration directions are complete initialize ERTM and
 * mark the channel ready.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Config response was held back until the logical link came up */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4874
/* Advance the channel-move state machine after the AMP logical link for
 * a move came up.  What happens next depends on whether the peer's move
 * response has been seen yet and on which side initiated the move.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Local busy defers the confirm/response until reassembly
		 * can resume; otherwise signal per our move role.
		 */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4908
/* Completion callback for AMP logical link setup.  Call with chan locked.
 *
 * @status: non-zero means the logical link failed to come up; the
 * failure path also drops our hchan/hcon references.  On success the
 * link either completes a channel creation (channel not yet connected)
 * or a channel move (channel already connected).
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4929
/* Begin moving @chan to the other controller type as move initiator.
 *
 * From BR/EDR a move is only started when channel policy prefers AMP,
 * and physical link setup must happen first (still a placeholder).
 * From an AMP the move target is BR/EDR (id 0) and the Move Channel
 * Request can be sent immediately.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		/* move_id 0 == destination is BR/EDR */
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4948
/* Continue channel creation after physical link setup on an AMP.
 *
 * Outgoing channels (state BT_CONNECT) either proceed with a Create
 * Channel Request on the AMP or fall back to a plain BR/EDR Connect
 * Request.  Incoming channels answer the peer's Create Channel Request
 * and, on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No FCS on AMP channels */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		/* assumes a built conf request always fits in 128 bytes —
		 * matches l2cap_build_conf_req usage elsewhere; TODO confirm
		 */
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
5000
5001 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5002 u8 remote_amp_id)
5003 {
5004 l2cap_move_setup(chan);
5005 chan->move_id = local_amp_id;
5006 chan->move_state = L2CAP_MOVE_WAIT_RSP;
5007
5008 l2cap_send_move_chan_req(chan, remote_amp_id);
5009 }
5010
/* Continue a channel move as responder after physical link setup.
 *
 * NOTE(review): hchan is currently always NULL because obtaining the
 * hci_chan for the logical link is still a placeholder, so today this
 * always answers NOT_ALLOWED; the other branches are prepared for when
 * the placeholder is filled in.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
5035
5036 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5037 {
5038 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5039 u8 rsp_result;
5040 if (result == -EINVAL)
5041 rsp_result = L2CAP_MR_BAD_ID;
5042 else
5043 rsp_result = L2CAP_MR_NOT_ALLOWED;
5044
5045 l2cap_send_move_chan_rsp(chan, rsp_result);
5046 }
5047
5048 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5049 chan->move_state = L2CAP_MOVE_STABLE;
5050
5051 /* Restart data transmission */
5052 l2cap_ertm_send(chan);
5053 }
5054
/* Completion callback for AMP physical link setup.  Invoke with locked
 * chan.
 *
 * Routes to channel creation, move initiation/response, or cancel,
 * depending on channel state, @result and our move role.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED early return unlocks the
 * channel while all other paths leave it locked — verify against the
 * callers that this asymmetry is intended.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5088
/* Handle an incoming L2CAP Move Channel Request.
 *
 * Validates that the referenced channel exists and is movable (dynamic
 * CID, ERTM or streaming mode, policy permits leaving BR/EDR), that the
 * destination AMP controller is present and up, and resolves move
 * collisions, then answers with the appropriate Move Channel Response.
 * Returns 0 when handled, -EPROTO/-EINVAL to make the caller send a
 * command reject.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Peer may not use AMP signaling unless high speed was enabled */
	if (!conn->hs_enabled)
		return -EINVAL;

	/* On success this returns the channel locked */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic channels in ERTM/streaming mode may be moved */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination controller must exist, be an AMP, and be powered */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5186
/* Advance a channel move after a successful or pending Move Channel
 * Response for @icid.
 *
 * If no channel matches, an UNCONFIRMED confirmation is sent as a best
 * guess.  Otherwise the move state machine advances; logical link
 * acquisition for the AMP path is still a placeholder, so that branch
 * currently confirms UNCONFIRMED.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* On success this returns the channel locked */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result re-arms the extended move timeout */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5276
/* Handle a failed Move Channel Response (anything other than success or
 * pending).
 *
 * A COLLISION result demotes this side to move responder; any other
 * failure cancels the move.  In every case an UNCONFIRMED confirmation
 * is sent to complete the signaling exchange.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* On success this returns the channel locked */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5305
5306 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5307 struct l2cap_cmd_hdr *cmd,
5308 u16 cmd_len, void *data)
5309 {
5310 struct l2cap_move_chan_rsp *rsp = data;
5311 u16 icid, result;
5312
5313 if (cmd_len != sizeof(*rsp))
5314 return -EPROTO;
5315
5316 icid = le16_to_cpu(rsp->icid);
5317 result = le16_to_cpu(rsp->result);
5318
5319 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5320
5321 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5322 l2cap_move_continue(conn, icid, result);
5323 else
5324 l2cap_move_fail(conn, cmd->ident, icid, result);
5325
5326 return 0;
5327 }
5328
/* Handle an incoming L2CAP Move Channel Confirmation.
 *
 * Commits (or rolls back) the move on our side and always replies with
 * a confirmation response, even for an unknown icid as the spec
 * requires.  Returns 0 when handled, -EPROTO on a malformed PDU.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* On success this returns the channel locked */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit: the channel now lives on move_id */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Roll back to the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5370
/* Handle an incoming L2CAP Move Channel Confirmation Response, which
 * completes a move we confirmed: commit the new controller id, drop the
 * logical link if we ended up back on BR/EDR, and finish the move.
 * Returns 0 when handled, -EPROTO on a malformed PDU.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* On success this returns the channel locked */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5405
5406 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5407 u16 to_multiplier)
5408 {
5409 u16 max_latency;
5410
5411 if (min > max || min < 6 || max > 3200)
5412 return -EINVAL;
5413
5414 if (to_multiplier < 10 || to_multiplier > 3200)
5415 return -EINVAL;
5416
5417 if (max >= to_multiplier * 8)
5418 return -EINVAL;
5419
5420 max_latency = (to_multiplier * 8 / max) - 1;
5421 if (latency > 499 || latency > max_latency)
5422 return -EINVAL;
5423
5424 return 0;
5425 }
5426
/* Handle an LE Connection Parameter Update Request from the peer.
 *
 * Only the master may process this request (-EINVAL otherwise, which
 * makes the caller send a command reject).  The parameters are
 * validated, an accept/reject response is sent, and on accept the new
 * parameters are pushed to the controller.  Returns 0 when handled,
 * -EPROTO on a malformed PDU.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the accepted parameters on the controller */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5468
/* Handle an LE Credit Based Connection Response for a pending outgoing
 * channel (matched via the signaling ident).
 *
 * On success the peer's dcid, MTU, MPS and initial credits are recorded
 * and the channel becomes ready; any other result tears the channel
 * down.  Returns 0 on success, -EPROTO for a malformed PDU, -EBADSLT if
 * no channel matches the ident.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	/* note: '<' (not '!=') tolerates trailing bytes — TODO confirm
	 * that is intentional
	 */
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is presumably the spec minimum for LE MTU/MPS — see the
	 * matching check in l2cap_le_connect_req()
	 */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5527
/* Dispatch one BR/EDR L2CAP signaling command to its handler.
 *
 * Returns 0 when handled (or deliberately ignored) and a negative error
 * for malformed or unknown commands, which makes the caller send a
 * command reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5607
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Finds a listening channel for the PSM, checks security, allocates a
 * new channel, records the peer's parameters (dcid/MTU/MPS/credits)
 * and responds — immediately with success, or later via the defer
 * mechanism when FLAG_DEFER_SETUP is set (rsp then carries L2CAP_CR_PEND
 * and no response is sent here).  Returns 0 when handled, -EPROTO on a
 * malformed PDU.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is presumably the spec minimum for LE MTU/MPS — see the
	 * matching check in l2cap_le_connect_rsp()
	 */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	/* Deferred setup answers later via l2cap_le_connect_rsp */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5717
5718 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5719 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5720 u8 *data)
5721 {
5722 struct l2cap_le_credits *pkt;
5723 struct l2cap_chan *chan;
5724 u16 cid, credits;
5725
5726 if (cmd_len != sizeof(*pkt))
5727 return -EPROTO;
5728
5729 pkt = (struct l2cap_le_credits *) data;
5730 cid = __le16_to_cpu(pkt->cid);
5731 credits = __le16_to_cpu(pkt->credits);
5732
5733 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5734
5735 chan = l2cap_get_chan_by_dcid(conn, cid);
5736 if (!chan)
5737 return -EBADSLT;
5738
5739 chan->tx_credits += credits;
5740
5741 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5742 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5743 chan->tx_credits--;
5744 }
5745
5746 if (chan->tx_credits)
5747 chan->ops->resume(chan);
5748
5749 l2cap_chan_unlock(chan);
5750
5751 return 0;
5752 }
5753
/* Dispatch one LE L2CAP signaling command to its handler.
 *
 * When LE Connection Oriented Channels are disabled (enable_lecoc
 * module parameter) the CoC opcodes are rejected outright.  Returns 0
 * when handled and a negative error for malformed or unknown commands,
 * which makes the caller send a command reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	if (!enable_lecoc) {
		switch (cmd->code) {
		case L2CAP_LE_CONN_REQ:
		case L2CAP_LE_CONN_RSP:
		case L2CAP_LE_CREDITS:
		case L2CAP_DISCONN_REQ:
		case L2CAP_DISCONN_RSP:
			return -EINVAL;
		}
	}

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5810
/* Process an skb received on the LE signaling channel.
 *
 * Unlike BR/EDR, an LE signaling frame carries exactly one command, so
 * the length must match the command header exactly.  Handler errors are
 * answered with a NOT_UNDERSTOOD command reject.  Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* ident 0 is reserved and a length mismatch means corruption */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message is misleading — err is the handler's
		 * error code, not a link type
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5851
/* Process an skb received on the BR/EDR signaling channel.
 *
 * A single BR/EDR signaling frame may contain several concatenated
 * commands, so iterate header by header.  Each command is also mirrored
 * to raw sockets first.  Handler errors are answered with a
 * NOT_UNDERSTOOD command reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* ident 0 is reserved; a command overrunning the frame
		 * means corruption, stop parsing
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): message is misleading — err is the
			 * handler's error code, not a link type
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5900
/* Verify and strip the CRC16 FCS on a received ERTM/streaming frame.
 *
 * The CRC covers the L2CAP header (already pulled off, hence the
 * skb->data - hdr_size rebase) plus the payload.  The trailing 2 FCS
 * bytes are trimmed first; skb_trim() only shrinks skb->len, so the
 * FCS is still readable at skb->data + skb->len right afterwards.
 * Returns 0 on success (or when FCS is not in use), -EBADMSG on a
 * checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5921
/* Answer a poll (P-bit) from the peer with an F-bit set frame.
 *
 * Sends RNR if we are locally busy, then tries to satisfy the poll by
 * transmitting pending I-frames; if neither carried the F-bit, an RR is
 * sent explicitly.  Also restarts the retransmission timer when the
 * peer left the busy state with frames still unacked.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	/* The flag is consumed by whichever frame carries the F-bit first */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5955
/* Append @new_frag to the frag_list of @skb (a partially reassembled
 * SDU) and account for its length. *last_frag caches the current list
 * tail so appends are O(1); it is updated to point at @new_frag.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5974
/* Feed one I-frame payload into SDU reassembly.
 *
 * Based on the SAR bits of @control the skb is either delivered
 * directly (unsegmented), starts a new SDU (its first two bytes carry
 * the total SDU length), or is appended to the SDU in progress. A
 * completed SDU is handed to chan->ops->recv().
 *
 * Ownership: on success the skb has been consumed (delivered or linked
 * into chan->sdu). On error both the skb and any partial SDU are freed
 * and the reassembly state is reset; the caller should treat a
 * non-zero return as a protocol error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A lone frame while an SDU is in progress is invalid */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* Can't start a new SDU before the previous one ended */
		if (chan->sdu)
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start frame must not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by the reassembly state */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation may not complete (or overflow) the
		 * announced SDU length - only an end frame may.
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end frame must bring the SDU to exactly the
		 * length announced in the start frame.
		 */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the frame and any partial SDU, then reset */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6056
/* Re-segment queued outbound data after a channel move changed the
 * usable MTU. Not implemented yet - currently a successful no-op.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6062
6063 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6064 {
6065 u8 event;
6066
6067 if (chan->mode != L2CAP_MODE_ERTM)
6068 return;
6069
6070 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6071 l2cap_tx(chan, NULL, NULL, event);
6072 }
6073
/* Drain in-sequence frames that were parked on the SREJ hold queue.
 * When the queue empties, normal reception resumes and an ack is sent.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* No frame with the next expected seq - gap remains */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps filled - leave the SREJ_SENT state */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6107
/* Handle a received SREJ S-frame: selectively retransmit the single
 * I-frame named by control->reqseq.
 *
 * The requested seq must refer to a sent-but-unacked frame still in
 * the tx queue whose retry budget (max_tx) is not exhausted; protocol
 * violations disconnect the channel. CONN_SREJ_ACT together with
 * srej_save_reqseq prevents retransmitting the same frame twice during
 * a poll/final exchange.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for the next, not-yet-sent seq is impossible */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* SREJ with P=1: respond with F=1 on the retransmission
		 * and remember the reqseq so the matching final frame
		 * is not retransmitted a second time.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this final frame
			 * matches an SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6165
/* Handle a received REJ S-frame: the peer rejects everything from
 * control->reqseq onward, so retransmit all unacked frames from there.
 *
 * An impossible reqseq or an exhausted retry budget (max_tx) tears the
 * channel down. CONN_REJ_ACT guards against acting twice on the same
 * REJ during a poll/final exchange.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the next, not-yet-sent seq is impossible */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this final was not already matched
		 * to a REJ we acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6202
/* Classify the txseq of a received I-frame relative to the current
 * receive window (ERTM receiver rules).
 *
 * Returns an L2CAP_TXSEQ_* classification: expected in-order data, a
 * duplicate, a frame previously requested via SREJ, a gap that needs
 * new SREJs, or an invalid sequence that is either safely ignorable or
 * fatal depending on the tx window size (see the "double poll" note
 * below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq: already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6288
/* ERTM receiver state machine, RECV (normal) state.
 *
 * Handles incoming I-frames (in-order delivery; SREJ initiation when a
 * sequence gap is detected, which moves the channel to SREJ_SENT) and
 * S-frame events (RR/RNR/REJ/SREJ). @skb is consumed when it is queued
 * or reassembled (skb_in_use); otherwise it is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already delivered; only the embedded ack matters */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6422
/* ERTM receiver state machine, SREJ_SENT state: one or more SREJs are
 * outstanding and out-of-order frames are parked on srej_q.
 *
 * Requested retransmissions are queued and, as gaps fill in, drained
 * via l2cap_rx_queued_iframes() (which also returns the channel to the
 * RECV state once srej_q is empty). Further gaps generate additional
 * SREJs. @skb is consumed when queued (skb_in_use); otherwise it is
 * freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived - queue it and
			 * try to drain the in-sequence run.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with the tail of the SREJ list */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6565
6566 static int l2cap_finish_move(struct l2cap_chan *chan)
6567 {
6568 BT_DBG("chan %p", chan);
6569
6570 chan->rx_state = L2CAP_RX_STATE_RECV;
6571
6572 if (chan->hs_hcon)
6573 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6574 else
6575 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6576
6577 return l2cap_resegment(chan);
6578 }
6579
/* ERTM receiver state machine, WAIT_P state (channel move): wait for
 * the peer's poll (P=1) before resuming.
 *
 * On the poll, rewind the transmit side to the peer's reqseq, finish
 * the move (MTU update plus resegmentation), answer with an F-bit
 * frame, and process any remaining S-frame event in the RECV state.
 * I-frames are not valid in this state.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6617
/* ERTM receiver state machine, WAIT_F state (channel move): wait for a
 * frame with the F-bit answering our poll.
 *
 * On the final bit, rewind the transmit side to the peer's reqseq,
 * adopt the MTU of the controller now carrying the channel, resegment,
 * and hand the frame to the normal RECV-state handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6655
6656 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6657 {
6658 /* Make sure reqseq is for a packet that has been sent but not acked */
6659 u16 unacked;
6660
6661 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6662 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6663 }
6664
/* Entry point of the ERTM receive state machine: validate the ack
 * field (reqseq) and dispatch @skb/@event to the handler for the
 * current rx_state. An unacceptable reqseq is a protocol violation
 * and disconnects the channel.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
6701
/* Streaming-mode receive: no retransmissions exist, so only the
 * expected txseq is reassembled; any other classification discards
 * both the frame and any partially reassembled SDU. The receive
 * window then resynchronizes to the received txseq. Always consumes
 * @skb and always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence - drop frame and reset reassembly */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resync the window to whatever the peer actually sent */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6739
/* Process one ERTM/streaming PDU: verify the FCS, account for the
 * SDU-length and FCS overhead when checking the payload against MPS,
 * validate the F/P bits for the current tx state, then feed the frame
 * into l2cap_rx() (ERTM) or l2cap_stream_rx() (streaming). Malformed
 * frames are either silently dropped (ERTM recovery will retransmit)
 * or disconnect the channel. Always consumes @skb; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Only the payload counts against the negotiated MPS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame supervisory function to an rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6827
/* Top up the peer's LE flow-control credits.
 *
 * Once our remaining rx credits drop below half of le_max_credits,
 * refill the peer back up to le_max_credits with an LE Flow Control
 * Credit signalling packet on this channel's source CID.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6853
/* Receive one LE credit-based flow control PDU and reassemble SDUs.
 *
 * Consumes one rx credit per PDU and tops the sender up via
 * l2cap_chan_le_send_credits(). The first PDU of an SDU carries a
 * 16-bit SDU length prefix; later PDUs are appended until the
 * announced length is reached, then the complete SDU is delivered
 * through chan->ops->recv().
 *
 * Returns a negative error only when the skb was NOT consumed (no
 * credits left, or PDU larger than the MTU) so the caller may free it;
 * in every other case the skb is handled here and 0 is returned.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unfragmented SDU - deliver directly.
		 * NOTE(review): recv()'s return is propagated here even
		 * though the comment below says errors must not be
		 * returned; verify recv() never leaves the skb for the
		 * caller to free on error.
		 */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6942
/* Dispatch an inbound data PDU on a connection-oriented channel.
 *
 * The channel is looked up by destination CID; unknown CIDs are
 * dropped, except L2CAP_CID_A2MP where an A2MP channel is created on
 * demand. The skb is consumed on all paths. The channel lock is
 * released at 'done' - l2cap_get_chan_by_scid() is presumed to return
 * the channel locked, matching the explicit l2cap_chan_lock() in the
 * A2MP path (TODO confirm against its definition).
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Negative return means the skb was not consumed */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
7007
/* Deliver a connectionless (G-frame) PDU to the global channel
 * listening on @psm, if any. BR/EDR links only. The remote address
 * and PSM are stashed in the skb control block for recvmsg()'s
 * msg_name. The skb is freed unless the channel's recv op consumes it.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
7040
/* Deliver an ATT PDU (fixed CID) received on an LE link to the
 * matching connected channel. Blacklisted remote addresses and PDUs
 * exceeding the channel MTU are dropped. The skb is freed unless the
 * channel's recv op consumes it.
 */
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != LE_LINK)
		goto drop;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 &hcon->src, &hcon->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
7069
/* Demultiplex one reassembled L2CAP frame by CID.
 *
 * The basic header (length + CID) is stripped and the declared length
 * must match the remaining payload exactly, otherwise the frame is
 * discarded. Fixed CIDs (signalling, connectionless, ATT, LE
 * signalling, SMP) go to their dedicated handlers; every other CID is
 * treated as a connection-oriented data channel.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* G-frames carry the PSM right after the basic header */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7116
7117 /* ---- L2CAP interface with lower layer (HCI) ---- */
7118
7119 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7120 {
7121 int exact = 0, lm1 = 0, lm2 = 0;
7122 struct l2cap_chan *c;
7123
7124 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7125
7126 /* Find listening sockets and check their link_mode */
7127 read_lock(&chan_list_lock);
7128 list_for_each_entry(c, &chan_list, global_l) {
7129 if (c->state != BT_LISTEN)
7130 continue;
7131
7132 if (!bacmp(&c->src, &hdev->bdaddr)) {
7133 lm1 |= HCI_LM_ACCEPT;
7134 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7135 lm1 |= HCI_LM_MASTER;
7136 exact++;
7137 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7138 lm2 |= HCI_LM_ACCEPT;
7139 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7140 lm2 |= HCI_LM_MASTER;
7141 }
7142 }
7143 read_unlock(&chan_list_lock);
7144
7145 return exact ? lm1 : lm2;
7146 }
7147
7148 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7149 {
7150 struct l2cap_conn *conn;
7151
7152 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7153
7154 if (!status) {
7155 conn = l2cap_conn_add(hcon);
7156 if (conn)
7157 l2cap_conn_ready(conn);
7158 } else {
7159 l2cap_conn_del(hcon, bt_to_errno(status));
7160 }
7161 }
7162
7163 int l2cap_disconn_ind(struct hci_conn *hcon)
7164 {
7165 struct l2cap_conn *conn = hcon->l2cap_data;
7166
7167 BT_DBG("hcon %p", hcon);
7168
7169 if (!conn)
7170 return HCI_ERROR_REMOTE_USER_TERM;
7171 return conn->disc_reason;
7172 }
7173
/* HCI notification that the ACL link was disconnected.  Tear down the
 * L2CAP connection, translating the HCI reason code into an errno for
 * the upper layers.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7180
7181 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7182 {
7183 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7184 return;
7185
7186 if (encrypt == 0x00) {
7187 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7188 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7189 } else if (chan->sec_level == BT_SECURITY_HIGH)
7190 l2cap_chan_close(chan, ECONNREFUSED);
7191 } else {
7192 if (chan->sec_level == BT_SECURITY_MEDIUM)
7193 __clear_chan_timer(chan);
7194 }
7195 }
7196
/* HCI notification of an authentication/encryption state change on the
 * link.  Walks every channel on the connection under chan_lock and
 * advances, resumes, or tears down each channel's state machine based
 * on the security outcome.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* On LE, successful encryption kicks off SMP key
		 * distribution; the security timer is no longer needed
		 * either way.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not affected by link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* The ATT fixed channel becomes ready as soon as the link
		 * is successfully encrypted.
		 */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels that still have a connect request pending */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			/* Established channel: resume data flow and
			 * re-evaluate its encryption requirements.
			 */
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection that was waiting for the
			 * security procedure: send the deferred connect
			 * response now that the outcome is known.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, start configuration immediately if we
			 * have not yet sent a config request.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7301
/* Entry point for ACL data from the HCI core.  Reassembles fragmented
 * L2CAP frames (an ACL_START fragment followed by ACL_CONT fragments)
 * into conn->rx_skb and hands complete frames to l2cap_recv_frame().
 * Always consumes skb.  Returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start fragment while reassembly is still in progress
		 * means the previous frame was truncated - discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Remember how many bytes are still outstanding */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start fragment */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* Intentional fall-through for fragments: the fragment data was
	 * copied into rx_skb, so the original skb is always freed here.
	 */
drop:
	kfree_skb(skb);
	return 0;
}
7406
7407 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7408 {
7409 struct l2cap_chan *c;
7410
7411 read_lock(&chan_list_lock);
7412
7413 list_for_each_entry(c, &chan_list, global_l) {
7414 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7415 &c->src, &c->dst,
7416 c->state, __le16_to_cpu(c->psm),
7417 c->scid, c->dcid, c->imtu, c->omtu,
7418 c->sec_level, c->mode);
7419 }
7420
7421 read_unlock(&chan_list_lock);
7422
7423 return 0;
7424 }
7425
/* debugfs open: bind the seq_file show routine to the channel dump */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7430
/* File operations for the read-only "l2cap" debugfs channel listing */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the "l2cap" debugfs file, removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7439
7440 int __init l2cap_init(void)
7441 {
7442 int err;
7443
7444 err = l2cap_init_sockets();
7445 if (err < 0)
7446 return err;
7447
7448 if (IS_ERR_OR_NULL(bt_debugfs))
7449 return 0;
7450
7451 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7452 NULL, &l2cap_debugfs_fops);
7453
7454 debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs,
7455 &le_max_credits);
7456 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
7457 &le_default_mps);
7458
7459 return 0;
7460 }
7461
/* Tear down the L2CAP layer: remove the debugfs file and unregister
 * the socket interface.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7467
/* Expose disable_ertm as a writable module parameter (0644) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");