Bluetooth: Add LE L2CAP flow control mode
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
44 bool disable_ertm;
45
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
48
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
51
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
55 void *data);
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
58
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
61
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
63 {
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
67 else
68 return BDADDR_LE_RANDOM;
69 }
70
71 return BDADDR_BREDR;
72 }
73
74 /* ---- L2CAP channels ---- */
75
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
77 u16 cid)
78 {
79 struct l2cap_chan *c;
80
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->dcid == cid)
83 return c;
84 }
85 return NULL;
86 }
87
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
89 u16 cid)
90 {
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98 }
99
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
109 if (c)
110 l2cap_chan_lock(c);
111 mutex_unlock(&conn->chan_lock);
112
113 return c;
114 }
115
116 /* Find channel with given DCID.
117 * Returns locked channel.
118 */
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
120 u16 cid)
121 {
122 struct l2cap_chan *c;
123
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
126 if (c)
127 l2cap_chan_lock(c);
128 mutex_unlock(&conn->chan_lock);
129
130 return c;
131 }
132
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
134 u8 ident)
135 {
136 struct l2cap_chan *c;
137
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
140 return c;
141 }
142 return NULL;
143 }
144
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 u8 ident)
147 {
148 struct l2cap_chan *c;
149
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
152 if (c)
153 l2cap_chan_lock(c);
154 mutex_unlock(&conn->chan_lock);
155
156 return c;
157 }
158
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
160 {
161 struct l2cap_chan *c;
162
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
165 return c;
166 }
167 return NULL;
168 }
169
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
171 {
172 int err;
173
174 write_lock(&chan_list_lock);
175
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
177 err = -EADDRINUSE;
178 goto done;
179 }
180
181 if (psm) {
182 chan->psm = psm;
183 chan->sport = psm;
184 err = 0;
185 } else {
186 u16 p;
187
188 err = -EINVAL;
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
193 err = 0;
194 break;
195 }
196 }
197
198 done:
199 write_unlock(&chan_list_lock);
200 return err;
201 }
202
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
204 {
205 write_lock(&chan_list_lock);
206
207 chan->scid = scid;
208
209 write_unlock(&chan_list_lock);
210
211 return 0;
212 }
213
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
215 {
216 u16 cid = L2CAP_CID_DYN_START;
217
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
220 return cid;
221 }
222
223 return 0;
224 }
225
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
227 {
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
230
231 chan->state = state;
232 chan->ops->state_change(chan, state, 0);
233 }
234
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
236 int state, int err)
237 {
238 chan->state = state;
239 chan->ops->state_change(chan, chan->state, err);
240 }
241
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
243 {
244 chan->ops->state_change(chan, chan->state, err);
245 }
246
/* Arm the ERTM retransmission timer.
 *
 * Nothing is armed if the monitor timer is already pending
 * (NOTE(review): presumably because the two timers are mutually
 * exclusive in the ERTM state machine — confirm against the spec) or
 * if retrans_timeout is zero (not yet negotiated).
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
255
256 static void __set_monitor_timer(struct l2cap_chan *chan)
257 {
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
262 }
263 }
264
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 u16 seq)
267 {
268 struct sk_buff *skb;
269
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
272 return skb;
273 }
274
275 return NULL;
276 }
277
278 /* ---- L2CAP sequence number lists ---- */
279
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
287 */
288
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 size_t alloc_size, i;
292
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
296 */
297 alloc_size = roundup_pow_of_two(size);
298
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 if (!seq_list->list)
301 return -ENOMEM;
302
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308
309 return 0;
310 }
311
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
313 {
314 kfree(seq_list->list);
315 }
316
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 u16 seq)
319 {
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
322 }
323
/* Remove one sequence number from the list.
 *
 * The list is a singly-linked chain stored inside an array: list[x]
 * holds the successor of x, L2CAP_SEQ_LIST_TAIL marks the last member
 * and L2CAP_SEQ_LIST_CLEAR marks a non-member.  Removing the head is
 * O(1); removing any other member walks the chain from the head.
 *
 * Returns the removed seq, or L2CAP_SEQ_LIST_CLEAR when the list is
 * empty or seq is not a member.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* If the head was also the tail, the list is now empty */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
357
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359 {
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
362 }
363
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 u16 i;
367
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
370
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377
/* Append seq to the tail of the list in constant time.
 *
 * Duplicate appends are silently ignored: a non-CLEAR slot means seq
 * is already a member, and linking it again would corrupt the chain.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* An empty list has no tail to link from; seq becomes the head */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395
/* Delayed-work handler run when chan_timer expires.
 *
 * Locking order is conn->chan_lock then the channel lock, matching the
 * rest of this file.  chan->ops->close() is invoked after the channel
 * lock is released but still under chan_lock.  The reference the timer
 * held on the channel is dropped at the end.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Map the state at expiry onto the error reported upward:
	 * established/configuring channels and secured outgoing connects
	 * get ECONNREFUSED, anything else a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
425
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 struct l2cap_chan *chan;
429
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 if (!chan)
432 return NULL;
433
434 mutex_init(&chan->lock);
435
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
439
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441
442 chan->state = BT_OPEN;
443
444 kref_init(&chan->kref);
445
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448
449 BT_DBG("chan %p", chan);
450
451 return chan;
452 }
453
454 static void l2cap_chan_destroy(struct kref *kref)
455 {
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
457
458 BT_DBG("chan %p", chan);
459
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
463
464 kfree(chan);
465 }
466
467 void l2cap_chan_hold(struct l2cap_chan *c)
468 {
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
470
471 kref_get(&c->kref);
472 }
473
474 void l2cap_chan_put(struct l2cap_chan *c)
475 {
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
477
478 kref_put(&c->kref, l2cap_chan_destroy);
479 }
480
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
489
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492
493 void l2cap_le_flowctl_init(struct l2cap_chan *chan)
494 {
495 chan->imtu = L2CAP_DEFAULT_MTU;
496 chan->omtu = L2CAP_LE_MIN_MTU;
497 chan->mode = L2CAP_MODE_LE_FLOWCTL;
498 }
499
/* Attach chan to conn, assigning CIDs and MTUs according to the channel
 * type.  Takes a reference on the channel and on the underlying HCI
 * connection before publishing the channel on conn->chan_l.  Caller
 * must hold conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Adding a channel means the link is in use again */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			/* ATT uses the same fixed CID on both ends */
			if (chan->dcid == L2CAP_CID_ATT)
				chan->scid = L2CAP_CID_ATT;
			else
				chan->scid = l2cap_alloc_cid(conn);
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended flow spec: best-effort service */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
559
560 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
561 {
562 mutex_lock(&conn->chan_lock);
563 __l2cap_chan_add(conn, chan);
564 mutex_unlock(&conn->chan_lock);
565 }
566
/* Detach chan from its connection and release mode-specific resources.
 *
 * Drops the channel and hci_conn references taken in __l2cap_chan_add().
 * If configuration never completed (CONF_NOT_COMPLETE still set) the
 * mode-specific queues/timers were never initialised, so their cleanup
 * is skipped.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* NOTE(review): the A2MP fixed channel skips the hci_conn
		 * ref drop here — presumably its reference is managed by
		 * the AMP manager; confirm against the a2mp code.
		 */
		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		/* Both ERTM and streaming modes queue outbound frames */
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
629
630 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
631 {
632 struct l2cap_conn *conn = chan->conn;
633 struct l2cap_le_conn_rsp rsp;
634 u16 result;
635
636 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
637 result = L2CAP_CR_AUTHORIZATION;
638 else
639 result = L2CAP_CR_BAD_PSM;
640
641 l2cap_state_change(chan, BT_DISCONN);
642
643 rsp.dcid = cpu_to_le16(chan->scid);
644 rsp.mtu = cpu_to_le16(chan->imtu);
645 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
646 rsp.credits = __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS);
647 rsp.result = cpu_to_le16(result);
648
649 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
650 &rsp);
651 }
652
653 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
654 {
655 struct l2cap_conn *conn = chan->conn;
656 struct l2cap_conn_rsp rsp;
657 u16 result;
658
659 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
660 result = L2CAP_CR_SEC_BLOCK;
661 else
662 result = L2CAP_CR_BAD_PSM;
663
664 l2cap_state_change(chan, BT_DISCONN);
665
666 rsp.scid = cpu_to_le16(chan->dcid);
667 rsp.dcid = cpu_to_le16(chan->scid);
668 rsp.result = cpu_to_le16(result);
669 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
670
671 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
672 }
673
/* Close a channel according to its current state: send a disconnect or
 * connection-reject where the protocol requires one, otherwise just
 * remove the channel.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
		 * check for chan->psm.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
			/* Give the peer time to answer the disconnect req */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request not yet accepted: reject it with the
		 * transport-specific response before deleting.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
718
/* Derive the HCI authentication requirement from the channel type, PSM
 * and requested security level.  Note the side effect: SDP-like PSMs
 * demote a BT_SECURITY_LOW channel to BT_SECURITY_SDP in place.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw (signalling) channels map security to dedicated bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		/* The 3D sync profile PSM gets SDP-level security */
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP itself never bonds; other PSMs fall through to the
		 * general-bonding rules below.
		 */
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
765
766 /* Service level security */
767 int l2cap_chan_check_security(struct l2cap_chan *chan)
768 {
769 struct l2cap_conn *conn = chan->conn;
770 __u8 auth_type;
771
772 if (conn->hcon->type == LE_LINK)
773 return smp_conn_security(conn->hcon, chan->sec_level);
774
775 auth_type = l2cap_get_auth_type(chan);
776
777 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
778 }
779
780 static u8 l2cap_get_ident(struct l2cap_conn *conn)
781 {
782 u8 id;
783
784 /* Get next available identificator.
785 * 1 - 128 are used by kernel.
786 * 129 - 199 are reserved.
787 * 200 - 254 are used by utilities like l2ping, etc.
788 */
789
790 spin_lock(&conn->lock);
791
792 if (++conn->tx_ident > 128)
793 conn->tx_ident = 1;
794
795 id = conn->tx_ident;
796
797 spin_unlock(&conn->lock);
798
799 return id;
800 }
801
802 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
803 void *data)
804 {
805 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
806 u8 flags;
807
808 BT_DBG("code 0x%2.2x", code);
809
810 if (!skb)
811 return;
812
813 if (lmp_no_flush_capable(conn->hcon->hdev))
814 flags = ACL_START_NO_FLUSH;
815 else
816 flags = ACL_START;
817
818 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
819 skb->priority = HCI_PRIO_MAX;
820
821 hci_send_acl(conn->hchan, skb, flags);
822 }
823
824 static bool __chan_is_moving(struct l2cap_chan *chan)
825 {
826 return chan->move_state != L2CAP_MOVE_STABLE &&
827 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
828 }
829
/* Transmit one frame for chan.
 *
 * If the channel has been moved to an AMP controller (hs_hcon set, no
 * move in progress) the frame goes out on the high-speed logical link;
 * otherwise it is sent on the ACL link, unflushable when the channel
 * is not flushable and the controller supports it.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb); /* no logical link yet: drop frame */

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
856
857 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
858 {
859 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
860 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
861
862 if (enh & L2CAP_CTRL_FRAME_TYPE) {
863 /* S-Frame */
864 control->sframe = 1;
865 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
866 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
867
868 control->sar = 0;
869 control->txseq = 0;
870 } else {
871 /* I-Frame */
872 control->sframe = 0;
873 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
874 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
875
876 control->poll = 0;
877 control->super = 0;
878 }
879 }
880
881 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
882 {
883 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
884 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
885
886 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
887 /* S-Frame */
888 control->sframe = 1;
889 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
890 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
891
892 control->sar = 0;
893 control->txseq = 0;
894 } else {
895 /* I-Frame */
896 control->sframe = 0;
897 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
898 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
899
900 control->poll = 0;
901 control->super = 0;
902 }
903 }
904
905 static inline void __unpack_control(struct l2cap_chan *chan,
906 struct sk_buff *skb)
907 {
908 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
909 __unpack_extended_control(get_unaligned_le32(skb->data),
910 &bt_cb(skb)->control);
911 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
912 } else {
913 __unpack_enhanced_control(get_unaligned_le16(skb->data),
914 &bt_cb(skb)->control);
915 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
916 }
917 }
918
919 static u32 __pack_extended_control(struct l2cap_ctrl *control)
920 {
921 u32 packed;
922
923 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
924 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
925
926 if (control->sframe) {
927 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
928 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
929 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
930 } else {
931 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
932 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
933 }
934
935 return packed;
936 }
937
938 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
939 {
940 u16 packed;
941
942 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
943 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
944
945 if (control->sframe) {
946 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
947 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
948 packed |= L2CAP_CTRL_FRAME_TYPE;
949 } else {
950 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
951 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
952 }
953
954 return packed;
955 }
956
957 static inline void __pack_control(struct l2cap_chan *chan,
958 struct l2cap_ctrl *control,
959 struct sk_buff *skb)
960 {
961 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
962 put_unaligned_le32(__pack_extended_control(control),
963 skb->data + L2CAP_HDR_SIZE);
964 } else {
965 put_unaligned_le16(__pack_enhanced_control(control),
966 skb->data + L2CAP_HDR_SIZE);
967 }
968 }
969
970 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
971 {
972 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
973 return L2CAP_EXT_HDR_SIZE;
974 else
975 return L2CAP_ENH_HDR_SIZE;
976 }
977
/* Build an ERTM S-frame PDU carrying the given packed control field.
 *
 * The PDU has no payload: just the L2CAP header, the (enhanced or
 * extended) control field and, when CRC16 FCS is in use, the checksum
 * computed over header + control.  Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Length field excludes the basic L2CAP header itself */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1010
/* Send an ERTM supervisory frame described by control.
 *
 * Adjusts related state before transmission: a pending F-bit is folded
 * into non-poll frames, CONN_RNR_SENT tracks whether the last status
 * told the peer we are busy, and any frame other than SREJ acknowledges
 * up to control->reqseq (so the ack timer is cleared).
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Never transmit while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1051
1052 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1053 {
1054 struct l2cap_ctrl control;
1055
1056 BT_DBG("chan %p, poll %d", chan, poll);
1057
1058 memset(&control, 0, sizeof(control));
1059 control.sframe = 1;
1060 control.poll = poll;
1061
1062 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1063 control.super = L2CAP_SUPER_RNR;
1064 else
1065 control.super = L2CAP_SUPER_RR;
1066
1067 control.reqseq = chan->buffer_seq;
1068 l2cap_send_sframe(chan, &control);
1069 }
1070
1071 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1072 {
1073 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1074 }
1075
1076 static bool __amp_capable(struct l2cap_chan *chan)
1077 {
1078 struct l2cap_conn *conn = chan->conn;
1079 struct hci_dev *hdev;
1080 bool amp_available = false;
1081
1082 if (!conn->hs_enabled)
1083 return false;
1084
1085 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1086 return false;
1087
1088 read_lock(&hci_dev_list_lock);
1089 list_for_each_entry(hdev, &hci_dev_list, list) {
1090 if (hdev->amp_type != AMP_TYPE_BREDR &&
1091 test_bit(HCI_UP, &hdev->flags)) {
1092 amp_available = true;
1093 break;
1094 }
1095 }
1096 read_unlock(&hci_dev_list_lock);
1097
1098 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1099 return amp_available;
1100
1101 return false;
1102 }
1103
1104 static bool l2cap_check_efs(struct l2cap_chan *chan)
1105 {
1106 /* Check EFS parameters */
1107 return true;
1108 }
1109
1110 void l2cap_send_conn_req(struct l2cap_chan *chan)
1111 {
1112 struct l2cap_conn *conn = chan->conn;
1113 struct l2cap_conn_req req;
1114
1115 req.scid = cpu_to_le16(chan->scid);
1116 req.psm = chan->psm;
1117
1118 chan->ident = l2cap_get_ident(conn);
1119
1120 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1121
1122 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1123 }
1124
1125 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1126 {
1127 struct l2cap_create_chan_req req;
1128 req.scid = cpu_to_le16(chan->scid);
1129 req.psm = chan->psm;
1130 req.amp_id = amp_id;
1131
1132 chan->ident = l2cap_get_ident(chan->conn);
1133
1134 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1135 sizeof(req), &req);
1136 }
1137
/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * reset SREJ/retransmission bookkeeping and park the TX/RX state
 * machines in their move states.  No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;

	/* Reset the retry count of every already-transmitted frame to 1,
	 * stopping at the first never-transmitted frame (retries == 0).
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Treat the peer as busy so nothing is sent until the move ends */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1172
1173 static void l2cap_move_done(struct l2cap_chan *chan)
1174 {
1175 u8 move_role = chan->move_role;
1176 BT_DBG("chan %p", chan);
1177
1178 chan->move_state = L2CAP_MOVE_STABLE;
1179 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1180
1181 if (chan->mode != L2CAP_MODE_ERTM)
1182 return;
1183
1184 switch (move_role) {
1185 case L2CAP_MOVE_ROLE_INITIATOR:
1186 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1187 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1188 break;
1189 case L2CAP_MOVE_ROLE_RESPONDER:
1190 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1191 break;
1192 }
1193 }
1194
/* Transition @chan to BT_CONNECTED and notify its owner via ops->ready.
 *
 * Clearing conf_state drops every configuration flag at once, which
 * marks the configuration phase as finished; the channel timer is no
 * longer needed once the channel is up.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1205
1206 static void l2cap_le_connect(struct l2cap_chan *chan)
1207 {
1208 struct l2cap_conn *conn = chan->conn;
1209 struct l2cap_le_conn_req req;
1210
1211 req.psm = chan->psm;
1212 req.scid = cpu_to_le16(chan->scid);
1213 req.mtu = cpu_to_le16(chan->imtu);
1214 req.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
1215 req.credits = __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS);
1216
1217 chan->ident = l2cap_get_ident(conn);
1218
1219 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1220 sizeof(req), &req);
1221 }
1222
/* Advance connection setup for a channel on an LE link.
 *
 * Nothing happens until the required security level has been reached
 * via SMP.  Channels without a PSM (fixed channels such as ATT) become
 * ready as soon as security is in place; connection-oriented channels
 * in BT_CONNECT proceed with an LE connection request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}
1238
/* Dispatch connection establishment to the right transport.
 *
 * AMP-capable channels first discover available AMP controllers, LE
 * links take the LE start path, and everything else sends a classic
 * L2CAP connection request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1250
/* Start connection establishment for @chan, exchanging the remote
 * feature mask first if that has not happened yet on this connection.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* LE links skip the information request exchange entirely */
	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature mask exchange still in flight; the connect is
		 * triggered when the response (or timeout) arrives.
		 */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		/* First channel on this connection: query the remote
		 * feature mask before connecting.
		 */
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1281
1282 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1283 {
1284 u32 local_feat_mask = l2cap_feat_mask;
1285 if (!disable_ertm)
1286 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1287
1288 switch (mode) {
1289 case L2CAP_MODE_ERTM:
1290 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1291 case L2CAP_MODE_STREAMING:
1292 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1293 default:
1294 return 0x00;
1295 }
1296 }
1297
/* Send a Disconnection Request for @chan and move it to BT_DISCONN.
 *
 * @err is recorded on the channel so the owner sees the right error
 * code.  A2MP fixed channels have no disconnect signalling and only
 * change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* ERTM timers are meaningless once we start disconnecting */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1324
1325 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn and push their connection state machines
 * forward: outgoing channels in BT_CONNECT are started, incoming
 * channels in BT_CONNECT2 get their pending connect response sent.
 * Called once the remote feature mask is known (or the info request
 * timed out).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device cannot fall back to basic
			 * mode; close the channel when the remote lacks
			 * the required mode.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Configuration already started, or the connect
			 * response wasn't a success: nothing more to do.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1401
1402 /* Find socket with cid and source/destination bdaddr.
1403 * Returns closest match, locked.
1404 */
1405 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1406 bdaddr_t *src,
1407 bdaddr_t *dst)
1408 {
1409 struct l2cap_chan *c, *c1 = NULL;
1410
1411 read_lock(&chan_list_lock);
1412
1413 list_for_each_entry(c, &chan_list, global_l) {
1414 if (state && c->state != state)
1415 continue;
1416
1417 if (c->scid == cid) {
1418 int src_match, dst_match;
1419 int src_any, dst_any;
1420
1421 /* Exact match. */
1422 src_match = !bacmp(&c->src, src);
1423 dst_match = !bacmp(&c->dst, dst);
1424 if (src_match && dst_match) {
1425 read_unlock(&chan_list_lock);
1426 return c;
1427 }
1428
1429 /* Closest match */
1430 src_any = !bacmp(&c->src, BDADDR_ANY);
1431 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1432 if ((src_match && dst_any) || (src_any && dst_match) ||
1433 (src_any && dst_any))
1434 c1 = c;
1435 }
1436 }
1437
1438 read_unlock(&chan_list_lock);
1439
1440 return c1;
1441 }
1442
/* Handle an LE link coming up: if a socket is listening on the ATT
 * fixed channel, instantiate a server-side channel for this connection
 * — unless a client ATT channel already exists or the remote device is
 * blacklisted.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
		return;

	l2cap_chan_lock(pchan);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	/* Inherit the link's addresses and types on the child channel */
	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
1485
/* Handle the underlying HCI link becoming ready.
 *
 * For outgoing LE links, security elevation is requested here even
 * without an owning socket.  Every channel on the connection is then
 * advanced: LE channels via l2cap_le_start(), non-connection-oriented
 * channels straight to ready, and connect-pending channels through
 * l2cap_do_start().
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels are handled by the AMP manager */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1527
1528 /* Notify sockets that we cannot guaranty reliability anymore */
1529 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1530 {
1531 struct l2cap_chan *chan;
1532
1533 BT_DBG("conn %p", conn);
1534
1535 mutex_lock(&conn->chan_lock);
1536
1537 list_for_each_entry(chan, &conn->chan_l, list) {
1538 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1539 l2cap_chan_set_err(chan, err);
1540 }
1541
1542 mutex_unlock(&conn->chan_lock);
1543 }
1544
/* Information request timed out: mark the feature mask exchange as
 * done with whatever defaults we have, and let pending channels
 * proceed via l2cap_conn_start().
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1555
1556 /*
1557 * l2cap_user
1558 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1559 * callback is called during registration. The ->remove callback is called
1560 * during unregistration.
1561 * An l2cap_user object can either be explicitly unregistered or when the
1562 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1563 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1564 * External modules must own a reference to the l2cap_conn object if they intend
1565 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1566 * any time if they don't.
1567 */
1568
/* Register an external l2cap_user on @conn.
 *
 * Runs user->probe() under the hci_dev lock; on success the user is
 * linked into conn->users.  Returns 0 on success, -EINVAL if the user
 * is already linked, -ENODEV if the connection is already torn down,
 * or the probe callback's error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1606
/* Unregister a previously registered l2cap_user from @conn.
 *
 * Safe to call for a user that was never registered, detected via the
 * NULLed list pointers.  The ->remove callback runs under the hci_dev
 * lock.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	/* NULL the pointers so a repeated unregister is a no-op */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1625
/* Remove every registered l2cap_user from @conn, invoking each user's
 * ->remove callback.  Popping from the head keeps the walk valid no
 * matter what ->remove does to the list.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1638
/* Tear down the L2CAP state attached to @hcon.
 *
 * Kills every channel with error @err, releases the HCI channel,
 * cancels pending info/security work and finally drops the l2cap_conn
 * reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Extra ref so the channel outlives l2cap_chan_del()
		 * until ops->close() has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	/* conn->hchan == NULL marks the conn dead for l2cap_register_user() */
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1684
/* SMP security procedure timed out: destroy the SMP context and take
 * down the whole LE connection with ETIMEDOUT.  The SMP-pending flag
 * is cleared here, so l2cap_conn_del() will not destroy the SMP
 * context a second time.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1697
/* Get or create the l2cap_conn for @hcon.
 *
 * Idempotent: an existing connection is returned as-is.  Otherwise a
 * new conn is allocated together with an HCI channel, the MTU is taken
 * from the LE or ACL buffer size as appropriate, and the LE security
 * timer or BR/EDR info timer is set up.  Returns NULL on allocation
 * failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Reference on the hci_conn, dropped in l2cap_conn_free() */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		/* Use the LE buffer size if the controller has one;
		 * otherwise fall back to the shared ACL buffers.
		 */
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1757
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the connection object.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1765
/* Take a reference on @conn; paired with l2cap_conn_put() */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1771
/* Drop a reference on @conn; frees it via l2cap_conn_free() on zero */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1777
1778 /* ---- Socket interface ---- */
1779
1780 /* Find socket with psm and source / destination bdaddr.
1781 * Returns closest match.
1782 */
1783 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1784 bdaddr_t *src,
1785 bdaddr_t *dst,
1786 u8 link_type)
1787 {
1788 struct l2cap_chan *c, *c1 = NULL;
1789
1790 read_lock(&chan_list_lock);
1791
1792 list_for_each_entry(c, &chan_list, global_l) {
1793 if (state && c->state != state)
1794 continue;
1795
1796 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1797 continue;
1798
1799 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1800 continue;
1801
1802 if (c->psm == psm) {
1803 int src_match, dst_match;
1804 int src_any, dst_any;
1805
1806 /* Exact match. */
1807 src_match = !bacmp(&c->src, src);
1808 dst_match = !bacmp(&c->dst, dst);
1809 if (src_match && dst_match) {
1810 read_unlock(&chan_list_lock);
1811 return c;
1812 }
1813
1814 /* Closest match */
1815 src_any = !bacmp(&c->src, BDADDR_ANY);
1816 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1817 if ((src_match && dst_any) || (src_any && dst_match) ||
1818 (src_any && dst_any))
1819 c1 = c;
1820 }
1821 }
1822
1823 read_unlock(&chan_list_lock);
1824
1825 return c1;
1826 }
1827
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 *
 * Validates the PSM/CID combination and channel mode, resolves a local
 * adapter able to reach @dst, creates (or reuses) the ACL/LE link and
 * attaches the channel to it.  If the link is already up, the channel
 * state machine is started immediately.  Returns 0 on success or a
 * negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed CID may only be used once per connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* NOTE(review): chan is unlocked across l2cap_chan_add() —
	 * presumably it takes its own locks; confirm against its
	 * definition.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1962
1963 static void l2cap_monitor_timeout(struct work_struct *work)
1964 {
1965 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1966 monitor_timer.work);
1967
1968 BT_DBG("chan %p", chan);
1969
1970 l2cap_chan_lock(chan);
1971
1972 if (!chan->conn) {
1973 l2cap_chan_unlock(chan);
1974 l2cap_chan_put(chan);
1975 return;
1976 }
1977
1978 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1979
1980 l2cap_chan_unlock(chan);
1981 l2cap_chan_put(chan);
1982 }
1983
1984 static void l2cap_retrans_timeout(struct work_struct *work)
1985 {
1986 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1987 retrans_timer.work);
1988
1989 BT_DBG("chan %p", chan);
1990
1991 l2cap_chan_lock(chan);
1992
1993 if (!chan->conn) {
1994 l2cap_chan_unlock(chan);
1995 l2cap_chan_put(chan);
1996 return;
1997 }
1998
1999 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2000 l2cap_chan_unlock(chan);
2001 l2cap_chan_put(chan);
2002 }
2003
/* Transmit queued frames in streaming mode.
 *
 * Appends @skbs to the tx queue and sends everything immediately:
 * streaming mode keeps no unacked copies, so frames are numbered,
 * optionally FCS-protected, and handed straight to the HCI layer.
 * Transmission is skipped while the channel is being moved.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* FCS covers the header and payload written so far */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2042
/* Transmit pending I-frames on an ERTM channel.
 *
 * Sends from tx_send_head while the remote TX window has room and the
 * TX state machine permits transmission.  Each frame is kept on tx_q
 * for possible retransmission; a clone is what actually goes out.
 * Returns the number of frames sent, 0 when nothing could be sent, or
 * -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame piggybacks an ack up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send head; NULL once the queue tail is reached */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2112
/* Retransmit every frame queued on the channel's retrans_list.
 *
 * Each sequence number is looked up in tx_q; the stored frame's
 * control field is refreshed (current reqseq, possible final bit) and
 * a writable copy is re-sent.  Exceeding max_tx retransmissions for a
 * frame tears the channel down with ECONNRESET.  Skipped entirely
 * while the remote is busy or the channel is moving.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS over the rewritten control field */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2191
/* Queue a single frame (control->reqseq) for retransmission and
 * trigger the resend machinery immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2200
/* Retransmit all unacked frames starting at control->reqseq.
 *
 * The first queue walk locates the frame matching reqseq (or the send
 * head, whichever comes first); the second collects every txseq from
 * there up to — but excluding — the never-sent head into the
 * retransmission list.  A poll bit in @control requests a final bit on
 * the next outgoing frame.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2234
/* Acknowledge received I-frames.
 *
 * When locally busy, an RNR S-frame is sent immediately.  Otherwise
 * pending I-frames are flushed first (they carry acks for free); an
 * explicit RR is only emitted once roughly 3/4 of the ack window is
 * outstanding, and anything smaller is deferred via the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2284
/* Copy @len bytes of user data from @msg into @skb, spilling overflow
 * into continuation fragments sized to the HCI MTU.
 *
 * @count is how much fits in the head skb.  Returns the number of
 * bytes consumed, -EFAULT on bad user memory, or the fragment
 * allocator's error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Account the fragment on the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2329
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the PSM, then the user payload (fragmented to the HCI MTU).
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2364
/* Build a basic-mode (B-frame) PDU: L2CAP header plus the user payload
 * (fragmented to the HCI MTU).  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2397
/* Build an ERTM/streaming I-frame PDU.
 *
 * The header consists of the basic L2CAP header, a zeroed control
 * field (filled in at transmit time) and, for the first segment of a
 * segmented SDU, the total SDU length (@sdulen).  Room for the FCS is
 * reserved in the length accounting when CRC16 is in use.  Returns the
 * skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2451
/* Segment an outgoing SDU into one or more I-frame PDUs and queue them
 * on @seg_queue, tagging each with the appropriate SAR value
 * (UNSEGMENTED, START, CONTINUE, END).
 *
 * Returns 0 on success or a negative error; on failure any PDUs already
 * built are purged from @seg_queue.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU-length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* SDU-length field only in the first segment;
			 * reclaim its space for subsequent segments.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2521
/* Transmit user data on a channel, dispatching on channel type and mode.
 *
 * Connectionless channels get a connless PDU; Basic and LE flow-control
 * modes get a single B-frame (subject to the outgoing MTU); ERTM and
 * Streaming modes segment the SDU first and then hand the segments to
 * the respective transmit machinery.
 *
 * Returns the number of bytes accepted (== @len) or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2605
/* Send SREJ S-frames for every missing sequence number from
 * expected_tx_seq up to (but not including) @txseq, skipping frames
 * already buffered out-of-order in srej_q.  Each requested sequence is
 * also recorded in srej_list so retransmissions can be matched later.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only request frames we have not already received */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* The frame with sequence @txseq has been received */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2628
2629 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2630 {
2631 struct l2cap_ctrl control;
2632
2633 BT_DBG("chan %p", chan);
2634
2635 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2636 return;
2637
2638 memset(&control, 0, sizeof(control));
2639 control.sframe = 1;
2640 control.super = L2CAP_SUPER_SREJ;
2641 control.reqseq = chan->srej_list.tail;
2642 l2cap_send_sframe(chan, &control);
2643 }
2644
/* Re-send SREJ S-frames for all outstanding sequence numbers on
 * srej_list, stopping early if @txseq (just received) is found.
 * Entries are popped and re-appended, so the list is rotated back to
 * its original content minus @txseq; the captured initial head bounds
 * the loop to a single pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2670
/* Process an acknowledgement (ReqSeq) from the peer: free every frame on
 * the TX queue with a sequence number before @reqseq, advance
 * expected_ack_seq, and stop the retransmission timer once nothing
 * remains unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing to do if there are no outstanding frames or the ack
	 * does not advance the window.
	 */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2702
2703 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2704 {
2705 BT_DBG("chan %p", chan);
2706
2707 chan->expected_tx_seq = chan->buffer_seq;
2708 l2cap_seq_list_clear(&chan->srej_list);
2709 skb_queue_purge(&chan->srej_q);
2710 chan->rx_state = L2CAP_RX_STATE_RECV;
2711 }
2712
/* ERTM transmit state machine handler for the XMIT state.
 *
 * Queues and sends new data, enters/leaves local-busy handling, processes
 * peer acknowledgements, and transitions to WAIT_F when a poll is sent
 * (explicit poll or retransmission timeout).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Append new frames to the TX queue and transmit */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy (RNR); poll it with
			 * RR(P=1) and wait for the final response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2784
2785 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2786 struct l2cap_ctrl *control,
2787 struct sk_buff_head *skbs, u8 event)
2788 {
2789 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2790 event);
2791
2792 switch (event) {
2793 case L2CAP_EV_DATA_REQUEST:
2794 if (chan->tx_send_head == NULL)
2795 chan->tx_send_head = skb_peek(skbs);
2796 /* Queue data, but don't send. */
2797 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2798 break;
2799 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2800 BT_DBG("Enter LOCAL_BUSY");
2801 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2802
2803 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2804 /* The SREJ_SENT state must be aborted if we are to
2805 * enter the LOCAL_BUSY state.
2806 */
2807 l2cap_abort_rx_srej_sent(chan);
2808 }
2809
2810 l2cap_send_ack(chan);
2811
2812 break;
2813 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2814 BT_DBG("Exit LOCAL_BUSY");
2815 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2816
2817 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2818 struct l2cap_ctrl local_control;
2819 memset(&local_control, 0, sizeof(local_control));
2820 local_control.sframe = 1;
2821 local_control.super = L2CAP_SUPER_RR;
2822 local_control.poll = 1;
2823 local_control.reqseq = chan->buffer_seq;
2824 l2cap_send_sframe(chan, &local_control);
2825
2826 chan->retry_count = 1;
2827 __set_monitor_timer(chan);
2828 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2829 }
2830 break;
2831 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2832 l2cap_process_reqseq(chan, control->reqseq);
2833
2834 /* Fall through */
2835
2836 case L2CAP_EV_RECV_FBIT:
2837 if (control && control->final) {
2838 __clear_monitor_timer(chan);
2839 if (chan->unacked_frames > 0)
2840 __set_retrans_timer(chan);
2841 chan->retry_count = 0;
2842 chan->tx_state = L2CAP_TX_STATE_XMIT;
2843 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2844 }
2845 break;
2846 case L2CAP_EV_EXPLICIT_POLL:
2847 /* Ignore */
2848 break;
2849 case L2CAP_EV_MONITOR_TO:
2850 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2851 l2cap_send_rr_or_rnr(chan, 1);
2852 __set_monitor_timer(chan);
2853 chan->retry_count++;
2854 } else {
2855 l2cap_send_disconn_req(chan, ECONNABORTED);
2856 }
2857 break;
2858 default:
2859 break;
2860 }
2861 }
2862
2863 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2864 struct sk_buff_head *skbs, u8 event)
2865 {
2866 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2867 chan, control, skbs, event, chan->tx_state);
2868
2869 switch (chan->tx_state) {
2870 case L2CAP_TX_STATE_XMIT:
2871 l2cap_tx_state_xmit(chan, control, skbs, event);
2872 break;
2873 case L2CAP_TX_STATE_WAIT_F:
2874 l2cap_tx_state_wait_f(chan, control, skbs, event);
2875 break;
2876 default:
2877 /* Ignore event */
2878 break;
2879 }
2880 }
2881
/* Forward a received frame's ReqSeq (and F bit) to the TX state machine
 * so acknowledged frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2888
/* Forward only a received frame's Final bit to the TX state machine
 * (no ReqSeq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2895
2896 /* Copy frame to all raw sockets on that connection */
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)
			continue;

		/* Clone per channel; skip silently on allocation failure */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv takes ownership on success; free the clone otherwise */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2923
2924 /* ---- L2CAP signalling commands ---- */
/* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header (on the BR/EDR or LE
 * signalling CID depending on link type), command header, then @dlen
 * bytes of @data, fragmented across frag_list skbs when the payload
 * exceeds the connection MTU.
 *
 * Returns NULL on undersized MTU or allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Both headers must fit in the first fragment */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far, including fragments */
	kfree_skb(skb);
	return NULL;
}
2990
/* Parse one configuration option at *ptr, returning its type, length and
 * value, and advance *ptr past it.
 *
 * 1/2/4-byte option values are decoded from little-endian into *val;
 * for any other length *val is a pointer to the raw option bytes.
 * Returns the total number of bytes consumed (header + value).
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3024
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.
 *
 * 1/2/4-byte values are stored little-endian; for any other length
 * @val is treated as a pointer to @len raw bytes to copy.  The caller
 * must ensure the destination buffer has room.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3054
/* Append an Extended Flow Specification option built from the channel's
 * local QoS parameters.  Only ERTM and Streaming modes carry an EFS;
 * other modes emit nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode uses fixed best-effort parameters */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3085
/* Delayed-work handler for the ERTM acknowledgement timer: if any frames
 * have been received since the last ack was sent, acknowledge them with
 * an RR/RNR S-frame.  Drops the channel reference taken when the timer
 * was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Number of frames received but not yet acknowledged */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3105
/* Initialise per-channel sequence counters, queues and AMP move state.
 * The ERTM-specific machinery (timers, SREJ queues, sequence lists) is
 * only set up when the channel mode is ERTM.
 *
 * Returns 0 on success or a negative errno from sequence-list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming/basic modes need none of the ERTM state below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3150
3151 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3152 {
3153 switch (mode) {
3154 case L2CAP_MODE_STREAMING:
3155 case L2CAP_MODE_ERTM:
3156 if (l2cap_mode_supported(mode, remote_feat_mask))
3157 return mode;
3158 /* fall through */
3159 default:
3160 return L2CAP_MODE_BASIC;
3161 }
3162 }
3163
3164 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3165 {
3166 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3167 }
3168
3169 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3170 {
3171 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3172 }
3173
/* Fill in the retransmission and monitor timeouts of @rfc: derived from
 * the AMP controller's best-effort flush timeout when the channel runs
 * on an AMP link, otherwise the spec defaults.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field in the RFC option */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3211
3212 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3213 {
3214 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3215 __l2cap_ews_supported(chan->conn)) {
3216 /* use extended control field */
3217 set_bit(FLAG_EXT_CTRL, &chan->flags);
3218 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3219 } else {
3220 chan->tx_win = min_t(u16, chan->tx_win,
3221 L2CAP_DEFAULT_TX_WINDOW);
3222 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3223 }
3224 chan->ack_win = chan->tx_win;
3225 }
3226
/* Build an outgoing Configure Request into @data: select the channel
 * mode on the first request, then emit MTU, RFC, and (where applicable)
 * EFS, EWS and FCS options for the chosen mode.
 *
 * Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		/* Fall back to a mode the remote supports */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* No RFC option needed if the peer knows neither ERTM
		 * nor Streaming mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for the largest possible overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Full window goes in the EWS option when using the
		 * extended control field.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3344
/* Parse a peer's Configure Request (stored in chan->conf_req) and build
 * our Configure Response into @data.
 *
 * First pass decodes every option (MTU, flush timeout, RFC, FCS, EFS,
 * EWS), collecting unknown non-hint option types for a CONF_UNKNOWN
 * result.  The channel mode is then reconciled with the peer's RFC
 * option, and the response echoes the negotiated values.
 *
 * Returns the number of bytes written, or -ECONNREFUSED when the
 * negotiation cannot succeed (mode mismatch after retries, EWS/EFS
 * without high-speed support, incompatible service type).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored without rejecting the request */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended windows require high-speed support */
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Collect unknown option types in the response */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Only reconcile the mode on the first config exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 device: the mode is fixed and must match */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give the peer only one chance to retry with our mode */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * NO_TRAFFIC.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS, when present, supersedes the RFC window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's MPS to what our MTU can carry */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3558
3559 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3560 void *data, u16 *result)
3561 {
3562 struct l2cap_conf_req *req = data;
3563 void *ptr = req->data;
3564 int type, olen;
3565 unsigned long val;
3566 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3567 struct l2cap_conf_efs efs;
3568
3569 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3570
3571 while (len >= L2CAP_CONF_OPT_SIZE) {
3572 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3573
3574 switch (type) {
3575 case L2CAP_CONF_MTU:
3576 if (val < L2CAP_DEFAULT_MIN_MTU) {
3577 *result = L2CAP_CONF_UNACCEPT;
3578 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3579 } else
3580 chan->imtu = val;
3581 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3582 break;
3583
3584 case L2CAP_CONF_FLUSH_TO:
3585 chan->flush_to = val;
3586 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3587 2, chan->flush_to);
3588 break;
3589
3590 case L2CAP_CONF_RFC:
3591 if (olen == sizeof(rfc))
3592 memcpy(&rfc, (void *)val, olen);
3593
3594 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3595 rfc.mode != chan->mode)
3596 return -ECONNREFUSED;
3597
3598 chan->fcs = 0;
3599
3600 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3601 sizeof(rfc), (unsigned long) &rfc);
3602 break;
3603
3604 case L2CAP_CONF_EWS:
3605 chan->ack_win = min_t(u16, val, chan->ack_win);
3606 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3607 chan->tx_win);
3608 break;
3609
3610 case L2CAP_CONF_EFS:
3611 if (olen == sizeof(efs))
3612 memcpy(&efs, (void *)val, olen);
3613
3614 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3615 efs.stype != L2CAP_SERV_NOTRAFIC &&
3616 efs.stype != chan->local_stype)
3617 return -ECONNREFUSED;
3618
3619 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3620 (unsigned long) &efs);
3621 break;
3622
3623 case L2CAP_CONF_FCS:
3624 if (*result == L2CAP_CONF_PENDING)
3625 if (val == L2CAP_FCS_NONE)
3626 set_bit(CONF_RECV_NO_FCS,
3627 &chan->conf_state);
3628 break;
3629 }
3630 }
3631
3632 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3633 return -ECONNREFUSED;
3634
3635 chan->mode = rfc.mode;
3636
3637 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3638 switch (rfc.mode) {
3639 case L2CAP_MODE_ERTM:
3640 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3641 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3642 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3643 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3644 chan->ack_win = min_t(u16, chan->ack_win,
3645 rfc.txwin_size);
3646
3647 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3648 chan->local_msdu = le16_to_cpu(efs.msdu);
3649 chan->local_sdu_itime =
3650 le32_to_cpu(efs.sdu_itime);
3651 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3652 chan->local_flush_to =
3653 le32_to_cpu(efs.flush_to);
3654 }
3655 break;
3656
3657 case L2CAP_MODE_STREAMING:
3658 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3659 }
3660 }
3661
3662 req->dcid = cpu_to_le16(chan->dcid);
3663 req->flags = __constant_cpu_to_le16(0);
3664
3665 return ptr - data;
3666 }
3667
3668 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3669 u16 result, u16 flags)
3670 {
3671 struct l2cap_conf_rsp *rsp = data;
3672 void *ptr = rsp->data;
3673
3674 BT_DBG("chan %p", chan);
3675
3676 rsp->scid = cpu_to_le16(chan->dcid);
3677 rsp->result = cpu_to_le16(result);
3678 rsp->flags = cpu_to_le16(flags);
3679
3680 return ptr - data;
3681 }
3682
3683 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3684 {
3685 struct l2cap_le_conn_rsp rsp;
3686 struct l2cap_conn *conn = chan->conn;
3687
3688 BT_DBG("chan %p", chan);
3689
3690 rsp.dcid = cpu_to_le16(chan->scid);
3691 rsp.mtu = cpu_to_le16(chan->imtu);
3692 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
3693 rsp.credits = __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS);
3694 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3695
3696 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3697 &rsp);
3698 }
3699
3700 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3701 {
3702 struct l2cap_conn_rsp rsp;
3703 struct l2cap_conn *conn = chan->conn;
3704 u8 buf[128];
3705 u8 rsp_code;
3706
3707 rsp.scid = cpu_to_le16(chan->dcid);
3708 rsp.dcid = cpu_to_le16(chan->scid);
3709 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3710 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3711
3712 if (chan->hs_hcon)
3713 rsp_code = L2CAP_CREATE_CHAN_RSP;
3714 else
3715 rsp_code = L2CAP_CONN_RSP;
3716
3717 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3718
3719 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3720
3721 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3722 return;
3723
3724 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3725 l2cap_build_conf_req(chan, buf), buf);
3726 chan->num_conf_req++;
3727 }
3728
3729 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3730 {
3731 int type, olen;
3732 unsigned long val;
3733 /* Use sane default values in case a misbehaving remote device
3734 * did not send an RFC or extended window size option.
3735 */
3736 u16 txwin_ext = chan->ack_win;
3737 struct l2cap_conf_rfc rfc = {
3738 .mode = chan->mode,
3739 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3740 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3741 .max_pdu_size = cpu_to_le16(chan->imtu),
3742 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3743 };
3744
3745 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3746
3747 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3748 return;
3749
3750 while (len >= L2CAP_CONF_OPT_SIZE) {
3751 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3752
3753 switch (type) {
3754 case L2CAP_CONF_RFC:
3755 if (olen == sizeof(rfc))
3756 memcpy(&rfc, (void *)val, olen);
3757 break;
3758 case L2CAP_CONF_EWS:
3759 txwin_ext = val;
3760 break;
3761 }
3762 }
3763
3764 switch (rfc.mode) {
3765 case L2CAP_MODE_ERTM:
3766 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3767 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3768 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3769 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3770 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3771 else
3772 chan->ack_win = min_t(u16, chan->ack_win,
3773 rfc.txwin_size);
3774 break;
3775 case L2CAP_MODE_STREAMING:
3776 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3777 }
3778 }
3779
3780 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3781 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3782 u8 *data)
3783 {
3784 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3785
3786 if (cmd_len < sizeof(*rej))
3787 return -EPROTO;
3788
3789 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3790 return 0;
3791
3792 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3793 cmd->ident == conn->info_ident) {
3794 cancel_delayed_work(&conn->info_timer);
3795
3796 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3797 conn->info_ident = 0;
3798
3799 l2cap_conn_start(conn);
3800 }
3801
3802 return 0;
3803 }
3804
/* Handle an incoming Connection Request (or AMP Create Channel Request,
 * selected by @rsp_code/@amp_id): look up a listening channel for the
 * requested PSM, run the security check, create the new channel and
 * send a response with @rsp_code.  A response is sent on every path;
 * the new channel (NULL on rejection) is returned so the AMP create
 * path can attach its state to it.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock before the channel lock. */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our CID for this channel becomes the dcid in the response. */
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Wait for userspace authorization before
				 * completing the connection.
				 */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange not finished yet; answer "pending"
		 * and complete once the information response arrives.
		 */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Pending with no info means we still need the remote's feature
	 * mask: start the information request exchange now.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away. */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3938
/* Handle a BR/EDR Connection Request: notify the management core of the
 * remote device (only once per ACL, guarded by HCI_CONN_MGMT_CONNECTED)
 * and defer the actual channel setup to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3959
/* Handle a Connection Response (or Create Channel Response): locate the
 * channel either by our source CID or, if the remote answered with
 * scid 0 (rejection), by the request's command ident; then advance the
 * channel to configuration, keep it pending, or tear it down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* Rejections may omit our CID; match on the ident of the
		 * request we sent instead.
		 */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the initial configure request only once. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a rejection; drop the channel. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4032
4033 static inline void set_default_fcs(struct l2cap_chan *chan)
4034 {
4035 /* FCS is enabled only in ERTM or streaming mode, if one or both
4036 * sides request it.
4037 */
4038 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4039 chan->fcs = L2CAP_FCS_NONE;
4040 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4041 chan->fcs = L2CAP_FCS_CRC16;
4042 }
4043
4044 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4045 u8 ident, u16 flags)
4046 {
4047 struct l2cap_conn *conn = chan->conn;
4048
4049 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4050 flags);
4051
4052 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4053 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4054
4055 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4056 l2cap_build_conf_rsp(chan, data,
4057 L2CAP_CONF_SUCCESS, flags), data);
4058 }
4059
4060 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4061 u16 scid, u16 dcid)
4062 {
4063 struct l2cap_cmd_rej_cid rej;
4064
4065 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4066 rej.scid = __cpu_to_le16(scid);
4067 rej.dcid = __cpu_to_le16(dcid);
4068
4069 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4070 }
4071
/* Handle a Configure Request: accumulate (possibly continued) options
 * into chan->conf_req, and once the final fragment arrives parse them,
 * send our response, and — when both directions are done — bring the
 * channel up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns a locked channel on success. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Options were incompatible; drop the connection. */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: initialize the chosen mode and
	 * mark the channel ready.
	 */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We have answered the remote; now send our own configure
	 * request if we have not already.
	 */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	 * Conf Rsp PENDING in the code above.
	 */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* AMP channel: the response is deferred until the
			 * logical link completes.
			 */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4180
/* Handle a Configure Response: apply accepted parameters, renegotiate
 * on UNACCEPT (bounded by L2CAP_CONF_MAX_CONF_RSP attempts), handle
 * PENDING for EFS/AMP channels, and bring the channel up once both
 * configuration directions have completed.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns a locked channel on success. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP: wait for the logical link before
				 * sending the final response.
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many renegotiation rounds: fall through and give up. */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments to come; wait for the final response. */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4292
/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear the channel down.  The extra hold/put pair keeps
 * the channel alive across l2cap_chan_del() so ops->close() can still
 * be called after the channel has been unlinked.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The request's dcid names our local CID. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4339
/* Handle a Disconnection Response to a request we sent: the remote has
 * acknowledged, so remove and close the channel.  As in
 * l2cap_disconnect_req(), the hold/put pair keeps the channel valid for
 * ops->close() after it is unlinked.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4378
/* Handle an Information Request: answer with our feature mask, our
 * fixed channel map, or "not supported" for unknown types.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless disabled, and the
		 * high-speed extensions when an AMP controller is usable.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE: updates the module-global fixed channel map in
		 * place based on this connection's AMP capability.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4433
/* Handle an Information Response: record the remote's feature mask
 * (chaining into a fixed-channel query when supported), then mark the
 * info exchange done and start any channels waiting on it.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Treat a failed query as "no optional features". */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed channel query before
			 * declaring the exchange complete.
			 */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4496
/* Handle a Create Channel Request (AMP): validate the requested
 * controller, create the channel via l2cap_connect(), and bind it to
 * the high-speed link.  A controller id of AMP_ID_BREDR falls back to a
 * plain BR/EDR connection.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The high-speed ACL must already exist for this peer. */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP links never use an L2CAP FCS. */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4573
4574 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4575 {
4576 struct l2cap_move_chan_req req;
4577 u8 ident;
4578
4579 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4580
4581 ident = l2cap_get_ident(chan->conn);
4582 chan->ident = ident;
4583
4584 req.icid = cpu_to_le16(chan->scid);
4585 req.dest_amp_id = dest_amp_id;
4586
4587 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4588 &req);
4589
4590 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4591 }
4592
4593 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4594 {
4595 struct l2cap_move_chan_rsp rsp;
4596
4597 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4598
4599 rsp.icid = cpu_to_le16(chan->dcid);
4600 rsp.result = cpu_to_le16(result);
4601
4602 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4603 sizeof(rsp), &rsp);
4604 }
4605
4606 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4607 {
4608 struct l2cap_move_chan_cfm cfm;
4609
4610 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4611
4612 chan->ident = l2cap_get_ident(chan->conn);
4613
4614 cfm.icid = cpu_to_le16(chan->scid);
4615 cfm.result = cpu_to_le16(result);
4616
4617 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4618 sizeof(cfm), &cfm);
4619
4620 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4621 }
4622
4623 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4624 {
4625 struct l2cap_move_chan_cfm cfm;
4626
4627 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4628
4629 cfm.icid = cpu_to_le16(icid);
4630 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4631
4632 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4633 sizeof(cfm), &cfm);
4634 }
4635
4636 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4637 u16 icid)
4638 {
4639 struct l2cap_move_chan_cfm_rsp rsp;
4640
4641 BT_DBG("icid 0x%4.4x", icid);
4642
4643 rsp.icid = cpu_to_le16(icid);
4644 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4645 }
4646
/* Drop the channel's references to its high-speed logical link. */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4654
/* Recover from a failed logical link setup: disconnect a channel that
 * was being created on the AMP, or abort an in-progress channel move.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4685
/* Logical link is up for a channel being created on an AMP: attach the
 * link, send the deferred configure response, and finish configuration
 * if the remote side is already done.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident still holds the config request's ident. */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4708
/* Logical link is up for a channel move: attach the link and advance
 * the move state machine according to our role and current state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer completion until local rx is unblocked. */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4742
/* Logical link confirmation from the AMP layer.  A non-zero status
 * aborts whatever creation/move was in flight and releases the link;
 * otherwise the work is dispatched by channel state: channels not yet
 * connected finish AMP channel creation, connected channels finish a
 * channel move.
 *
 * Call with chan locked.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4763
4764 void l2cap_move_start(struct l2cap_chan *chan)
4765 {
4766 BT_DBG("chan %p", chan);
4767
4768 if (chan->local_amp_id == AMP_ID_BREDR) {
4769 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4770 return;
4771 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4772 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4773 /* Placeholder - start physical link setup */
4774 } else {
4775 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4776 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4777 chan->move_id = 0;
4778 l2cap_move_setup(chan);
4779 l2cap_send_move_chan_req(chan, 0);
4780 }
4781 }
4782
/* Continue AMP channel creation after the physical link attempt
 * resolved.  For an outgoing channel (BT_CONNECT) either issue the
 * Create Channel request on the AMP or fall back to a plain BR/EDR
 * Connect request.  For an incoming channel, answer the pending Create
 * Channel request and, on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		/* NOTE(review): assumes a built config request always fits
		 * in 128 bytes — TODO confirm against l2cap_build_conf_req.
		 */
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Kick off configuration on the new channel */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4834
4835 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4836 u8 remote_amp_id)
4837 {
4838 l2cap_move_setup(chan);
4839 chan->move_id = local_amp_id;
4840 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4841
4842 l2cap_send_move_chan_req(chan, remote_amp_id);
4843 }
4844
/* Responder side of a move after the physical link resolved.  The
 * logical-link lookup is still a placeholder, so hchan remains NULL and
 * only the "not allowed" branch is currently reachable.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4869
4870 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4871 {
4872 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4873 u8 rsp_result;
4874 if (result == -EINVAL)
4875 rsp_result = L2CAP_MR_BAD_ID;
4876 else
4877 rsp_result = L2CAP_MR_NOT_ALLOWED;
4878
4879 l2cap_send_move_chan_rsp(chan, rsp_result);
4880 }
4881
4882 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4883 chan->move_state = L2CAP_MOVE_STABLE;
4884
4885 /* Restart data transmission */
4886 l2cap_ertm_send(chan);
4887 }
4888
/* Physical link confirmation from the AMP layer.  Dispatches to channel
 * creation (channel not yet connected), move cancellation (failure), or
 * the role-specific move continuation.
 *
 * Invoke with locked chan.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* NOTE(review): this early-exit path drops the channel lock while
	 * the other paths leave it held for the caller — confirm callers
	 * expect this asymmetry.
	 */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4922
/* Handle an incoming Move Channel Request.  Validates that the channel
 * exists, may be moved (dynamic CID, ERTM/streaming mode, policy allows
 * it), that the destination controller is usable, and resolves move
 * collisions by bd_addr comparison before answering with a Move Channel
 * Response.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	/* On success the channel is returned locked; unlocked before
	 * returning below.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown icid: reject without a channel */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5020
/* Continue an initiator-side move after a success or pending Move
 * Channel Response arrived.  Advances the per-channel move state and
 * sends the Move Channel Confirm when both the response and the logical
 * link are in place.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* No channel: confirm by icid alone */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending response restarts the (extended) move timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5110
/* Handle a failed Move Channel Response.  A collision simply demotes us
 * to responder (the peer won); any other failure cancels the move.  In
 * all cases an unconfirmed Move Channel Confirm is sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5139
5140 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5141 struct l2cap_cmd_hdr *cmd,
5142 u16 cmd_len, void *data)
5143 {
5144 struct l2cap_move_chan_rsp *rsp = data;
5145 u16 icid, result;
5146
5147 if (cmd_len != sizeof(*rsp))
5148 return -EPROTO;
5149
5150 icid = le16_to_cpu(rsp->icid);
5151 result = le16_to_cpu(rsp->result);
5152
5153 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5154
5155 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5156 l2cap_move_continue(conn, icid, result);
5157 else
5158 l2cap_move_fail(conn, cmd->ident, icid, result);
5159
5160 return 0;
5161 }
5162
/* Handle a Move Channel Confirm from the move initiator.  On a
 * confirmed result the channel finally switches controllers (releasing
 * the logical link when landing on BR/EDR); otherwise the move is
 * rolled back.  A confirm response is always sent, even for an unknown
 * icid, as the spec requires.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Move aborted: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5204
/* Handle a Move Channel Confirm Response — the final PDU of a move.
 * Commits the controller switch on the initiator side and finishes the
 * move.  An unknown icid needs no reply, so it is silently ignored.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR: the AMP logical link is no longer needed */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5239
/* Validate LE connection parameters against the ranges allowed by the
 * Core spec: connection interval 7.5ms-4s (units of 1.25ms), supervision
 * timeout 100ms-32s (units of 10ms), slave latency at most 499 and small
 * enough that missed events cannot outlast the supervision timeout.
 *
 * Returns 0 when the parameters are acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 max_latency;

	if (max < min || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* Supervision timeout must be long enough to cover at least one
	 * maximum-length connection event interval.
	 */
	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;

	return (latency > 499 || latency > max_latency) ? -EINVAL : 0;
}
5260
/* Handle an LE Connection Parameter Update Request.  Only valid when we
 * are master of the link; the parameters are validated and, if accepted,
 * handed to the controller via hci_le_conn_update().  A response is sent
 * in either case.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may be asked to update parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the new parameters only after accepting them */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5302
/* Handle an LE Credit Based Connection Response.  The pending channel is
 * looked up by the request's ident; on success the remote parameters are
 * recorded and the channel becomes ready, any other result tears the
 * channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is the minimum LE MTU/MPS allowed by the spec */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		/* NOTE(review): the peer's initial credits are parsed but
		 * not stored anywhere here — TODO confirm this is handled
		 * once credit-based flow control is wired up.
		 */
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5360
/* Dispatch one BR/EDR signaling command to its handler.  Only the
 * handlers whose failure should trigger a Command Reject propagate an
 * error code to the caller.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5440
/* Handle an incoming LE Credit Based Connection Request.  Finds a
 * listening channel for the PSM, checks security and for a duplicate
 * remote CID, creates the new channel, and answers either immediately or
 * after the defer callback completes (L2CAP_CR_PEND).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;

	/* 23 is the minimum LE MTU/MPS allowed by the spec */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Lock order: conn->chan_lock, then the parent channel */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Response is deferred to the channel owner */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5547
/* Dispatch one LE signaling command to its handler.  Unknown codes are
 * reported via -EINVAL so the caller sends a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5589
5590 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5591 struct sk_buff *skb)
5592 {
5593 struct hci_conn *hcon = conn->hcon;
5594 struct l2cap_cmd_hdr *cmd;
5595 u16 len;
5596 int err;
5597
5598 if (hcon->type != LE_LINK)
5599 goto drop;
5600
5601 if (skb->len < L2CAP_CMD_HDR_SIZE)
5602 goto drop;
5603
5604 cmd = (void *) skb->data;
5605 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5606
5607 len = le16_to_cpu(cmd->len);
5608
5609 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5610
5611 if (len != skb->len || !cmd->ident) {
5612 BT_DBG("corrupted command");
5613 goto drop;
5614 }
5615
5616 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5617 if (err) {
5618 struct l2cap_cmd_rej_unk rej;
5619
5620 BT_ERR("Wrong link type (%d)", err);
5621
5622 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5623 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5624 sizeof(rej), &rej);
5625 }
5626
5627 drop:
5628 kfree_skb(skb);
5629 }
5630
5631 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5632 struct sk_buff *skb)
5633 {
5634 struct hci_conn *hcon = conn->hcon;
5635 u8 *data = skb->data;
5636 int len = skb->len;
5637 struct l2cap_cmd_hdr cmd;
5638 int err;
5639
5640 l2cap_raw_recv(conn, skb);
5641
5642 if (hcon->type != ACL_LINK)
5643 goto drop;
5644
5645 while (len >= L2CAP_CMD_HDR_SIZE) {
5646 u16 cmd_len;
5647 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5648 data += L2CAP_CMD_HDR_SIZE;
5649 len -= L2CAP_CMD_HDR_SIZE;
5650
5651 cmd_len = le16_to_cpu(cmd.len);
5652
5653 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5654 cmd.ident);
5655
5656 if (cmd_len > len || !cmd.ident) {
5657 BT_DBG("corrupted command");
5658 break;
5659 }
5660
5661 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5662 if (err) {
5663 struct l2cap_cmd_rej_unk rej;
5664
5665 BT_ERR("Wrong link type (%d)", err);
5666
5667 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5668 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5669 sizeof(rej), &rej);
5670 }
5671
5672 data += cmd_len;
5673 len -= cmd_len;
5674 }
5675
5676 drop:
5677 kfree_skb(skb);
5678 }
5679
/* Verify the trailing CRC16 FCS of a received ERTM/streaming frame when
 * the channel uses it.  Returns 0 on match (or when FCS is disabled),
 * -EBADMSG on mismatch.  On CRC16 channels the FCS is always stripped
 * from the skb.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* The FCS covers the L2CAP header, whose size depends on the
	 * control-field format in use.
	 */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off first; the bytes are still present in
		 * the buffer, so reading at data + len is safe.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5700
/* Answer a poll (P-bit) from the peer: send RNR if we are locally busy,
 * then flush pending I-frames, and finally fall back to an RR if no
 * frame carrying the F-bit went out.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer is no longer busy; restart the retransmission timer if
	 * frames are still awaiting acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5734
/* Append new_frag to skb's frag_list and advance *last_frag to it,
 * keeping skb's length accounting up to date.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Fragment data counts toward the head skb's totals */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5753
/* Reassemble an SDU from one or more I-frames according to the frame's
 * SAR bits and deliver it via chan->ops->recv().  Takes ownership of
 * skb: it is either handed to recv(), kept as (part of) chan->sdu, or
 * freed on error.  Any protocol violation (unexpected SAR state, SDU
 * size mismatch, SDU larger than our MTU) yields an error and discards
 * the partial SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment starts with the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete (or overrun) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5835
/* Re-segment queued SDUs after an MPS change on channel move.  Not yet
 * implemented; currently a no-op that reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5841
/* Inform the ERTM state machine that local receive processing became
 * busy or cleared.  Only meaningful in ERTM mode; other modes have no
 * local-busy handling.
 */
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	u8 event;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
}
5852
/* Drain in-sequence frames from the SREJ queue into SDU reassembly,
 * stopping at the first gap, a reassembly error, or when local busy is
 * raised.  Once the queue is empty the receiver returns to the normal
 * RECV state and acknowledges.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5886
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, enforcing the retry limit, and track SREJ state so a
 * duplicate triggered by our own poll is not retransmitted twice.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq asks for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of zero means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Remember the reqseq so the F-bit SREJ that follows our
		 * poll is not retransmitted again.
		 */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this final SREJ
			 * matches one we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5944
/* Handle a received REJ S-frame: retransmit all unacknowledged I-frames
 * starting at reqseq, enforcing the retry limit, and avoid a duplicate
 * retransmission when the REJ answers our own poll.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq rejects a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of zero means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip the retransmit if this final REJ was already
		 * acted on in response to our poll.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5981
/* Classify a received I-frame's txseq relative to the current receive
 * window and state.
 *
 * Returns one of the L2CAP_TXSEQ_* values that drive the RECV and
 * SREJ_SENT receive state machines: EXPECTED (next in sequence),
 * DUPLICATE (already received), UNEXPECTED (a gap - frames missing),
 * the *_SREJ variants while SREJ recovery is in progress, or
 * INVALID/INVALID_IGNORE for frames outside the permitted tx window.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		/* Even the expected sequence number must lie inside the
		 * tx window measured from the last acked frame.
		 */
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6067
/* ERTM receive state machine: RECV state.
 *
 * Handles incoming I-frames (in-sequence data is reassembled; a
 * sequence gap queues the frame, sends SREJs and transitions to
 * SREJ_SENT) and the RR/RNR/REJ/SREJ supervisory events.  The skb is
 * consumed only when stored for reassembly (skb_in_use); otherwise it
 * is freed before returning.  Returns 0 or a negative reassembly error.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Locally busy: drop the frame; the peer will
			 * retransmit once local busy is cleared.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received: only the piggybacked reqseq
			 * information is useful.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Peer is no longer busy: restart the retransmit
			 * timer if frames are still outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Frames not stored for reassembly are freed here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6201
/* ERTM receive state machine: SREJ_SENT state (SREJ recovery pending).
 *
 * While waiting for SREJ'd retransmissions, all usable I-frames are
 * queued on srej_q; once a requested frame arrives the queued frames
 * are reassembled in order (l2cap_rx_queued_iframes).  Supervisory
 * frames are handled much like in the RECV state.  The skb is consumed
 * only when queued (skb_in_use); otherwise it is freed before return.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The SREJ'd frame we were waiting for arrived:
			 * drop it from the SREJ list, queue it, and try
			 * to reassemble the now-contiguous frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with the tail of the SREJ list */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Plain RR acknowledging the RNR, no F-bit */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frames not stored on srej_q are freed here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6344
6345 static int l2cap_finish_move(struct l2cap_chan *chan)
6346 {
6347 BT_DBG("chan %p", chan);
6348
6349 chan->rx_state = L2CAP_RX_STATE_RECV;
6350
6351 if (chan->hs_hcon)
6352 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6353 else
6354 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6355
6356 return l2cap_resegment(chan);
6357 }
6358
/* ERTM receive state machine: WAIT_P state (channel move in progress).
 *
 * Only a frame with the P (poll) bit set is acceptable here; anything
 * else is a protocol error (-EPROTO).  On the poll, the transmit side
 * is rewound to the peer's reqseq, the move is completed (MTU update
 * and resegmentation) and an F-bit response is sent.  S-frame events
 * are then replayed through the RECV state handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not expected while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6396
/* ERTM receive state machine: WAIT_F state (channel move in progress).
 *
 * Only a frame with the F (final) bit set is acceptable; anything else
 * is a protocol error (-EPROTO).  On the final, the transmit side is
 * rewound to the peer's reqseq, the MTU is switched to the controller
 * now carrying the channel, pending data is resegmented, and the frame
 * is then processed by the RECV state handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Pick the MTU of the controller the channel moved to */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6434
6435 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6436 {
6437 /* Make sure reqseq is for a packet that has been sent but not acked */
6438 u16 unacked;
6439
6440 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6441 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6442 }
6443
6444 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6445 struct sk_buff *skb, u8 event)
6446 {
6447 int err = 0;
6448
6449 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6450 control, skb, event, chan->rx_state);
6451
6452 if (__valid_reqseq(chan, control->reqseq)) {
6453 switch (chan->rx_state) {
6454 case L2CAP_RX_STATE_RECV:
6455 err = l2cap_rx_state_recv(chan, control, skb, event);
6456 break;
6457 case L2CAP_RX_STATE_SREJ_SENT:
6458 err = l2cap_rx_state_srej_sent(chan, control, skb,
6459 event);
6460 break;
6461 case L2CAP_RX_STATE_WAIT_P:
6462 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6463 break;
6464 case L2CAP_RX_STATE_WAIT_F:
6465 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6466 break;
6467 default:
6468 /* shut it down */
6469 break;
6470 }
6471 } else {
6472 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6473 control->reqseq, chan->next_tx_seq,
6474 chan->expected_ack_seq);
6475 l2cap_send_disconn_req(chan, ECONNRESET);
6476 }
6477
6478 return err;
6479 }
6480
/* Streaming-mode receive path: no retransmission, no flow control.
 *
 * Only the exactly-expected txseq is reassembled; any other
 * classification discards the frame AND flushes a partially
 * reassembled SDU, since the stream is no longer contiguous.  The
 * window state (last_acked_seq/expected_tx_seq) always advances to
 * follow the received txseq.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Discard any partial SDU - the stream has a gap */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Track the stream position regardless of what we did above */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6518
/* Receive one ERTM/streaming-mode PDU: unpack and validate the control
 * field, FCS and payload length, then dispatch I-frames to the rx state
 * machine (or the streaming path) and S-frames to the matching RR/REJ/
 * RNR/SREJ event.  Invalid frames are either silently dropped (the
 * peer's recovery will retransmit) or cause an ECONNRESET disconnect.
 * Always returns 0; the skb is consumed on every path.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Compute the payload length net of SDU-length and FCS fields */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps L2CAP_SUPER_* values to receive events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6606
/* Deliver a data frame to the channel identified by cid.
 *
 * Unknown CIDs are dropped, except L2CAP_CID_A2MP which may create the
 * A2MP channel on demand.  The frame is then routed by channel mode:
 * basic/LE-flow-control frames go straight to the channel's recv
 * callback (dropped on imtu overflow), ERTM/streaming frames go through
 * l2cap_data_rcv.  The skb is consumed on every path, and the channel
 * lock (taken by l2cap_get_chan_by_scid or explicitly for A2MP) is
 * released before returning.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6666
/* Deliver a connectionless (CID 0x0002) frame to a channel listening on
 * the given PSM.  Only valid on ACL links; dropped if no matching
 * channel exists, the channel is in the wrong state, or the frame
 * exceeds its imtu.  The remote address and PSM are stashed in the skb
 * control block so recvmsg can fill in msg_name.
 *
 * NOTE(review): no l2cap_chan_put() after l2cap_global_chan_by_psm()
 * here - confirm that lookup does not take a channel reference.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6699
/* Deliver an ATT fixed-channel (CID 0x0004) frame on an LE link.
 *
 * Dropped when the link is not LE, no connected ATT channel matches the
 * address pair, the remote device is blacklisted, or the frame exceeds
 * the channel imtu.
 *
 * NOTE(review): no l2cap_chan_put() after l2cap_global_chan_by_scid()
 * here - confirm that lookup does not take a channel reference.
 */
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != LE_LINK)
		goto drop;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 &hcon->src, &hcon->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6728
/* Demultiplex one complete L2CAP frame to the handler for its CID
 * (signaling, connectionless, ATT, LE signaling, SMP, or a data
 * channel).  Frames whose header length field disagrees with the skb
 * length are discarded.  Every path consumes the skb.
 *
 * Assumes the caller has ensured at least a basic L2CAP header is
 * present (l2cap_recv_acldata validates start fragments against
 * L2CAP_HDR_SIZE before reassembly).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM in the first 2 bytes */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6775
6776 /* ---- L2CAP interface with lower layer (HCI) ---- */
6777
6778 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6779 {
6780 int exact = 0, lm1 = 0, lm2 = 0;
6781 struct l2cap_chan *c;
6782
6783 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6784
6785 /* Find listening sockets and check their link_mode */
6786 read_lock(&chan_list_lock);
6787 list_for_each_entry(c, &chan_list, global_l) {
6788 if (c->state != BT_LISTEN)
6789 continue;
6790
6791 if (!bacmp(&c->src, &hdev->bdaddr)) {
6792 lm1 |= HCI_LM_ACCEPT;
6793 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6794 lm1 |= HCI_LM_MASTER;
6795 exact++;
6796 } else if (!bacmp(&c->src, BDADDR_ANY)) {
6797 lm2 |= HCI_LM_ACCEPT;
6798 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6799 lm2 |= HCI_LM_MASTER;
6800 }
6801 }
6802 read_unlock(&chan_list_lock);
6803
6804 return exact ? lm1 : lm2;
6805 }
6806
6807 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6808 {
6809 struct l2cap_conn *conn;
6810
6811 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6812
6813 if (!status) {
6814 conn = l2cap_conn_add(hcon);
6815 if (conn)
6816 l2cap_conn_ready(conn);
6817 } else {
6818 l2cap_conn_del(hcon, bt_to_errno(status));
6819 }
6820 }
6821
6822 int l2cap_disconn_ind(struct hci_conn *hcon)
6823 {
6824 struct l2cap_conn *conn = hcon->l2cap_data;
6825
6826 BT_DBG("hcon %p", hcon);
6827
6828 if (!conn)
6829 return HCI_ERROR_REMOTE_USER_TERM;
6830 return conn->disc_reason;
6831 }
6832
6833 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6834 {
6835 BT_DBG("hcon %p reason %d", hcon, reason);
6836
6837 l2cap_conn_del(hcon, bt_to_errno(reason));
6838 }
6839
6840 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6841 {
6842 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6843 return;
6844
6845 if (encrypt == 0x00) {
6846 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6847 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6848 } else if (chan->sec_level == BT_SECURITY_HIGH)
6849 l2cap_chan_close(chan, ECONNREFUSED);
6850 } else {
6851 if (chan->sec_level == BT_SECURITY_MEDIUM)
6852 __clear_chan_timer(chan);
6853 }
6854 }
6855
/* HCI callback after an authentication/encryption change on a link.
 *
 * Walks every channel on the connection and advances its state: ATT
 * channels become ready once encrypted, connected/configured channels
 * resume, channels in BT_CONNECT proceed with connection setup, and
 * channels in BT_CONNECT2 (awaiting security) get their deferred
 * connect response sent.  On LE links a successful encryption also
 * kicks off SMP key distribution.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not affected by link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* ATT channels become ready as soon as the link encrypts */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Security completed while a connect response was
			 * deferred: answer the peer now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Successful accept: kick off configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
6960
/* HCI entry point for incoming ACL data: reassemble fragmented L2CAP
 * frames and hand complete ones to l2cap_recv_frame().
 *
 * A start fragment (ACL_START/ACL_START_NO_FLUSH/ACL_COMPLETE) must
 * carry at least the basic L2CAP header, whose length field sizes the
 * reassembly buffer (conn->rx_skb/rx_len).  Continuation fragments
 * (ACL_CONT) are appended until rx_len reaches zero.  Any framing
 * inconsistency discards the partial frame and marks ERTM channels on
 * the connection unreliable (ECOMM).  Always returns 0; the skb is
 * consumed on every path.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while one is being reassembled means
		 * the previous frame was truncated - discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7065
7066 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7067 {
7068 struct l2cap_chan *c;
7069
7070 read_lock(&chan_list_lock);
7071
7072 list_for_each_entry(c, &chan_list, global_l) {
7073 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7074 &c->src, &c->dst,
7075 c->state, __le16_to_cpu(c->psm),
7076 c->scid, c->dcid, c->imtu, c->omtu,
7077 c->sec_level, c->mode);
7078 }
7079
7080 read_unlock(&chan_list_lock);
7081
7082 return 0;
7083 }
7084
/* debugfs open handler: bind the seq_file show callback */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7089
/* File operations for the "l2cap" debugfs entry (seq_file based) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7098
7099 int __init l2cap_init(void)
7100 {
7101 int err;
7102
7103 err = l2cap_init_sockets();
7104 if (err < 0)
7105 return err;
7106
7107 if (IS_ERR_OR_NULL(bt_debugfs))
7108 return 0;
7109
7110 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7111 NULL, &l2cap_debugfs_fops);
7112
7113 return 0;
7114 }
7115
/* Module exit: remove the debugfs entry and unregister the sockets */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7121
/* Allow disabling ERTM from the command line (disable_ertm=1) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");