Bluetooth: Fix clearing of chan->omtu for LE CoC channels
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
/* Module parameter: when set, ERTM mode is never used */
bool disable_ertm;

/* Feature mask and fixed-channel bitmap advertised to peers —
 * presumably returned in Information Responses; confirm against the
 * info req/rsp handlers later in this file.
 */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };

/* Global list of all L2CAP channels, protected by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers defined later in this file */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
61
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
63 {
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
67 else
68 return BDADDR_LE_RANDOM;
69 }
70
71 return BDADDR_BREDR;
72 }
73
74 /* ---- L2CAP channels ---- */
75
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
77 u16 cid)
78 {
79 struct l2cap_chan *c;
80
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->dcid == cid)
83 return c;
84 }
85 return NULL;
86 }
87
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
89 u16 cid)
90 {
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98 }
99
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
109 if (c)
110 l2cap_chan_lock(c);
111 mutex_unlock(&conn->chan_lock);
112
113 return c;
114 }
115
116 /* Find channel with given DCID.
117 * Returns locked channel.
118 */
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
120 u16 cid)
121 {
122 struct l2cap_chan *c;
123
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
126 if (c)
127 l2cap_chan_lock(c);
128 mutex_unlock(&conn->chan_lock);
129
130 return c;
131 }
132
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
134 u8 ident)
135 {
136 struct l2cap_chan *c;
137
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
140 return c;
141 }
142 return NULL;
143 }
144
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 u8 ident)
147 {
148 struct l2cap_chan *c;
149
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
152 if (c)
153 l2cap_chan_lock(c);
154 mutex_unlock(&conn->chan_lock);
155
156 return c;
157 }
158
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
160 {
161 struct l2cap_chan *c;
162
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
165 return c;
166 }
167 return NULL;
168 }
169
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
171 {
172 int err;
173
174 write_lock(&chan_list_lock);
175
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
177 err = -EADDRINUSE;
178 goto done;
179 }
180
181 if (psm) {
182 chan->psm = psm;
183 chan->sport = psm;
184 err = 0;
185 } else {
186 u16 p;
187
188 err = -EINVAL;
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
193 err = 0;
194 break;
195 }
196 }
197
198 done:
199 write_unlock(&chan_list_lock);
200 return err;
201 }
202
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
204 {
205 write_lock(&chan_list_lock);
206
207 chan->scid = scid;
208
209 write_unlock(&chan_list_lock);
210
211 return 0;
212 }
213
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
215 {
216 u16 cid = L2CAP_CID_DYN_START;
217
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
220 return cid;
221 }
222
223 return 0;
224 }
225
/* Move the channel to a new state and notify the channel owner via the
 * state_change callback with no error attached.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
234
/* As l2cap_state_change(), but also propagates an error code to the
 * owner (and skips the debug logging).
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
241
/* Report an error to the channel owner without changing the state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
246
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (presumably the monitor timer supersedes it) or no
 * retransmission timeout was negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
255
/* Switch from the retransmission timer to the monitor timer. A zero
 * monitor_timeout leaves the monitor timer unarmed.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
264
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 u16 seq)
267 {
268 struct sk_buff *skb;
269
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
272 return skb;
273 }
274
275 return NULL;
276 }
277
278 /* ---- L2CAP sequence number lists ---- */
279
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
287 */
288
/* Allocate and reset the backing array of a sequence-number list.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* mask maps a sequence number to an array index via bitwise AND */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
311
/* Release the list's backing array (kfree(NULL) is a no-op) */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
316
/* O(1) membership test: a slot holds L2CAP_SEQ_LIST_CLEAR exactly when
 * its sequence number is not on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
323
/* Remove a sequence number from the list. Removing the head is O(1);
 * any other element requires walking the singly-linked chain from the
 * head. Returns the removed number, or L2CAP_SEQ_LIST_CLEAR when the
 * list is empty or the number is not present.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element empties the list entirely */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
357
/* Pop the current head; returns L2CAP_SEQ_LIST_CLEAR when empty */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
363
/* Empty the list by clearing every slot; no-op when already empty */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
377
/* Append a sequence number at the tail in O(1). A number that is
 * already on the list stays where it is (no duplicates).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395
/* Delayed work run when the channel timer expires: close the channel
 * with an error that depends on the state it was stuck in.
 *
 * Locking: conn->chan_lock is taken before the channel lock, matching
 * the ordering used elsewhere in this file; ops->close runs with the
 * channel unlocked. The final l2cap_chan_put() drops a reference
 * (presumably taken when the timer was armed — confirm l2cap_set_timer).
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
425
/* Allocate and initialise a new channel in BT_OPEN state and add it to
 * the global channel list. Returns NULL on allocation failure; the
 * caller owns the initial kref.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
453
/* kref release callback: unlink the channel from the global list and
 * free it. Never called directly — see l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
466
/* Take a reference on a channel */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
473
/* Drop a channel reference; frees it via l2cap_chan_destroy() when the
 * last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
480
/* Reset negotiable channel parameters (FCS, tx window, security level)
 * to their protocol defaults.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
492
493 void l2cap_le_flowctl_init(struct l2cap_chan *chan)
494 {
495 chan->imtu = L2CAP_DEFAULT_MTU;
496 chan->omtu = L2CAP_LE_MIN_MTU;
497 chan->mode = L2CAP_MODE_LE_FLOWCTL;
498 chan->tx_credits = 0;
499 chan->rx_credits = L2CAP_LE_MAX_CREDITS;
500
501 if (chan->imtu < L2CAP_LE_DEFAULT_MPS)
502 chan->mps = chan->imtu;
503 else
504 chan->mps = L2CAP_LE_DEFAULT_MPS;
505 }
506
/* Attach a channel to a connection: choose the source CID and the
 * default outgoing MTU per channel type, install best-effort extended
 * flow spec defaults, and take references on the channel and the
 * underlying hci_conn. Caller must hold conn->chan_lock (see
 * l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Reset any previously recorded disconnect reason */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			if (chan->dcid == L2CAP_CID_ATT) {
				/* Fixed ATT channel */
				chan->omtu = L2CAP_DEFAULT_MTU;
				chan->scid = L2CAP_CID_ATT;
			} else {
				/* LE connection-oriented channel */
				chan->scid = l2cap_alloc_cid(conn);
			}
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		/* A2MP fixed channel */
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort extended flow specification defaults */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
566
/* Lock-taking wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
573
574 void l2cap_chan_del(struct l2cap_chan *chan, int err)
575 {
576 struct l2cap_conn *conn = chan->conn;
577
578 __clear_chan_timer(chan);
579
580 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
581
582 if (conn) {
583 struct amp_mgr *mgr = conn->hcon->amp_mgr;
584 /* Delete from channel list */
585 list_del(&chan->list);
586
587 l2cap_chan_put(chan);
588
589 chan->conn = NULL;
590
591 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
592 hci_conn_drop(conn->hcon);
593
594 if (mgr && mgr->bredr_chan == chan)
595 mgr->bredr_chan = NULL;
596 }
597
598 if (chan->hs_hchan) {
599 struct hci_chan *hs_hchan = chan->hs_hchan;
600
601 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
602 amp_disconnect_logical_link(hs_hchan);
603 }
604
605 chan->ops->teardown(chan, err);
606
607 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
608 return;
609
610 switch(chan->mode) {
611 case L2CAP_MODE_BASIC:
612 break;
613
614 case L2CAP_MODE_LE_FLOWCTL:
615 skb_queue_purge(&chan->tx_q);
616 break;
617
618 case L2CAP_MODE_ERTM:
619 __clear_retrans_timer(chan);
620 __clear_monitor_timer(chan);
621 __clear_ack_timer(chan);
622
623 skb_queue_purge(&chan->srej_q);
624
625 l2cap_seq_list_free(&chan->srej_list);
626 l2cap_seq_list_free(&chan->retrans_list);
627
628 /* fall through */
629
630 case L2CAP_MODE_STREAMING:
631 skb_queue_purge(&chan->tx_q);
632 break;
633 }
634
635 return;
636 }
637
638 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
639 {
640 struct l2cap_conn *conn = chan->conn;
641 struct l2cap_le_conn_rsp rsp;
642 u16 result;
643
644 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
645 result = L2CAP_CR_AUTHORIZATION;
646 else
647 result = L2CAP_CR_BAD_PSM;
648
649 l2cap_state_change(chan, BT_DISCONN);
650
651 rsp.dcid = cpu_to_le16(chan->scid);
652 rsp.mtu = cpu_to_le16(chan->imtu);
653 rsp.mps = cpu_to_le16(chan->mps);
654 rsp.credits = cpu_to_le16(chan->rx_credits);
655 rsp.result = cpu_to_le16(result);
656
657 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
658 &rsp);
659 }
660
661 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
662 {
663 struct l2cap_conn *conn = chan->conn;
664 struct l2cap_conn_rsp rsp;
665 u16 result;
666
667 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
668 result = L2CAP_CR_SEC_BLOCK;
669 else
670 result = L2CAP_CR_BAD_PSM;
671
672 l2cap_state_change(chan, BT_DISCONN);
673
674 rsp.scid = cpu_to_le16(chan->dcid);
675 rsp.dcid = cpu_to_le16(chan->scid);
676 rsp.result = cpu_to_le16(result);
677 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
678
679 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
680 }
681
/* Initiate an orderly shutdown of a channel based on its state:
 * connected dynamic channels send a Disconnect request and wait for
 * the response under a timer, half-open channels reject the pending
 * connect first, everything else is torn down immediately. Callers in
 * this file invoke it with the channel locked (see
 * l2cap_chan_timeout()).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
		 * check for chan->psm.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
726
727 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
728 {
729 switch (chan->chan_type) {
730 case L2CAP_CHAN_RAW:
731 switch (chan->sec_level) {
732 case BT_SECURITY_HIGH:
733 return HCI_AT_DEDICATED_BONDING_MITM;
734 case BT_SECURITY_MEDIUM:
735 return HCI_AT_DEDICATED_BONDING;
736 default:
737 return HCI_AT_NO_BONDING;
738 }
739 break;
740 case L2CAP_CHAN_CONN_LESS:
741 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
742 if (chan->sec_level == BT_SECURITY_LOW)
743 chan->sec_level = BT_SECURITY_SDP;
744 }
745 if (chan->sec_level == BT_SECURITY_HIGH)
746 return HCI_AT_NO_BONDING_MITM;
747 else
748 return HCI_AT_NO_BONDING;
749 break;
750 case L2CAP_CHAN_CONN_ORIENTED:
751 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
752 if (chan->sec_level == BT_SECURITY_LOW)
753 chan->sec_level = BT_SECURITY_SDP;
754
755 if (chan->sec_level == BT_SECURITY_HIGH)
756 return HCI_AT_NO_BONDING_MITM;
757 else
758 return HCI_AT_NO_BONDING;
759 }
760 /* fall through */
761 default:
762 switch (chan->sec_level) {
763 case BT_SECURITY_HIGH:
764 return HCI_AT_GENERAL_BONDING_MITM;
765 case BT_SECURITY_MEDIUM:
766 return HCI_AT_GENERAL_BONDING;
767 default:
768 return HCI_AT_NO_BONDING;
769 }
770 break;
771 }
772 }
773
774 /* Service level security */
775 int l2cap_chan_check_security(struct l2cap_chan *chan)
776 {
777 struct l2cap_conn *conn = chan->conn;
778 __u8 auth_type;
779
780 if (conn->hcon->type == LE_LINK)
781 return smp_conn_security(conn->hcon, chan->sec_level);
782
783 auth_type = l2cap_get_auth_type(chan);
784
785 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
786 }
787
/* Allocate the next signalling command identifier for this connection,
 * cycling through 1-128 (0 is never handed out) under conn->lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
809
/* Build and transmit a signalling command over the connection's ACL
 * channel. Silently does nothing when skb allocation fails.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use non-flushable packets when the controller supports them */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic is sent at the highest priority */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
831
832 static bool __chan_is_moving(struct l2cap_chan *chan)
833 {
834 return chan->move_state != L2CAP_MOVE_STABLE &&
835 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
836 }
837
/* Transmit one data skb for a channel. A channel that has a high-speed
 * hci_conn and is not mid-move sends via the high-speed hci_chan (the
 * skb is dropped if that hci_chan is gone); everything else goes over
 * the BR/EDR ACL link.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Non-flushable only when the channel does not require flushing
	 * and the controller supports it.
	 */
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
864
865 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
866 {
867 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
868 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
869
870 if (enh & L2CAP_CTRL_FRAME_TYPE) {
871 /* S-Frame */
872 control->sframe = 1;
873 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
874 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
875
876 control->sar = 0;
877 control->txseq = 0;
878 } else {
879 /* I-Frame */
880 control->sframe = 0;
881 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
882 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
883
884 control->poll = 0;
885 control->super = 0;
886 }
887 }
888
889 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
890 {
891 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
892 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
893
894 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
895 /* S-Frame */
896 control->sframe = 1;
897 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
898 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
899
900 control->sar = 0;
901 control->txseq = 0;
902 } else {
903 /* I-Frame */
904 control->sframe = 0;
905 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
906 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
907
908 control->poll = 0;
909 control->super = 0;
910 }
911 }
912
913 static inline void __unpack_control(struct l2cap_chan *chan,
914 struct sk_buff *skb)
915 {
916 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
917 __unpack_extended_control(get_unaligned_le32(skb->data),
918 &bt_cb(skb)->control);
919 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
920 } else {
921 __unpack_enhanced_control(get_unaligned_le16(skb->data),
922 &bt_cb(skb)->control);
923 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
924 }
925 }
926
927 static u32 __pack_extended_control(struct l2cap_ctrl *control)
928 {
929 u32 packed;
930
931 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
932 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
933
934 if (control->sframe) {
935 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
936 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
937 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
938 } else {
939 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
940 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
941 }
942
943 return packed;
944 }
945
946 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
947 {
948 u16 packed;
949
950 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
951 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
952
953 if (control->sframe) {
954 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
955 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
956 packed |= L2CAP_CTRL_FRAME_TYPE;
957 } else {
958 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
959 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
960 }
961
962 return packed;
963 }
964
965 static inline void __pack_control(struct l2cap_chan *chan,
966 struct l2cap_ctrl *control,
967 struct sk_buff *skb)
968 {
969 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
970 put_unaligned_le32(__pack_extended_control(control),
971 skb->data + L2CAP_HDR_SIZE);
972 } else {
973 put_unaligned_le16(__pack_enhanced_control(control),
974 skb->data + L2CAP_HDR_SIZE);
975 }
976 }
977
978 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
979 {
980 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
981 return L2CAP_EXT_HDR_SIZE;
982 else
983 return L2CAP_ENH_HDR_SIZE;
984 }
985
/* Allocate and fill a complete S-frame PDU: basic header, control
 * field and, when CRC16 FCS is in use, the trailing checksum. Returns
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1018
/* Transmit the supervisory frame described by *control: fold in a
 * pending F-bit, track the RNR-sent state, and — for anything but an
 * SREJ — record the acknowledged sequence number and stop the ack
 * timer. No-op for I-frame controls or while the channel is moving
 * between controllers.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A queued F-bit is carried on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1059
1060 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1061 {
1062 struct l2cap_ctrl control;
1063
1064 BT_DBG("chan %p, poll %d", chan, poll);
1065
1066 memset(&control, 0, sizeof(control));
1067 control.sframe = 1;
1068 control.poll = poll;
1069
1070 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1071 control.super = L2CAP_SUPER_RNR;
1072 else
1073 control.super = L2CAP_SUPER_RR;
1074
1075 control.reqseq = chan->buffer_seq;
1076 l2cap_send_sframe(chan, &control);
1077 }
1078
1079 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1080 {
1081 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1082 }
1083
/* Decide whether this channel may use an AMP controller: high speed
 * must be enabled on the connection, the peer must advertise the A2MP
 * fixed channel, at least one non-BR/EDR controller must be up, and
 * the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1111
/* Validate extended flow spec parameters for a channel.
 *
 * NOTE(review): currently a stub that accepts everything; real
 * parameter checking has not been implemented.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1117
1118 void l2cap_send_conn_req(struct l2cap_chan *chan)
1119 {
1120 struct l2cap_conn *conn = chan->conn;
1121 struct l2cap_conn_req req;
1122
1123 req.scid = cpu_to_le16(chan->scid);
1124 req.psm = chan->psm;
1125
1126 chan->ident = l2cap_get_ident(conn);
1127
1128 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1129
1130 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1131 }
1132
1133 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1134 {
1135 struct l2cap_create_chan_req req;
1136 req.scid = cpu_to_le16(chan->scid);
1137 req.psm = chan->psm;
1138 req.amp_id = amp_id;
1139
1140 chan->ident = l2cap_get_ident(chan->conn);
1141
1142 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1143 sizeof(req), &req);
1144 }
1145
/* Prepare an ERTM channel for a controller move: stop all timers,
 * reset the retry count of frames that were already transmitted to 1,
 * discard SREJ state, and reset the rx/tx state machines. Transmission
 * is held off by setting CONN_REMOTE_BUSY. No-op for non-ERTM modes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames at the front of tx_q with a non-zero retry count have
	 * been sent at least once; the first unsent frame ends the scan.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1180
/* Finish a controller move: return the move state machine to stable
 * and, for ERTM channels, start the poll/final exchange that
 * resynchronises both sides (the initiator polls, the responder waits
 * for the poll).
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;

	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1202
1203 static void l2cap_le_flowctl_start(struct l2cap_chan *chan)
1204 {
1205 chan->sdu = NULL;
1206 chan->sdu_last_frag = NULL;
1207 chan->sdu_len = 0;
1208
1209 if (chan->imtu < L2CAP_LE_DEFAULT_MPS)
1210 chan->mps = chan->imtu;
1211 else
1212 chan->mps = L2CAP_LE_DEFAULT_MPS;
1213
1214 skb_queue_head_init(&chan->tx_q);
1215
1216 if (!chan->tx_credits)
1217 chan->ops->suspend(chan);
1218 }
1219
/* Transition a channel to BT_CONNECTED and notify its owner via
 * ops->ready(). LE flow-control channels get their credit-based
 * machinery initialized first.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	if (chan->mode == L2CAP_MODE_LE_FLOWCTL)
		l2cap_le_flowctl_start(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1233
1234 static void l2cap_le_connect(struct l2cap_chan *chan)
1235 {
1236 struct l2cap_conn *conn = chan->conn;
1237 struct l2cap_le_conn_req req;
1238
1239 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1240 return;
1241
1242 req.psm = chan->psm;
1243 req.scid = cpu_to_le16(chan->scid);
1244 req.mtu = cpu_to_le16(chan->imtu);
1245 req.mps = cpu_to_le16(chan->mps);
1246 req.credits = cpu_to_le16(chan->rx_credits);
1247
1248 chan->ident = l2cap_get_ident(conn);
1249
1250 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1251 sizeof(req), &req);
1252 }
1253
1254 static void l2cap_le_start(struct l2cap_chan *chan)
1255 {
1256 struct l2cap_conn *conn = chan->conn;
1257
1258 if (!smp_conn_security(conn->hcon, chan->sec_level))
1259 return;
1260
1261 if (!chan->psm) {
1262 l2cap_chan_ready(chan);
1263 return;
1264 }
1265
1266 if (chan->state == BT_CONNECT)
1267 l2cap_le_connect(chan);
1268 }
1269
1270 static void l2cap_start_connection(struct l2cap_chan *chan)
1271 {
1272 if (__amp_capable(chan)) {
1273 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1274 a2mp_discover_amp(chan);
1275 } else if (chan->conn->hcon->type == LE_LINK) {
1276 l2cap_le_start(chan);
1277 } else {
1278 l2cap_send_conn_req(chan);
1279 }
1280 }
1281
/* Begin channel setup on an existing link. On BR/EDR the remote
 * feature mask must be known first, so an Information Request is
 * sent once per connection; setup then resumes from the response
 * handler or from l2cap_info_timeout() via l2cap_conn_start().
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight — wait for it */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the Information Response */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1312
1313 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1314 {
1315 u32 local_feat_mask = l2cap_feat_mask;
1316 if (!disable_ertm)
1317 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1318
1319 switch (mode) {
1320 case L2CAP_MODE_ERTM:
1321 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1322 case L2CAP_MODE_STREAMING:
1323 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1324 default:
1325 return 0x00;
1326 }
1327 }
1328
/* Move a channel towards BT_DISCONN with error @err. A
 * Disconnection Request is sent to the peer, except for A2MP fixed
 * channels which only change state locally.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop ERTM timers before leaving the connected state */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* A2MP fixed channels carry no DCID/SCID to disconnect */
	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1355
1356 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and push its
 * setup forward: outgoing channels in BT_CONNECT are (re)started,
 * incoming ones in BT_CONNECT2 get their pending Connection
 * Response sent. Runs with conn->chan_lock held; each channel is
 * individually locked while processed.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security not ready or a connect already
			 * pending — leave for a later pass.
			 */
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Requested mode unsupported and channel is not
			 * allowed to fall back — abort it.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Owner must authorize first */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Still waiting for authentication */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Success: immediately start configuration */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1432
1433 /* Find socket with cid and source/destination bdaddr.
 * Returns closest match (the chan_list lock is dropped before return).
1435 */
1436 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1437 bdaddr_t *src,
1438 bdaddr_t *dst)
1439 {
1440 struct l2cap_chan *c, *c1 = NULL;
1441
1442 read_lock(&chan_list_lock);
1443
1444 list_for_each_entry(c, &chan_list, global_l) {
1445 if (state && c->state != state)
1446 continue;
1447
1448 if (c->scid == cid) {
1449 int src_match, dst_match;
1450 int src_any, dst_any;
1451
1452 /* Exact match. */
1453 src_match = !bacmp(&c->src, src);
1454 dst_match = !bacmp(&c->dst, dst);
1455 if (src_match && dst_match) {
1456 read_unlock(&chan_list_lock);
1457 return c;
1458 }
1459
1460 /* Closest match */
1461 src_any = !bacmp(&c->src, BDADDR_ANY);
1462 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1463 if ((src_match && dst_any) || (src_any && dst_match) ||
1464 (src_any && dst_any))
1465 c1 = c;
1466 }
1467 }
1468
1469 read_unlock(&chan_list_lock);
1470
1471 return c1;
1472 }
1473
/* An LE link came up: if a server socket is listening on the ATT
 * fixed channel for this address pair, spawn a child channel for
 * the new connection and attach it to @conn.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
		return;

	l2cap_chan_lock(pchan);

	/* Let the owner allocate the child channel */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	/* Inherit addressing from the underlying HCI connection */
	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
1516
/* The underlying link is up: accept any incoming LE ATT channel,
 * then advance every attached channel — LE channels via the LE
 * start path, fixed channels straight to ready, and BR/EDR
 * connection-oriented ones via l2cap_do_start().
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by the AMP code */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels need no connect handshake */
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1558
/* Notify sockets that we cannot guarantee reliability anymore */
1560 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1561 {
1562 struct l2cap_chan *chan;
1563
1564 BT_DBG("conn %p", conn);
1565
1566 mutex_lock(&conn->chan_lock);
1567
1568 list_for_each_entry(chan, &conn->chan_l, list) {
1569 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1570 l2cap_chan_set_err(chan, err);
1571 }
1572
1573 mutex_unlock(&conn->chan_lock);
1574 }
1575
/* The Information Request timed out: mark the feature exchange as
 * done anyway and let the waiting channels proceed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1586
1587 /*
1588 * l2cap_user
1589 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1590 * callback is called during registration. The ->remove callback is called
1591 * during unregistration.
1592 * An l2cap_user object can either be explicitly unregistered or when the
1593 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1594 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1595 * External modules must own a reference to the l2cap_conn object if they intend
1596 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1597 * any time if they don't.
1598 */
1599
/* Register an external l2cap_user on @conn and invoke its ->probe()
 * callback. Returns 0 on success, -EINVAL if @user is already
 * linked, -ENODEV if the connection is already torn down, or the
 * error from ->probe().
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL linkage means the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1637
1638 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1639 {
1640 struct hci_dev *hdev = conn->hcon->hdev;
1641
1642 hci_dev_lock(hdev);
1643
1644 if (!user->list.next || !user->list.prev)
1645 goto out_unlock;
1646
1647 list_del(&user->list);
1648 user->list.next = NULL;
1649 user->list.prev = NULL;
1650 user->remove(conn, user);
1651
1652 out_unlock:
1653 hci_dev_unlock(hdev);
1654 }
1655 EXPORT_SYMBOL(l2cap_unregister_user);
1656
1657 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1658 {
1659 struct l2cap_user *user;
1660
1661 while (!list_empty(&conn->users)) {
1662 user = list_first_entry(&conn->users, struct l2cap_user, list);
1663 list_del(&user->list);
1664 user->list.next = NULL;
1665 user->list.prev = NULL;
1666 user->remove(conn, user);
1667 }
1668 }
1669
/* Tear down the L2CAP connection attached to @hcon: notify users,
 * kill every channel with error @err, release the HCI channel and
 * pending timers, then drop the connection reference. The teardown
 * order (users, channels, hchan, timers) is deliberate.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame */
	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ops->close() can run after
		 * l2cap_chan_del() has dropped the conn's reference.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* LE links with pending SMP also need their security timer
	 * and SMP context cleaned up.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1715
/* SMP security procedure timed out on an LE link: destroy the SMP
 * context and tear the whole L2CAP connection down with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	/* test_and_clear avoids racing with a concurrent teardown */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1728
/* Get or create the l2cap_conn for @hcon. Returns the existing
 * connection if one is already attached, a newly initialized one
 * otherwise, or NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* The conn holds a reference on its hci_conn; dropped in
	 * l2cap_conn_free().
	 */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the outgoing MTU for this link type; an LE link with
	 * no dedicated LE buffers falls back to the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* security_timer and info_timer share storage: only one is
	 * meaningful per link type.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1788
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the connection object.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1796
/* Take a reference on @conn; release with l2cap_conn_put(). */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1802
/* Drop a reference on @conn; frees it via l2cap_conn_free() when
 * the last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1808
1809 /* ---- Socket interface ---- */
1810
1811 /* Find socket with psm and source / destination bdaddr.
1812 * Returns closest match.
1813 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Skip channels bound to the wrong transport for the
		 * link this lookup is for.
		 */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	/* NULL when neither an exact nor a wildcard candidate matched */
	return c1;
}
1858
/* Connect @chan to @dst on PSM @psm or fixed CID @cid. Validates
 * the parameters and channel state, creates (or reuses) the HCI
 * link and its l2cap_conn, attaches the channel and starts the
 * L2CAP connect procedure. Returns 0 on success or a negative
 * errno; 0 is also returned when a connect is already in progress.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed CID may only be in use once per connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* l2cap_chan_add() takes conn->chan_lock, so the channel lock
	 * must be dropped around it to respect lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Link already up: start the L2CAP-level procedure right away */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1993
1994 static void l2cap_monitor_timeout(struct work_struct *work)
1995 {
1996 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1997 monitor_timer.work);
1998
1999 BT_DBG("chan %p", chan);
2000
2001 l2cap_chan_lock(chan);
2002
2003 if (!chan->conn) {
2004 l2cap_chan_unlock(chan);
2005 l2cap_chan_put(chan);
2006 return;
2007 }
2008
2009 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2010
2011 l2cap_chan_unlock(chan);
2012 l2cap_chan_put(chan);
2013 }
2014
2015 static void l2cap_retrans_timeout(struct work_struct *work)
2016 {
2017 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2018 retrans_timer.work);
2019
2020 BT_DBG("chan %p", chan);
2021
2022 l2cap_chan_lock(chan);
2023
2024 if (!chan->conn) {
2025 l2cap_chan_unlock(chan);
2026 l2cap_chan_put(chan);
2027 return;
2028 }
2029
2030 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2031 l2cap_chan_unlock(chan);
2032 l2cap_chan_put(chan);
2033 }
2034
/* Transmit a batch of streaming-mode frames: append @skbs to the TX
 * queue and send everything queued, stamping each frame with the
 * next TX sequence number and an FCS when configured. Streaming
 * mode never retransmits, so frames leave the queue permanently.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Don't send while a channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode carries no acknowledgements */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2073
/* Transmit queued ERTM I-frames starting at tx_send_head, limited
 * by the remote's TX window. Each frame is cloned before sending so
 * the original stays queued for possible retransmission. Returns
 * the number of frames sent, or -ENOTCONN if not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote receiver is busy — hold transmissions */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggy-back an acknowledgement for received frames */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL means the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2143
/* Retransmit every frame whose sequence number is queued on
 * chan->retrans_list. Frames exceeding max_tx retries trigger a
 * disconnect. Cloned frames are copied before their control field
 * is rewritten, since clone data is treated as read-only.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means retry forever */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the piggy-backed ack and F-bit */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS over the rewritten frame */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2222
/* Queue the single frame identified by @control->reqseq for
 * retransmission and push it out immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2231
/* Queue every unacked frame from @control->reqseq up to (but not
 * including) tx_send_head for retransmission, then resend them.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll from the peer must be answered with the F-bit set */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the retransmit list from scratch */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything sent but not yet acknowledged */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2265
/* Acknowledge received I-frames. If we are locally busy an RNR is
 * sent; otherwise pending I-frames (which carry the ack implicitly)
 * are sent first, and an explicit RR only goes out once roughly
 * 3/4 of the ack window is consumed. Otherwise the ack timer is
 * (re)armed to batch acknowledgements.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack via the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2315
/* Copy @len bytes of user data from @msg into @skb: the first
 * @count bytes land in @skb itself, the remainder is split into
 * HCI-MTU-sized continuation fragments chained on skb's frag_list.
 * Returns the number of bytes copied or a negative errno. On
 * failure the caller frees @skb, which also releases any fragments
 * already attached.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	/* NOTE(review): relies on memcpy_fromiovec advancing msg_iov
	 * so consecutive calls continue where the previous stopped.
	 */
	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Account the fragment in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2360
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by @len bytes from @msg (fragmented to the
 * HCI MTU). Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	/* First fragment is bounded by the HCI MTU minus our header */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2395
/* Build a single basic-mode (B-frame) PDU: L2CAP header plus the payload
 * copied from the user iovec.  The first skb holds up to conn->mtu bytes;
 * the remainder is chained as fragments by l2cap_skbuff_fromiovec().
 *
 * Returns the skb on success or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Payload that fits in the first skb alongside the header */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2428
/* Build a single ERTM/streaming I-frame.  The header consists of the
 * L2CAP header, a 16- or 32-bit control field (zeroed here, populated
 * just before transmission) and, on the first PDU of a segmented SDU
 * (sdulen != 0), the total SDU length.  When CRC16 FCS is enabled the
 * header budget also reserves room for the trailer so the finished
 * frame still fits in conn->mtu.
 *
 * Returns the skb on success or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Control field size depends on whether extended control is used */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Reserve space for the FCS trailer in the MTU budget */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2482
/* Segment an outgoing SDU into one or more ERTM/streaming I-frame PDUs
 * and queue them on seg_queue.  An SDU that fits a single PDU is marked
 * SAR_UNSEGMENTED; otherwise the first PDU carries SAR_START plus the
 * total SDU length, the last SAR_END, and any in between SAR_CONTINUE.
 *
 * Returns 0 on success or a negative errno; seg_queue is purged on
 * failure.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU; no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First PDU carries the total SDU length, which costs
		 * L2CAP_SDULEN_SIZE bytes of its payload budget.
		 */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU carries the SDU length;
			 * later PDUs regain that payload space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2552
/* Build a single LE credit-based flow control PDU: L2CAP header plus,
 * on the first PDU of an SDU (sdulen != 0), the total SDU length,
 * followed by the payload copied from the user iovec.
 *
 * Returns the skb on success or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Payload that fits in the first skb alongside the header */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2595
/* Segment an outgoing SDU into LE flow-control PDUs queued on seg_queue.
 * Unlike ERTM segmentation, the first PDU always carries the total SDU
 * length, even when the SDU fits in a single PDU.
 *
 * Returns 0 on success or a negative errno; seg_queue is purged on
 * failure.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* Per-PDU payload is bounded by the HCI MTU ... */
	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	/* ... and by the MPS the remote requested */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	/* First PDU carries the SDU length, costing it some payload room */
	sdu_len = len;
	pdu_len -= L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Subsequent PDUs have no SDU length field */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2635
/* Transmit an SDU on a channel, dispatching on channel type and mode:
 * connectionless channels get a single G-frame; LE flow-control and
 * basic mode build and send directly; ERTM/streaming segment first and
 * hand the frames to the state machine.
 *
 * Returns the number of bytes accepted (== len) on success, or a
 * negative errno: -ENOTCONN if there is no connection or the channel
 * closed during segmentation, -EMSGSIZE if len exceeds the outgoing
 * MTU, -EAGAIN if an LE channel has no TX credits, -EBADFD for an
 * unknown mode.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting (allocation
		 * can block); don't queue onto a dead channel.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send only as many PDUs as we have credits for; the
		 * rest stay on tx_q until more credits arrive.
		 */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		/* Out of credits: stop accepting data from the socket */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2752
/* Send an SREJ S-frame for every missing sequence number from
 * expected_tx_seq up to (but not including) txseq, skipping frames
 * already buffered out of order in srej_q.  Each requested sequence
 * number is recorded on srej_list; reception then resumes after txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Don't re-request frames we already hold out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2775
2776 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2777 {
2778 struct l2cap_ctrl control;
2779
2780 BT_DBG("chan %p", chan);
2781
2782 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2783 return;
2784
2785 memset(&control, 0, sizeof(control));
2786 control.sframe = 1;
2787 control.super = L2CAP_SUPER_SREJ;
2788 control.reqseq = chan->srej_list.tail;
2789 l2cap_send_sframe(chan, &control);
2790 }
2791
/* Re-send an SREJ for every sequence number still on the SREJ list,
 * stopping early when txseq (or the end of the list) is reached.  Each
 * entry is popped, re-sent and re-appended; capturing the initial head
 * bounds the walk to exactly one pass despite the re-queueing.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2817
/* Process an acknowledgement (reqseq) from the peer: free every frame
 * on the TX queue with a sequence number between expected_ack_seq and
 * reqseq, then advance expected_ack_seq.  The retransmission timer is
 * stopped once no unacked frames remain.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2849
2850 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2851 {
2852 BT_DBG("chan %p", chan);
2853
2854 chan->expected_tx_seq = chan->buffer_seq;
2855 l2cap_seq_list_clear(&chan->srej_list);
2856 skb_queue_purge(&chan->srej_q);
2857 chan->rx_state = L2CAP_RX_STATE_RECV;
2858 }
2859
/* ERTM transmit state machine, XMIT state handler.  In this state new
 * data is sent immediately; polls and timeouts move the channel to the
 * WAIT_F state until the peer responds with the F-bit set.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new frames and transmit right away */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy (RNR); poll with
			 * RR to resynchronize and wait for the F-bit.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer and wait for the F-bit response */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll and wait for the F-bit */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2931
2932 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2933 struct l2cap_ctrl *control,
2934 struct sk_buff_head *skbs, u8 event)
2935 {
2936 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2937 event);
2938
2939 switch (event) {
2940 case L2CAP_EV_DATA_REQUEST:
2941 if (chan->tx_send_head == NULL)
2942 chan->tx_send_head = skb_peek(skbs);
2943 /* Queue data, but don't send. */
2944 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2945 break;
2946 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2947 BT_DBG("Enter LOCAL_BUSY");
2948 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2949
2950 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2951 /* The SREJ_SENT state must be aborted if we are to
2952 * enter the LOCAL_BUSY state.
2953 */
2954 l2cap_abort_rx_srej_sent(chan);
2955 }
2956
2957 l2cap_send_ack(chan);
2958
2959 break;
2960 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2961 BT_DBG("Exit LOCAL_BUSY");
2962 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2963
2964 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2965 struct l2cap_ctrl local_control;
2966 memset(&local_control, 0, sizeof(local_control));
2967 local_control.sframe = 1;
2968 local_control.super = L2CAP_SUPER_RR;
2969 local_control.poll = 1;
2970 local_control.reqseq = chan->buffer_seq;
2971 l2cap_send_sframe(chan, &local_control);
2972
2973 chan->retry_count = 1;
2974 __set_monitor_timer(chan);
2975 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2976 }
2977 break;
2978 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2979 l2cap_process_reqseq(chan, control->reqseq);
2980
2981 /* Fall through */
2982
2983 case L2CAP_EV_RECV_FBIT:
2984 if (control && control->final) {
2985 __clear_monitor_timer(chan);
2986 if (chan->unacked_frames > 0)
2987 __set_retrans_timer(chan);
2988 chan->retry_count = 0;
2989 chan->tx_state = L2CAP_TX_STATE_XMIT;
2990 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2991 }
2992 break;
2993 case L2CAP_EV_EXPLICIT_POLL:
2994 /* Ignore */
2995 break;
2996 case L2CAP_EV_MONITOR_TO:
2997 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2998 l2cap_send_rr_or_rnr(chan, 1);
2999 __set_monitor_timer(chan);
3000 chan->retry_count++;
3001 } else {
3002 l2cap_send_disconn_req(chan, ECONNABORTED);
3003 }
3004 break;
3005 default:
3006 break;
3007 }
3008 }
3009
3010 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3011 struct sk_buff_head *skbs, u8 event)
3012 {
3013 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3014 chan, control, skbs, event, chan->tx_state);
3015
3016 switch (chan->tx_state) {
3017 case L2CAP_TX_STATE_XMIT:
3018 l2cap_tx_state_xmit(chan, control, skbs, event);
3019 break;
3020 case L2CAP_TX_STATE_WAIT_F:
3021 l2cap_tx_state_wait_f(chan, control, skbs, event);
3022 break;
3023 default:
3024 /* Ignore event */
3025 break;
3026 }
3027 }
3028
/* Hand a received reqseq/F-bit control field to the TX state machine
 * so acked frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3035
/* Hand a received F-bit (without a new reqseq) to the TX state
 * machine.
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3042
3043 /* Copy frame to all raw sockets on that connection */
3044 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3045 {
3046 struct sk_buff *nskb;
3047 struct l2cap_chan *chan;
3048
3049 BT_DBG("conn %p", conn);
3050
3051 mutex_lock(&conn->chan_lock);
3052
3053 list_for_each_entry(chan, &conn->chan_l, list) {
3054 if (chan->chan_type != L2CAP_CHAN_RAW)
3055 continue;
3056
3057 /* Don't send frame to the channel it came from */
3058 if (bt_cb(skb)->chan == chan)
3059 continue;
3060
3061 nskb = skb_clone(skb, GFP_KERNEL);
3062 if (!nskb)
3063 continue;
3064 if (chan->ops->recv(chan, nskb))
3065 kfree_skb(nskb);
3066 }
3067
3068 mutex_unlock(&conn->chan_lock);
3069 }
3070
3071 /* ---- L2CAP signalling commands ---- */
/* Build a signaling command skb: L2CAP header (signaling CID chosen by
 * link type) + command header + dlen bytes of payload.  Payload beyond
 * the connection MTU is chained as headerless continuation fragments.
 *
 * Returns the skb or NULL on allocation failure / undersized MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* MTU must at least hold the two fixed headers */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signaling channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy as much payload as fits in the first skb */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the fragment chain along with the head skb */
	kfree_skb(skb);
	return NULL;
}
3137
/* Parse one configuration option at *ptr, returning its type, payload
 * length and value, and advance *ptr past it.  Options of 1/2/4 bytes
 * are decoded as little-endian integers; any other length yields a
 * pointer to the raw payload in *val.
 *
 * Returns the total number of bytes consumed (header + payload).
 *
 * NOTE(review): opt->len comes from the peer and is not validated
 * against the remaining buffer here — the caller's length bookkeeping
 * must bound the walk; confirm callers check `len` before each call.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-size option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3171
/* Append one configuration option to the buffer at *ptr and advance
 * *ptr past it.  Values of 1/2/4 bytes are encoded little-endian; any
 * other length treats val as a pointer to len bytes of raw data.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-size option: val is really a data pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3201
/* Append an Extended Flow Specification option filled in from the
 * channel's local QoS parameters.  Only ERTM and streaming modes carry
 * an EFS; other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode uses a fixed best-effort service */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3232
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR so the peer gets its ack.
 * Drops the channel reference taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last ack we sent */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3252
/* Initialize per-channel transmit/receive state for ERTM or streaming
 * mode.  Sequence counters, SDU reassembly state and the TX queue are
 * reset for both modes; timers, the SREJ queue and the sequence lists
 * are only set up for ERTM.
 *
 * Returns 0 on success or a negative errno if a sequence list
 * allocation fails (no partial allocation is left behind).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the retransmission machinery */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation so we fail cleanly */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3297
3298 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3299 {
3300 switch (mode) {
3301 case L2CAP_MODE_STREAMING:
3302 case L2CAP_MODE_ERTM:
3303 if (l2cap_mode_supported(mode, remote_feat_mask))
3304 return mode;
3305 /* fall through */
3306 default:
3307 return L2CAP_MODE_BASIC;
3308 }
3309 }
3310
3311 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3312 {
3313 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3314 }
3315
3316 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3317 {
3318 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3319 }
3320
/* Fill in the retransmission and monitor timeouts of an RFC option.
 * On an AMP link the timeouts are derived from the controller's
 * best-effort flush timeout; otherwise the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3358
3359 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3360 {
3361 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3362 __l2cap_ews_supported(chan->conn)) {
3363 /* use extended control field */
3364 set_bit(FLAG_EXT_CTRL, &chan->flags);
3365 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3366 } else {
3367 chan->tx_win = min_t(u16, chan->tx_win,
3368 L2CAP_DEFAULT_TX_WINDOW);
3369 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3370 }
3371 chan->ack_win = chan->tx_win;
3372 }
3373
/* Build a Configure Request for the channel into data: on the first
 * request, settle on a channel mode the remote supports, then emit MTU
 * and mode-specific options (RFC, EFS, EWS, FCS) as appropriate.
 *
 * Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep their requested mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC is only needed when the
		 * remote could otherwise assume ERTM/streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size is limited so a frame plus maximal header
		 * overhead still fits in the connection MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Extended windows are advertised via a separate option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3491
3492 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3493 {
3494 struct l2cap_conf_rsp *rsp = data;
3495 void *ptr = rsp->data;
3496 void *req = chan->conf_req;
3497 int len = chan->conf_len;
3498 int type, hint, olen;
3499 unsigned long val;
3500 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3501 struct l2cap_conf_efs efs;
3502 u8 remote_efs = 0;
3503 u16 mtu = L2CAP_DEFAULT_MTU;
3504 u16 result = L2CAP_CONF_SUCCESS;
3505 u16 size;
3506
3507 BT_DBG("chan %p", chan);
3508
3509 while (len >= L2CAP_CONF_OPT_SIZE) {
3510 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3511
3512 hint = type & L2CAP_CONF_HINT;
3513 type &= L2CAP_CONF_MASK;
3514
3515 switch (type) {
3516 case L2CAP_CONF_MTU:
3517 mtu = val;
3518 break;
3519
3520 case L2CAP_CONF_FLUSH_TO:
3521 chan->flush_to = val;
3522 break;
3523
3524 case L2CAP_CONF_QOS:
3525 break;
3526
3527 case L2CAP_CONF_RFC:
3528 if (olen == sizeof(rfc))
3529 memcpy(&rfc, (void *) val, olen);
3530 break;
3531
3532 case L2CAP_CONF_FCS:
3533 if (val == L2CAP_FCS_NONE)
3534 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3535 break;
3536
3537 case L2CAP_CONF_EFS:
3538 remote_efs = 1;
3539 if (olen == sizeof(efs))
3540 memcpy(&efs, (void *) val, olen);
3541 break;
3542
3543 case L2CAP_CONF_EWS:
3544 if (!chan->conn->hs_enabled)
3545 return -ECONNREFUSED;
3546
3547 set_bit(FLAG_EXT_CTRL, &chan->flags);
3548 set_bit(CONF_EWS_RECV, &chan->conf_state);
3549 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3550 chan->remote_tx_win = val;
3551 break;
3552
3553 default:
3554 if (hint)
3555 break;
3556
3557 result = L2CAP_CONF_UNKNOWN;
3558 *((u8 *) ptr++) = type;
3559 break;
3560 }
3561 }
3562
3563 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3564 goto done;
3565
3566 switch (chan->mode) {
3567 case L2CAP_MODE_STREAMING:
3568 case L2CAP_MODE_ERTM:
3569 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3570 chan->mode = l2cap_select_mode(rfc.mode,
3571 chan->conn->feat_mask);
3572 break;
3573 }
3574
3575 if (remote_efs) {
3576 if (__l2cap_efs_supported(chan->conn))
3577 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3578 else
3579 return -ECONNREFUSED;
3580 }
3581
3582 if (chan->mode != rfc.mode)
3583 return -ECONNREFUSED;
3584
3585 break;
3586 }
3587
3588 done:
3589 if (chan->mode != rfc.mode) {
3590 result = L2CAP_CONF_UNACCEPT;
3591 rfc.mode = chan->mode;
3592
3593 if (chan->num_conf_rsp == 1)
3594 return -ECONNREFUSED;
3595
3596 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3597 (unsigned long) &rfc);
3598 }
3599
3600 if (result == L2CAP_CONF_SUCCESS) {
3601 /* Configure output options and let the other side know
3602 * which ones we don't like. */
3603
3604 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3605 result = L2CAP_CONF_UNACCEPT;
3606 else {
3607 chan->omtu = mtu;
3608 set_bit(CONF_MTU_DONE, &chan->conf_state);
3609 }
3610 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3611
3612 if (remote_efs) {
3613 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3614 efs.stype != L2CAP_SERV_NOTRAFIC &&
3615 efs.stype != chan->local_stype) {
3616
3617 result = L2CAP_CONF_UNACCEPT;
3618
3619 if (chan->num_conf_req >= 1)
3620 return -ECONNREFUSED;
3621
3622 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3623 sizeof(efs),
3624 (unsigned long) &efs);
3625 } else {
3626 /* Send PENDING Conf Rsp */
3627 result = L2CAP_CONF_PENDING;
3628 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3629 }
3630 }
3631
3632 switch (rfc.mode) {
3633 case L2CAP_MODE_BASIC:
3634 chan->fcs = L2CAP_FCS_NONE;
3635 set_bit(CONF_MODE_DONE, &chan->conf_state);
3636 break;
3637
3638 case L2CAP_MODE_ERTM:
3639 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3640 chan->remote_tx_win = rfc.txwin_size;
3641 else
3642 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3643
3644 chan->remote_max_tx = rfc.max_transmit;
3645
3646 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3647 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3648 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3649 rfc.max_pdu_size = cpu_to_le16(size);
3650 chan->remote_mps = size;
3651
3652 __l2cap_set_ertm_timeouts(chan, &rfc);
3653
3654 set_bit(CONF_MODE_DONE, &chan->conf_state);
3655
3656 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3657 sizeof(rfc), (unsigned long) &rfc);
3658
3659 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3660 chan->remote_id = efs.id;
3661 chan->remote_stype = efs.stype;
3662 chan->remote_msdu = le16_to_cpu(efs.msdu);
3663 chan->remote_flush_to =
3664 le32_to_cpu(efs.flush_to);
3665 chan->remote_acc_lat =
3666 le32_to_cpu(efs.acc_lat);
3667 chan->remote_sdu_itime =
3668 le32_to_cpu(efs.sdu_itime);
3669 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3670 sizeof(efs),
3671 (unsigned long) &efs);
3672 }
3673 break;
3674
3675 case L2CAP_MODE_STREAMING:
3676 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3677 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3678 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3679 rfc.max_pdu_size = cpu_to_le16(size);
3680 chan->remote_mps = size;
3681
3682 set_bit(CONF_MODE_DONE, &chan->conf_state);
3683
3684 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3685 (unsigned long) &rfc);
3686
3687 break;
3688
3689 default:
3690 result = L2CAP_CONF_UNACCEPT;
3691
3692 memset(&rfc, 0, sizeof(rfc));
3693 rfc.mode = chan->mode;
3694 }
3695
3696 if (result == L2CAP_CONF_SUCCESS)
3697 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3698 }
3699 rsp->scid = cpu_to_le16(chan->dcid);
3700 rsp->result = cpu_to_le16(result);
3701 rsp->flags = __constant_cpu_to_le16(0);
3702
3703 return ptr - data;
3704 }
3705
/* Parse the options of a Configure Response from the remote and build a
 * follow-up Configure Request in @data echoing the parameters this side
 * will use.  *@result may be downgraded to L2CAP_CONF_UNACCEPT (bad MTU).
 * Returns the length of the built request, or -ECONNREFUSED when the
 * remote's parameters cannot be accepted at all.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	/* Default to basic mode if the response carries no RFC option */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Never accept an MTU below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A "state 2" device must not change its mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window: never grow past what we offered */
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must match unless one side sends
			 * "no traffic"
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			/* Only honored while the exchange is still pending */
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* We asked for basic mode but the remote insists on something
	 * else: refuse the connection.
	 */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			/* With extended control the ack window came from
			 * the EWS option instead of the RFC txwin field.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3814
3815 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3816 u16 result, u16 flags)
3817 {
3818 struct l2cap_conf_rsp *rsp = data;
3819 void *ptr = rsp->data;
3820
3821 BT_DBG("chan %p", chan);
3822
3823 rsp->scid = cpu_to_le16(chan->dcid);
3824 rsp->result = cpu_to_le16(result);
3825 rsp->flags = cpu_to_le16(flags);
3826
3827 return ptr - data;
3828 }
3829
3830 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3831 {
3832 struct l2cap_le_conn_rsp rsp;
3833 struct l2cap_conn *conn = chan->conn;
3834
3835 BT_DBG("chan %p", chan);
3836
3837 rsp.dcid = cpu_to_le16(chan->scid);
3838 rsp.mtu = cpu_to_le16(chan->imtu);
3839 rsp.mps = cpu_to_le16(chan->mps);
3840 rsp.credits = cpu_to_le16(chan->rx_credits);
3841 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3842
3843 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3844 &rsp);
3845 }
3846
/* Send the deferred Connection Response (Create Channel Response when a
 * high-speed link is involved) and, if not already done, kick off
 * configuration with the first Configure Request.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP-backed channels answer with a Create Channel Response */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only send the initial Configure Request once */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3875
/* Extract the negotiated ERTM/streaming parameters (timeouts, MPS, ack
 * window) from a successful Configure Response.  Defaults are used when
 * the remote omitted the RFC or extended window size options.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Nothing to pick up for basic mode channels */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the window comes from the EWS
		 * option, otherwise from the RFC txwin field.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3926
3927 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3928 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3929 u8 *data)
3930 {
3931 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3932
3933 if (cmd_len < sizeof(*rej))
3934 return -EPROTO;
3935
3936 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3937 return 0;
3938
3939 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3940 cmd->ident == conn->info_ident) {
3941 cancel_delayed_work(&conn->info_timer);
3942
3943 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3944 conn->info_ident = 0;
3945
3946 l2cap_conn_start(conn);
3947 }
3948
3949 return 0;
3950 }
3951
/* Handle an incoming Connection Request (or the channel-allocation part
 * of a Create Channel Request when @amp_id selects an AMP controller).
 * Allocates a child channel from the listening parent, sends a response
 * with @rsp_code, and returns the new channel or NULL on rejection.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	/* The remote's source CID becomes our destination CID */
	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our freshly allocated source CID is the remote's destination */
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Leave the final answer to user space */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange not finished yet: answer pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the feature-mask exchange if it is still outstanding */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4085
4086 static int l2cap_connect_req(struct l2cap_conn *conn,
4087 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4088 {
4089 struct hci_dev *hdev = conn->hcon->hdev;
4090 struct hci_conn *hcon = conn->hcon;
4091
4092 if (cmd_len < sizeof(struct l2cap_conn_req))
4093 return -EPROTO;
4094
4095 hci_dev_lock(hdev);
4096 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
4097 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4098 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
4099 hcon->dst_type, 0, NULL, 0,
4100 hcon->dev_class);
4101 hci_dev_unlock(hdev);
4102
4103 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4104 return 0;
4105 }
4106
/* Handle a Connection Response / Create Channel Response for an
 * outgoing connection attempt.  Returns 0 or a negative error code.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A response may not echo our source CID yet, in which case we
	 * fall back to matching on the signalling command ident.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a rejection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4179
4180 static inline void set_default_fcs(struct l2cap_chan *chan)
4181 {
4182 /* FCS is enabled only in ERTM or streaming mode, if one or both
4183 * sides request it.
4184 */
4185 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4186 chan->fcs = L2CAP_FCS_NONE;
4187 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4188 chan->fcs = L2CAP_FCS_CRC16;
4189 }
4190
4191 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4192 u8 ident, u16 flags)
4193 {
4194 struct l2cap_conn *conn = chan->conn;
4195
4196 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4197 flags);
4198
4199 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4200 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4201
4202 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4203 l2cap_build_conf_rsp(chan, data,
4204 L2CAP_CONF_SUCCESS, flags), data);
4205 }
4206
4207 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4208 u16 scid, u16 dcid)
4209 {
4210 struct l2cap_cmd_rej_cid rej;
4211
4212 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4213 rej.scid = __cpu_to_le16(scid);
4214 rej.dcid = __cpu_to_le16(dcid);
4215
4216 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4217 }
4218
/* Handle a Configure Request: accumulate continuation fragments in
 * chan->conf_req, parse the complete request, answer it, and move the
 * channel to ready once both directions are configured.
 *
 * NOTE(review): l2cap_get_chan_by_scid appears to return with the
 * channel locked — only l2cap_chan_unlock is visible on the exit path;
 * confirm against its definition.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4327
/* Handle a Configure Response.  Depending on the result the channel is
 * finalised, a renegotiated Configure Request is sent, or the channel
 * is torn down.
 *
 * NOTE(review): l2cap_get_chan_by_scid appears to return with the
 * channel locked — only l2cap_chan_unlock is visible on the exit path;
 * confirm against its definition.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR channels answer immediately; AMP
			 * channels wait for the logical link first.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}

		/* Retry limit exceeded: fall through and disconnect */
	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4439
/* Handle a Disconnection Request: acknowledge it and tear down the
 * channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's destination CID is our source CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a reference so the channel survives being removed from
	 * the connection until ops->close() has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4486
/* Handle a Disconnection Response to our own disconnect request and
 * finish tearing down the channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	/* Silently ignore responses for channels we no longer know */
	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so the channel survives being removed from
	 * the connection until ops->close() has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4525
/* Handle an Information Request: answer feature mask and fixed channel
 * queries; everything else gets a "not supported" response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE(review): l2cap_fixed_chan is a file-scope global;
		 * flipping the A2MP bit here affects every connection, not
		 * just this one — confirm this is intended.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4580
/* Handle an Information Response: record the remote's feature mask or
 * fixed channel mask and, once the exchange is done, start the pending
 * channels.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* On failure give up on the exchange and start channels anyway */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Follow up with a fixed channel query when supported */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4643
/* Handle a Create Channel Request.  Controller id 0 (BR/EDR) goes
 * through the normal connect path; otherwise the AMP controller id is
 * validated and the channel is associated with the high-speed link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4720
4721 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4722 {
4723 struct l2cap_move_chan_req req;
4724 u8 ident;
4725
4726 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4727
4728 ident = l2cap_get_ident(chan->conn);
4729 chan->ident = ident;
4730
4731 req.icid = cpu_to_le16(chan->scid);
4732 req.dest_amp_id = dest_amp_id;
4733
4734 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4735 &req);
4736
4737 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4738 }
4739
4740 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4741 {
4742 struct l2cap_move_chan_rsp rsp;
4743
4744 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4745
4746 rsp.icid = cpu_to_le16(chan->dcid);
4747 rsp.result = cpu_to_le16(result);
4748
4749 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4750 sizeof(rsp), &rsp);
4751 }
4752
4753 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4754 {
4755 struct l2cap_move_chan_cfm cfm;
4756
4757 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4758
4759 chan->ident = l2cap_get_ident(chan->conn);
4760
4761 cfm.icid = cpu_to_le16(chan->scid);
4762 cfm.result = cpu_to_le16(result);
4763
4764 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4765 sizeof(cfm), &cfm);
4766
4767 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4768 }
4769
4770 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4771 {
4772 struct l2cap_move_chan_cfm cfm;
4773
4774 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4775
4776 cfm.icid = cpu_to_le16(icid);
4777 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4778
4779 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4780 sizeof(cfm), &cfm);
4781 }
4782
4783 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4784 u16 icid)
4785 {
4786 struct l2cap_move_chan_cfm_rsp rsp;
4787
4788 BT_DBG("icid 0x%4.4x", icid);
4789
4790 rsp.icid = cpu_to_le16(icid);
4791 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4792 }
4793
4794 static void __release_logical_link(struct l2cap_chan *chan)
4795 {
4796 chan->hs_hchan = NULL;
4797 chan->hs_hcon = NULL;
4798
4799 /* Placeholder - release the logical link */
4800 }
4801
/* Recover after an AMP logical link failed to come up, either during
 * initial channel creation or during a channel move.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Refuse the move; the channel stays on its current link */
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4832
/* Complete creation of an AMP channel once its logical link is up:
 * attach the link, send the deferred EFS config response, and, if
 * configuration already finished, bring the channel into ERTM operation.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	/* Route this channel's L2CAP traffic over the new logical link */
	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* The config response was held back until the link came up */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4855
/* Advance the channel-move state machine after the logical link for a
 * move has been established.  The next signaling step depends on which
 * response/confirm is still outstanding and on our role in the move.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	/* Attach the new logical link before any further signaling */
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Link was the last missing piece; confirm or respond now,
		 * unless reassembly is still blocked by local busy.
		 */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4889
/* Call with chan locked
 *
 * Logical link setup completion callback: on failure, unwind via
 * l2cap_logical_fail() and drop the link references; on success,
 * dispatch to either the create-channel or move-channel completion
 * path depending on whether the channel is already connected.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		/* Non-zero status means the logical link never came up */
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		/* Connected channel: this link completes a channel move */
		l2cap_logical_finish_move(chan, hchan);
	}
}
4910
4911 void l2cap_move_start(struct l2cap_chan *chan)
4912 {
4913 BT_DBG("chan %p", chan);
4914
4915 if (chan->local_amp_id == AMP_ID_BREDR) {
4916 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4917 return;
4918 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4919 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4920 /* Placeholder - start physical link setup */
4921 } else {
4922 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4923 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4924 chan->move_id = 0;
4925 l2cap_move_setup(chan);
4926 l2cap_send_move_chan_req(chan, 0);
4927 }
4928 }
4929
/* Continue channel creation on an AMP controller after the physical
 * link attempt finished.  Handles both directions: an outgoing channel
 * (state BT_CONNECT) proceeds with Create Channel or falls back to a
 * plain BR/EDR Connect; an incoming channel answers the pending Create
 * Channel request and, on success, starts configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No FCS on AMP controllers */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Channel accepted: move on to configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4981
4982 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4983 u8 remote_amp_id)
4984 {
4985 l2cap_move_setup(chan);
4986 chan->move_id = local_amp_id;
4987 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4988
4989 l2cap_send_move_chan_req(chan, remote_amp_id);
4990 }
4991
/* As move responder, answer the pending Move Channel Request once the
 * physical link attempt has finished, depending on whether a logical
 * link is already available and connected.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
5016
5017 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5018 {
5019 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5020 u8 rsp_result;
5021 if (result == -EINVAL)
5022 rsp_result = L2CAP_MR_BAD_ID;
5023 else
5024 rsp_result = L2CAP_MR_NOT_ALLOWED;
5025
5026 l2cap_send_move_chan_rsp(chan, rsp_result);
5027 }
5028
5029 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5030 chan->move_state = L2CAP_MOVE_STABLE;
5031
5032 /* Restart data transmission */
5033 l2cap_ertm_send(chan);
5034 }
5035
/* Invoke with locked chan
 *
 * Physical link completion callback for AMP: dispatch to channel
 * creation, move initiation/response, or move cancellation depending
 * on channel state, result, and our move role.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away; nothing left to do.
	 * NOTE(review): only this early-return path unlocks the channel
	 * even though the function is documented as called with the chan
	 * locked — verify against the callers in amp.c that unlocking
	 * here is intended and does not double-unlock.
	 */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5069
/* Handle an incoming Move Channel Request: validate the request against
 * the channel's mode/policy and the destination controller, detect move
 * collisions, and either reject or start acting as move responder.
 * Returns 0 on handled, -EPROTO/-EINVAL to trigger a command reject.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	/* l2cap_get_chan_by_dcid() returns the channel locked */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* No such channel: reject directly, nothing to unlock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Responses below are sent with this ident */
	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels without a BR/EDR-only
	 * policy may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination AMP controller must exist, be an AMP, and be up */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	/* Request accepted: become responder and suspend the channel */
	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5167
/* Process a success/pending Move Channel Response as move initiator and
 * advance the move state machine, possibly confirming the move once both
 * the response and the logical link are in place.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returned channel is locked; unlocked at the end */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result restarts the (extended) move timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5257
/* Process a failed Move Channel Response.  On a collision the initiator
 * flips to responder and lets the peer drive the move; otherwise the
 * move is canceled.  An unconfirmed confirm is sent in all cases.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Match by our request ident; the icid in the response is only
	 * a fallback when the channel cannot be found.
	 */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5286
5287 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5288 struct l2cap_cmd_hdr *cmd,
5289 u16 cmd_len, void *data)
5290 {
5291 struct l2cap_move_chan_rsp *rsp = data;
5292 u16 icid, result;
5293
5294 if (cmd_len != sizeof(*rsp))
5295 return -EPROTO;
5296
5297 icid = le16_to_cpu(rsp->icid);
5298 result = le16_to_cpu(rsp->result);
5299
5300 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5301
5302 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5303 l2cap_move_continue(conn, icid, result);
5304 else
5305 l2cap_move_fail(conn, cmd->ident, icid, result);
5306
5307 return 0;
5308 }
5309
/* Handle an incoming Move Channel Confirmation: commit or revert the
 * controller switch and always acknowledge with a confirmation response
 * (the spec requires one even for unknown icids).
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returned channel is locked; unlocked below */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move committed: adopt the new controller and, if
			 * back on BR/EDR, drop the AMP logical link.
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5351
/* Handle the Move Channel Confirmation Response that completes a move
 * we confirmed: adopt the new controller and finish the move.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returned channel is locked; no response is sent for an
	 * unknown icid on this opcode.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Moved back to BR/EDR: the AMP logical link is no
		 * longer needed.
		 */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5386
/* Validate LE connection parameters per the allowed spec ranges:
 * interval 6-3200 units with min <= max, supervision timeout 10-3200
 * units and strictly larger than the maximum interval, and slave
 * latency at most min(499, timeout/interval - 1).
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 max_latency;

	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* Timeout (10ms units) must exceed the max interval (1.25ms
	 * units); the factor 8 converts between the two unit scales.
	 */
	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
5407
/* Handle an LE Connection Parameter Update Request: validate the
 * proposed parameters, always send an accept/reject response, and on
 * acceptance ask the controller to update the connection.  Only valid
 * when we are master of the link.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may be asked to update parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the accepted parameters at the HCI level */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5449
/* Handle an LE Credit Based Connection Response for a connect request
 * we sent: on success, record the peer's dcid/MTU/MPS/initial credits
 * and bring the channel up; on any other result, tear the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* Spec minimum for both LE CoC MTU and MPS is 23 octets */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	/* Lock order: conn->chan_lock first, then the channel lock */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5508
/* Dispatch a single BR/EDR signaling command to its handler.  Only a
 * subset of handlers report errors (which the caller turns into a
 * command reject); the rest are fire-and-forget.  Unknown opcodes are
 * rejected with -EINVAL.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo back the request payload unchanged */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5588
/* Handle an incoming LE Credit Based Connection Request: look up a
 * listening channel for the PSM, run security and duplicate checks,
 * create the new channel, and send the connection response (deferred
 * when the listener uses FLAG_DEFER_SETUP).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum for both LE CoC MTU and MPS is 23 octets */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Lock order: conn->chan_lock first, then the parent channel */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	/* Populate the new channel from the connection and the request */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Userspace decides; the response is sent later */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5698
5699 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5700 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5701 u8 *data)
5702 {
5703 struct l2cap_le_credits *pkt;
5704 struct l2cap_chan *chan;
5705 u16 cid, credits;
5706
5707 if (cmd_len != sizeof(*pkt))
5708 return -EPROTO;
5709
5710 pkt = (struct l2cap_le_credits *) data;
5711 cid = __le16_to_cpu(pkt->cid);
5712 credits = __le16_to_cpu(pkt->credits);
5713
5714 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5715
5716 chan = l2cap_get_chan_by_dcid(conn, cid);
5717 if (!chan)
5718 return -EBADSLT;
5719
5720 chan->tx_credits += credits;
5721
5722 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5723 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5724 chan->tx_credits--;
5725 }
5726
5727 if (chan->tx_credits)
5728 chan->ops->resume(chan);
5729
5730 l2cap_chan_unlock(chan);
5731
5732 return 0;
5733 }
5734
/* Dispatch a single LE signaling command to its handler.  When LE CoC
 * support is disabled, the connection-oriented opcodes are rejected up
 * front so they produce a command reject in the caller.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	if (!enable_lecoc) {
		/* LE CoC disabled: refuse all CoC-related opcodes */
		switch (cmd->code) {
		case L2CAP_LE_CONN_REQ:
		case L2CAP_LE_CONN_RSP:
		case L2CAP_LE_CREDITS:
		case L2CAP_DISCONN_REQ:
		case L2CAP_DISCONN_RSP:
			return -EINVAL;
		}
	}

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5791
/* Receive one skb from the LE signaling channel.  Unlike BR/EDR, an LE
 * signaling PDU carries exactly one command.  Malformed input is dropped
 * silently; handler errors produce a "not understood" command reject.
 * Consumes the skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Ident 0 is reserved and the advertised length must match */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading - err here can
		 * be any handler error, not just a link-type mismatch.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5832
/* Receive one skb from the BR/EDR signaling channel.  A single skb may
 * carry several concatenated commands; each is parsed and dispatched in
 * order.  Handler errors produce a "not understood" command reject; a
 * corrupted header aborts the remainder of the PDU.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Mirror the raw PDU to any raw-mode listeners first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Length must fit the remaining data; ident 0 is reserved */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): message text is misleading - err can
			 * be any handler error, not just link-type issues.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance past this command's payload */
		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5881
/* Verify and strip the trailing CRC16 FCS of a received ERTM/streaming
 * frame when FCS is enabled on the channel.  Returns 0 on success (or
 * when no FCS is in use), -EBADMSG on checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* The FCS also covers the L2CAP header preceding skb->data */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim first: afterwards skb->data + skb->len points at
		 * the (still resident) FCS bytes that were cut off.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5902
/* Send a frame carrying the F-bit in response to a poll: RNR if we are
 * locally busy, otherwise pending I-frames, falling back to an RR
 * S-frame if no I-frame ended up carrying the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer is no longer busy: restart the retransmission timer if
	 * frames are still awaiting acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5936
/* Append new_frag to skb's fragment list during SDU reassembly and
 * update the accounting fields.  *last_frag tracks the current tail so
 * appends stay O(1); it is advanced to new_frag.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* On the first append *last_frag is skb itself, so this link is
	 * redundant with the frag_list assignment above but harmless.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5955
/* Reassemble an ERTM/streaming SDU from I-frames according to the
 * frame's Segmentation and Reassembly (SAR) bits.
 *
 * Ownership of @skb always ends here: it is handed to
 * chan->ops->recv(), buffered in the reassembly state, or freed on
 * error.  Returns 0 on success (including "fragment buffered, more
 * expected"), negative errno on protocol violation or oversized SDU,
 * in which case any partial SDU is also discarded.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An in-progress reassembly makes this a protocol error */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The start fragment begins with the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb ownership moved into the reassembly state */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must leave the SDU incomplete; err stays
		 * -EINVAL if sdu_len was already reached.
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* kfree_skb(NULL) is a no-op, so this is safe on paths
		 * where ownership was already transferred above.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6037
/* Re-segment queued TX data after the usable MTU changed (called from
 * the channel-move paths).  Not implemented yet; currently reports
 * success unconditionally.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6043
6044 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6045 {
6046 u8 event;
6047
6048 if (chan->mode != L2CAP_MODE_ERTM)
6049 return;
6050
6051 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6052 l2cap_tx(chan, NULL, NULL, event);
6053 }
6054
/* Drain in-sequence frames buffered in srej_q during SREJ recovery,
 * feeding them to reassembly until a remaining gap (or local busy)
 * stops progress.  When the queue empties, all gaps are filled and
 * normal reception resumes.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* Recovery finished: leave SREJ_SENT and ack the peer */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6088
/* Handle a received SREJ S-frame: selectively retransmit the single
 * I-frame named by reqseq, with poll/final (P/F bit) bookkeeping so a
 * retransmission already triggered by a poll is not repeated when the
 * matching final arrives.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would name a frame that was never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* The poll must be answered with the F-bit set */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only when this final answers
			 * the SREJ already acted on for this same reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6146
/* Handle a received REJ S-frame: the peer rejects everything from
 * reqseq onward, so all unacked frames from that point are
 * retransmitted (unless a final answers a REJ already acted upon).
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would name a frame that was never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only if this final does not answer a REJ
		 * that was already handled.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6183
/* Classify an incoming I-frame's txseq relative to the receive window
 * and any outstanding SREJ state, so the RX state machines can decide
 * whether the frame is expected, a duplicate, a gap (needing SREJ), or
 * invalid.  Returns one of the L2CAP_TXSEQ_* classification codes.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Extra classifications only apply while SREJ recovery is active */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq (mod window) means a repeat */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6269
/* Main ERTM RX handler for the RECV state: processes I-frames and
 * S-frames, delivering in-sequence data and starting SREJ recovery on
 * sequence gaps.
 *
 * Ownership: @skb is consumed when reassembled or queued (tracked via
 * skb_in_use); otherwise it is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Ack info in the header still updates the TX side */
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* Unanswered final: treat as implicit REJ
				 * response and retransmit everything.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Duplicate data is dropped, but its ack info
			 * still feeds the TX side.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote un-busied: restart retrans timer if
			 * frames are still unacknowledged.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		/* NOTE(review): control is non-NULL on every visible call
		 * path; the NULL check here looks purely defensive.
		 */
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6403
/* ERTM RX handler for the SREJ_SENT state: while SREJ requests are
 * outstanding, received I-frames are buffered in srej_q and delivered
 * in order by l2cap_rx_queued_iframes() once gaps close.
 *
 * Ownership: @skb is consumed when queued (tracked via skb_in_use);
 * otherwise it is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* This fills the oldest requested gap */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* Deliver any frames that are now contiguous */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Unanswered final: retransmit everything */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the newest gap */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6546
6547 static int l2cap_finish_move(struct l2cap_chan *chan)
6548 {
6549 BT_DBG("chan %p", chan);
6550
6551 chan->rx_state = L2CAP_RX_STATE_RECV;
6552
6553 if (chan->hs_hcon)
6554 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6555 else
6556 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6557
6558 return l2cap_resegment(chan);
6559 }
6560
/* RX handler for the WAIT_P state (channel move): wait for the peer's
 * poll (P=1) S-frame, then rewind the TX side to the receiver's
 * expectation, finish the move, and answer with the F-bit.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a frame carrying the P-bit is acceptable in this state */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not valid while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6598
/* RX handler for the WAIT_F state (channel move): wait for the F-bit
 * response, rewind the TX state to the receiver's expectation, adopt
 * the MTU of the link now carrying the channel, then process the frame
 * through the normal RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only a frame carrying the F-bit is acceptable in this state */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the AMP block MTU when moved to a high-speed link */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6636
6637 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6638 {
6639 /* Make sure reqseq is for a packet that has been sent but not acked */
6640 u16 unacked;
6641
6642 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6643 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6644 }
6645
6646 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6647 struct sk_buff *skb, u8 event)
6648 {
6649 int err = 0;
6650
6651 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6652 control, skb, event, chan->rx_state);
6653
6654 if (__valid_reqseq(chan, control->reqseq)) {
6655 switch (chan->rx_state) {
6656 case L2CAP_RX_STATE_RECV:
6657 err = l2cap_rx_state_recv(chan, control, skb, event);
6658 break;
6659 case L2CAP_RX_STATE_SREJ_SENT:
6660 err = l2cap_rx_state_srej_sent(chan, control, skb,
6661 event);
6662 break;
6663 case L2CAP_RX_STATE_WAIT_P:
6664 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6665 break;
6666 case L2CAP_RX_STATE_WAIT_F:
6667 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6668 break;
6669 default:
6670 /* shut it down */
6671 break;
6672 }
6673 } else {
6674 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6675 control->reqseq, chan->next_tx_seq,
6676 chan->expected_ack_seq);
6677 l2cap_send_disconn_req(chan, ECONNRESET);
6678 }
6679
6680 return err;
6681 }
6682
/* Streaming-mode receive path: there are no retransmissions.  Only the
 * next expected txseq is reassembled; any other classification drops
 * the partial SDU and this frame, then resynchronizes on the received
 * txseq.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* Consumes skb; reassembly errors are not fatal here */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: discard any partial SDU and the frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on the frame just seen */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6720
/* Entry point for ERTM and streaming-mode PDUs: validate the FCS,
 * payload length and control-field bits, then dispatch I-frames and
 * S-frames to the RX state machine.  Consumes @skb on all paths and
 * always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU-length field and the FCS.
	 * NOTE(review): len is u16, so a malformed short PDU underflows
	 * here and is then rejected by the MPS check below.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit supervisory function to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6808
/* Replenish the peer's LE flow-control credits once our local credit
 * count drops below half of L2CAP_LE_MAX_CREDITS, restoring the full
 * initial allowance with a single LE Flow Control Credit command.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (L2CAP_LE_MAX_CREDITS + 1) / 2)
		return;

	return_credits = L2CAP_LE_MAX_CREDITS - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	/* Account for the credits locally before telling the peer */
	chan->rx_credits += return_credits;

	pkt.cid = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6834
/* Receive one LE credit-based flow control PDU and reassemble the SDU
 * it belongs to.  Each PDU costs the peer one of our receive credits;
 * the first PDU of an SDU starts with a 2-byte total SDU length.
 *
 * A negative error is returned only before the skb is consumed (the
 * caller then frees it); once processing starts, skb freeing is
 * handled internally and 0 is returned (or the recv callback's result
 * for an unsegmented SDU).
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Possibly top up the peer's credits */
	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* Start of a new SDU: first two bytes are its length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		/* SDU complete: hand it up and reset reassembly state */
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6923
/* Route a PDU addressed to a dynamic (or A2MP) CID to its channel and
 * dispatch it according to the channel mode.  Consumes @skb.
 *
 * NOTE(review): chan appears to be returned locked by
 * l2cap_get_chan_by_scid (and is explicitly locked on the A2MP path);
 * the unlock happens at "done" — confirm against the helper.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* Lazily create the A2MP channel on first use */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* A negative return means the skb was NOT consumed */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6988
6989 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6990 struct sk_buff *skb)
6991 {
6992 struct hci_conn *hcon = conn->hcon;
6993 struct l2cap_chan *chan;
6994
6995 if (hcon->type != ACL_LINK)
6996 goto drop;
6997
6998 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6999 ACL_LINK);
7000 if (!chan)
7001 goto drop;
7002
7003 BT_DBG("chan %p, len %d", chan, skb->len);
7004
7005 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7006 goto drop;
7007
7008 if (chan->imtu < skb->len)
7009 goto drop;
7010
7011 /* Store remote BD_ADDR and PSM for msg_name */
7012 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
7013 bt_cb(skb)->psm = psm;
7014
7015 if (!chan->ops->recv(chan, skb))
7016 return;
7017
7018 drop:
7019 kfree_skb(skb);
7020 }
7021
7022 static void l2cap_att_channel(struct l2cap_conn *conn,
7023 struct sk_buff *skb)
7024 {
7025 struct hci_conn *hcon = conn->hcon;
7026 struct l2cap_chan *chan;
7027
7028 if (hcon->type != LE_LINK)
7029 goto drop;
7030
7031 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
7032 &hcon->src, &hcon->dst);
7033 if (!chan)
7034 goto drop;
7035
7036 BT_DBG("chan %p, len %d", chan, skb->len);
7037
7038 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
7039 goto drop;
7040
7041 if (chan->imtu < skb->len)
7042 goto drop;
7043
7044 if (!chan->ops->recv(chan, skb))
7045 return;
7046
7047 drop:
7048 kfree_skb(skb);
7049 }
7050
7051 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7052 {
7053 struct l2cap_hdr *lh = (void *) skb->data;
7054 u16 cid, len;
7055 __le16 psm;
7056
7057 skb_pull(skb, L2CAP_HDR_SIZE);
7058 cid = __le16_to_cpu(lh->cid);
7059 len = __le16_to_cpu(lh->len);
7060
7061 if (len != skb->len) {
7062 kfree_skb(skb);
7063 return;
7064 }
7065
7066 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7067
7068 switch (cid) {
7069 case L2CAP_CID_SIGNALING:
7070 l2cap_sig_channel(conn, skb);
7071 break;
7072
7073 case L2CAP_CID_CONN_LESS:
7074 psm = get_unaligned((__le16 *) skb->data);
7075 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7076 l2cap_conless_channel(conn, psm, skb);
7077 break;
7078
7079 case L2CAP_CID_ATT:
7080 l2cap_att_channel(conn, skb);
7081 break;
7082
7083 case L2CAP_CID_LE_SIGNALING:
7084 l2cap_le_sig_channel(conn, skb);
7085 break;
7086
7087 case L2CAP_CID_SMP:
7088 if (smp_sig_channel(conn, skb))
7089 l2cap_conn_del(conn->hcon, EACCES);
7090 break;
7091
7092 default:
7093 l2cap_data_channel(conn, cid, skb);
7094 break;
7095 }
7096 }
7097
7098 /* ---- L2CAP interface with lower layer (HCI) ---- */
7099
7100 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7101 {
7102 int exact = 0, lm1 = 0, lm2 = 0;
7103 struct l2cap_chan *c;
7104
7105 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7106
7107 /* Find listening sockets and check their link_mode */
7108 read_lock(&chan_list_lock);
7109 list_for_each_entry(c, &chan_list, global_l) {
7110 if (c->state != BT_LISTEN)
7111 continue;
7112
7113 if (!bacmp(&c->src, &hdev->bdaddr)) {
7114 lm1 |= HCI_LM_ACCEPT;
7115 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7116 lm1 |= HCI_LM_MASTER;
7117 exact++;
7118 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7119 lm2 |= HCI_LM_ACCEPT;
7120 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7121 lm2 |= HCI_LM_MASTER;
7122 }
7123 }
7124 read_unlock(&chan_list_lock);
7125
7126 return exact ? lm1 : lm2;
7127 }
7128
7129 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7130 {
7131 struct l2cap_conn *conn;
7132
7133 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7134
7135 if (!status) {
7136 conn = l2cap_conn_add(hcon);
7137 if (conn)
7138 l2cap_conn_ready(conn);
7139 } else {
7140 l2cap_conn_del(hcon, bt_to_errno(status));
7141 }
7142 }
7143
7144 int l2cap_disconn_ind(struct hci_conn *hcon)
7145 {
7146 struct l2cap_conn *conn = hcon->l2cap_data;
7147
7148 BT_DBG("hcon %p", hcon);
7149
7150 if (!conn)
7151 return HCI_ERROR_REMOTE_USER_TERM;
7152 return conn->disc_reason;
7153 }
7154
/* HCI callback: the ACL link was disconnected; tear down the L2CAP
 * connection with an errno derived from the HCI reason code.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7161
7162 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7163 {
7164 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7165 return;
7166
7167 if (encrypt == 0x00) {
7168 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7169 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7170 } else if (chan->sec_level == BT_SECURITY_HIGH)
7171 l2cap_chan_close(chan, ECONNREFUSED);
7172 } else {
7173 if (chan->sec_level == BT_SECURITY_MEDIUM)
7174 __clear_chan_timer(chan);
7175 }
7176 }
7177
/* HCI callback: authentication/encryption state changed for @hcon.
 * Walk every channel on the L2CAP connection and advance its state
 * machine according to the outcome (@status, 0 on success) and the new
 * encryption state (@encrypt).
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* On LE, successful encryption triggers SMP key
		 * distribution; either way the SMP security timer is no
		 * longer needed.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not affected by link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* The ATT fixed channel becomes ready as soon as the LE
		 * link is successfully encrypted.
		 */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels that still have a connect request pending */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Already connected/configuring: resume traffic and
		 * re-evaluate the channel's security requirements.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security completed while waiting to send our
			 * connect request: send it now, or arm the
			 * disconnect timer on failure.
			 */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* An incoming connection was waiting on security:
			 * answer the pending connect request accordingly.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: block the connection and
				 * arm the disconnect timer.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			/* scid/dcid are swapped in the response: our dcid is
			 * the peer's source CID and vice versa.
			 */
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted without deferral: immediately start the
			 * configuration phase.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7282
/* HCI callback: an ACL data packet arrived for @hcon.  Reassembles
 * fragmented L2CAP frames (ACL_START + ACL_CONT sequences) in
 * conn->rx_skb and hands each complete frame to l2cap_recv_frame().
 * Always consumes @skb.  Returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it and flag the
		 * connection as unreliable.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* rx_len tracks how many bytes are still outstanding */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the expected frame length:
		 * abort reassembly.
		 */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	/* Fragments are copied into rx_skb above, so the incoming skb is
	 * always freed here (fall-through from the break statements).
	 */
	kfree_skb(skb);
	return 0;
}
7387
7388 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7389 {
7390 struct l2cap_chan *c;
7391
7392 read_lock(&chan_list_lock);
7393
7394 list_for_each_entry(c, &chan_list, global_l) {
7395 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7396 &c->src, &c->dst,
7397 c->state, __le16_to_cpu(c->psm),
7398 c->scid, c->dcid, c->imtu, c->omtu,
7399 c->sec_level, c->mode);
7400 }
7401
7402 read_unlock(&chan_list_lock);
7403
7404 return 0;
7405 }
7406
/* debugfs open callback: standard single_open() wiring for the
 * l2cap_debugfs_show() seq_file.
 */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7411
/* File operations for the debugfs "l2cap" entry; read-only seq_file */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
7418
/* debugfs entry created by l2cap_init(), removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7420
7421 int __init l2cap_init(void)
7422 {
7423 int err;
7424
7425 err = l2cap_init_sockets();
7426 if (err < 0)
7427 return err;
7428
7429 if (IS_ERR_OR_NULL(bt_debugfs))
7430 return 0;
7431
7432 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7433 NULL, &l2cap_debugfs_fops);
7434
7435 return 0;
7436 }
7437
/* Tear down the L2CAP core: remove the debugfs entry (a no-op when it
 * was never created) and unregister the socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7443
7444 module_param(disable_ertm, bool, 0644);
7445 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.315409 seconds and 6 git commands to generate.