52e1871d63346ecca65a3f8caccea442460c811e
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
44 #define LE_FLOWCTL_MAX_CREDITS 65535
45
46 bool disable_ertm;
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
50
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 void *data);
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
66
67 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68 {
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
72 else
73 return BDADDR_LE_RANDOM;
74 }
75
76 return BDADDR_BREDR;
77 }
78
79 /* ---- L2CAP channels ---- */
80
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
82 u16 cid)
83 {
84 struct l2cap_chan *c;
85
86 list_for_each_entry(c, &conn->chan_l, list) {
87 if (c->dcid == cid)
88 return c;
89 }
90 return NULL;
91 }
92
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 u16 cid)
95 {
96 struct l2cap_chan *c;
97
98 list_for_each_entry(c, &conn->chan_l, list) {
99 if (c->scid == cid)
100 return c;
101 }
102 return NULL;
103 }
104
/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
                         u16 cid)
{
    struct l2cap_chan *c;

    /* Take the channel lock while conn->chan_lock is still held so the
     * channel cannot be removed from conn->chan_l between lookup and
     * lock acquisition.
     */
    mutex_lock(&conn->chan_lock);
    c = __l2cap_get_chan_by_scid(conn, cid);
    if (c)
        l2cap_chan_lock(c);
    mutex_unlock(&conn->chan_lock);

    /* NOTE(review): no l2cap_chan_hold() is taken here -- the caller
     * holds only the channel lock, not a reference.  Verify callers
     * cannot race with the last l2cap_chan_put().
     */
    return c;
}
120
/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
                         u16 cid)
{
    struct l2cap_chan *c;

    /* Lock the channel before dropping conn->chan_lock so it cannot be
     * deleted in between (same pattern as l2cap_get_chan_by_scid()).
     */
    mutex_lock(&conn->chan_lock);
    c = __l2cap_get_chan_by_dcid(conn, cid);
    if (c)
        l2cap_chan_lock(c);
    mutex_unlock(&conn->chan_lock);

    /* NOTE(review): returned without an extra reference -- see the
     * matching remark in l2cap_get_chan_by_scid().
     */
    return c;
}
137
138 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
139 u8 ident)
140 {
141 struct l2cap_chan *c;
142
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
145 return c;
146 }
147 return NULL;
148 }
149
/* Find the channel using signalling identifier @ident.
 * Returns a locked channel, or NULL if none matches.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
                          u8 ident)
{
    struct l2cap_chan *c;

    /* Channel lock is acquired under conn->chan_lock to prevent the
     * channel from being freed between lookup and lock.
     */
    mutex_lock(&conn->chan_lock);
    c = __l2cap_get_chan_by_ident(conn, ident);
    if (c)
        l2cap_chan_lock(c);
    mutex_unlock(&conn->chan_lock);

    return c;
}
163
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165 {
166 struct l2cap_chan *c;
167
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&c->src, src))
170 return c;
171 }
172 return NULL;
173 }
174
/* Bind @chan to source PSM @psm on local address @src.  When @psm is 0,
 * auto-allocate a free PSM from the dynamic range 0x1001-0x10ff.
 *
 * Returns 0 on success, -EADDRINUSE when the requested PSM is already
 * bound on @src, or -EINVAL when no dynamic PSM is free.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
    int err;

    /* chan_list_lock serialises lookup and bind so two concurrent
     * binders cannot claim the same PSM.
     */
    write_lock(&chan_list_lock);

    if (psm && __l2cap_global_chan_by_addr(psm, src)) {
        err = -EADDRINUSE;
        goto done;
    }

    if (psm) {
        chan->psm = psm;
        chan->sport = psm;
        err = 0;
    } else {
        u16 p;

        err = -EINVAL;
        /* Step by 2 keeps candidates odd, as required for the least
         * significant octet of a valid PSM.
         */
        for (p = 0x1001; p < 0x1100; p += 2)
            if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
                chan->psm = cpu_to_le16(p);
                chan->sport = cpu_to_le16(p);
                err = 0;
                break;
            }
    }

done:
    write_unlock(&chan_list_lock);
    return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
208
/* Bind @chan to the fixed source CID @scid and switch it to the fixed
 * channel type.  Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
    write_lock(&chan_list_lock);

    /* Override the defaults (which are for conn-oriented) */
    chan->omtu = L2CAP_DEFAULT_MTU;
    chan->chan_type = L2CAP_CHAN_FIXED;

    chan->scid = scid;

    write_unlock(&chan_list_lock);

    return 0;
}
223
224 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
225 {
226 u16 cid, dyn_end;
227
228 if (conn->hcon->type == LE_LINK)
229 dyn_end = L2CAP_CID_LE_DYN_END;
230 else
231 dyn_end = L2CAP_CID_DYN_END;
232
233 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
234 if (!__l2cap_get_chan_by_scid(conn, cid))
235 return cid;
236 }
237
238 return 0;
239 }
240
/* Move @chan to @state and notify the channel owner through the
 * state_change callback (with no error).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
    BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
           state_to_string(state));

    chan->state = state;
    chan->ops->state_change(chan, state, 0);
}
249
/* Move @chan to @state and report @err to the owner in one callback. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
                        int state, int err)
{
    chan->state = state;
    chan->ops->state_change(chan, chan->state, err);
}
256
/* Report @err to the channel owner without changing the channel state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
    chan->ops->state_change(chan, chan->state, err);
}
261
/* Arm the ERTM retransmission timer, but only if the monitor timer is not
 * already pending (the two timers are mutually exclusive) and a
 * retransmission timeout has been configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
    if (!delayed_work_pending(&chan->monitor_timer) &&
        chan->retrans_timeout) {
        l2cap_set_timer(chan, &chan->retrans_timer,
                msecs_to_jiffies(chan->retrans_timeout));
    }
}
270
/* Arm the ERTM monitor timer (if configured), cancelling the
 * retransmission timer first since only one of the two may run.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
    __clear_retrans_timer(chan);
    if (chan->monitor_timeout) {
        l2cap_set_timer(chan, &chan->monitor_timer,
                msecs_to_jiffies(chan->monitor_timeout));
    }
}
279
280 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
281 u16 seq)
282 {
283 struct sk_buff *skb;
284
285 skb_queue_walk(head, skb) {
286 if (bt_cb(skb)->control.txseq == seq)
287 return skb;
288 }
289
290 return NULL;
291 }
292
293 /* ---- L2CAP sequence number lists ---- */
294
295 /* For ERTM, ordered lists of sequence numbers must be tracked for
296 * SREJ requests that are received and for frames that are to be
297 * retransmitted. These seq_list functions implement a singly-linked
298 * list in an array, where membership in the list can also be checked
299 * in constant time. Items can also be added to the tail of the list
300 * and removed from the head in constant time, without further memory
301 * allocs or frees.
302 */
303
304 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
305 {
306 size_t alloc_size, i;
307
308 /* Allocated size is a power of 2 to map sequence numbers
309 * (which may be up to 14 bits) in to a smaller array that is
310 * sized for the negotiated ERTM transmit windows.
311 */
312 alloc_size = roundup_pow_of_two(size);
313
314 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
315 if (!seq_list->list)
316 return -ENOMEM;
317
318 seq_list->mask = alloc_size - 1;
319 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
320 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
321 for (i = 0; i < alloc_size; i++)
322 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
323
324 return 0;
325 }
326
/* Release the backing array of a sequence list.  Safe to call on a list
 * whose allocation failed (kfree(NULL) is a no-op).
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
    kfree(seq_list->list);
}
331
/* Return true if @seq is currently a member of the list.  A slot holding
 * L2CAP_SEQ_LIST_CLEAR means "not linked"; anything else (a successor or
 * L2CAP_SEQ_LIST_TAIL) means the sequence number is queued.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
                       u16 seq)
{
    /* Constant-time check for list membership */
    return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
338
/* Remove and return the sequence number at the head of the list.
 *
 * Callers must ensure the list is non-empty: popping an empty list would
 * read the L2CAP_SEQ_LIST_CLEAR sentinel as a sequence number.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
    u16 seq = seq_list->head;
    u16 mask = seq_list->mask;

    /* Advance head to the old head's successor and unlink the slot */
    seq_list->head = seq_list->list[seq & mask];
    seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

    /* Popping the last element: reset to the empty-list state */
    if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
        seq_list->head = L2CAP_SEQ_LIST_CLEAR;
        seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
    }

    return seq;
}
354
/* Empty the list, resetting every slot and the head/tail markers.  A list
 * that is already empty is left untouched (its array is all-CLEAR by
 * invariant).
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
    u16 i;

    if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
        return;

    for (i = 0; i <= seq_list->mask; i++)
        seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

    seq_list->head = L2CAP_SEQ_LIST_CLEAR;
    seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
368
/* Append @seq to the tail of the list in constant time.  Duplicate
 * appends are silently ignored (membership is tracked per slot).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
    u16 mask = seq_list->mask;

    /* All appends happen in constant time */

    /* Already a member -- nothing to do */
    if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
        return;

    if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
        /* Empty list: the new element becomes the head */
        seq_list->head = seq;
    else
        /* Link the old tail's slot to the new element */
        seq_list->list[seq_list->tail & mask] = seq;

    seq_list->tail = seq;
    seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
386
/* Delayed-work handler for the channel timer: close the channel with an
 * error reason derived from its current state, then drop the reference
 * that was taken when the timer was armed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
    struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
                           chan_timer.work);
    struct l2cap_conn *conn = chan->conn;
    int reason;

    BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

    /* Lock order: conn->chan_lock before the channel lock, matching
     * every other path that takes both.
     */
    mutex_lock(&conn->chan_lock);
    l2cap_chan_lock(chan);

    if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
        reason = ECONNREFUSED;
    else if (chan->state == BT_CONNECT &&
         chan->sec_level != BT_SECURITY_SDP)
        reason = ECONNREFUSED;
    else
        reason = ETIMEDOUT;

    l2cap_chan_close(chan, reason);

    l2cap_chan_unlock(chan);

    /* ops->close is invoked without the channel lock held */
    chan->ops->close(chan);
    mutex_unlock(&conn->chan_lock);

    l2cap_chan_put(chan);
}
416
/* Allocate and initialise a new channel in BT_OPEN state, add it to the
 * global channel list, and return it with one kref held.  Returns NULL on
 * allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
    struct l2cap_chan *chan;

    /* NOTE(review): GFP_ATOMIC looks stricter than necessary -- callers
     * visible here appear to run in process context; confirm before
     * relaxing to GFP_KERNEL.
     */
    chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
    if (!chan)
        return NULL;

    mutex_init(&chan->lock);

    /* Set default lock nesting level */
    atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

    write_lock(&chan_list_lock);
    list_add(&chan->global_l, &chan_list);
    write_unlock(&chan_list_lock);

    INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

    chan->state = BT_OPEN;

    /* Initial reference; released via l2cap_chan_put() */
    kref_init(&chan->kref);

    /* This flag is cleared in l2cap_chan_ready() */
    set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

    BT_DBG("chan %p", chan);

    return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
448
/* kref release callback: unlink the channel from the global list and free
 * it.  Runs when the last l2cap_chan_put() drops the refcount to zero.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
    struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

    BT_DBG("chan %p", chan);

    write_lock(&chan_list_lock);
    list_del(&chan->global_l);
    write_unlock(&chan_list_lock);

    kfree(chan);
}
461
/* Take an additional reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
    BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

    kref_get(&c->kref);
}
468
/* Drop a reference on the channel; frees it via l2cap_chan_destroy()
 * when this was the last one.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
    BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

    kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
476
477 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
478 {
479 chan->fcs = L2CAP_FCS_CRC16;
480 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
481 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
482 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
483 chan->remote_max_tx = chan->max_tx;
484 chan->remote_tx_win = chan->tx_win;
485 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->sec_level = BT_SECURITY_LOW;
487 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
488 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
489 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
490 chan->conf_state = 0;
491
492 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
493 }
494 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
495
/* Initialise LE credit-based flow control state: reset SDU reassembly,
 * start with zero TX credits (the peer must grant them) and the
 * module-default RX credits, and clamp MPS to the channel's MTU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
    chan->sdu = NULL;
    chan->sdu_last_frag = NULL;
    chan->sdu_len = 0;
    chan->tx_credits = 0;
    chan->rx_credits = le_max_credits;
    chan->mps = min_t(u16, chan->imtu, le_default_mps);

    skb_queue_head_init(&chan->tx_q);
}
507
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type,
 * set best-effort flow-spec defaults, take references and add the channel
 * to the connection's list.  Caller must hold conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
    BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
           __le16_to_cpu(chan->psm), chan->dcid);

    conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

    chan->conn = conn;

    switch (chan->chan_type) {
    case L2CAP_CHAN_CONN_ORIENTED:
        /* Alloc CID for connection-oriented socket */
        chan->scid = l2cap_alloc_cid(conn);
        if (conn->hcon->type == ACL_LINK)
            chan->omtu = L2CAP_DEFAULT_MTU;
        break;

    case L2CAP_CHAN_CONN_LESS:
        /* Connectionless socket */
        chan->scid = L2CAP_CID_CONN_LESS;
        chan->dcid = L2CAP_CID_CONN_LESS;
        chan->omtu = L2CAP_DEFAULT_MTU;
        break;

    case L2CAP_CHAN_FIXED:
        /* Caller will set CID and CID specific MTU values */
        break;

    default:
        /* Raw socket can send/recv signalling messages only */
        chan->scid = L2CAP_CID_SIGNALING;
        chan->dcid = L2CAP_CID_SIGNALING;
        chan->omtu = L2CAP_DEFAULT_MTU;
    }

    /* Best-effort flow-spec defaults */
    chan->local_id = L2CAP_BESTEFFORT_ID;
    chan->local_stype = L2CAP_SERV_BESTEFFORT;
    chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
    chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
    chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
    chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

    /* Reference owned by conn->chan_l; dropped in l2cap_chan_del() */
    l2cap_chan_hold(chan);

    /* Only keep a reference for fixed channels if they requested it */
    if (chan->chan_type != L2CAP_CHAN_FIXED ||
        test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
        hci_conn_hold(conn->hcon);

    list_add(&chan->list, &conn->chan_l);
}
559
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
    mutex_lock(&conn->chan_lock);
    __l2cap_chan_add(conn, chan);
    mutex_unlock(&conn->chan_lock);
}
566
/* Detach @chan from its connection and tear it down: notify the owner via
 * ops->teardown, unlink from conn->chan_l (dropping the list reference
 * and, where applicable, the hci_conn reference taken in
 * __l2cap_chan_add()), disconnect any AMP logical link, and purge
 * mode-specific queues and timers.
 *
 * Caller must hold conn->chan_lock and the channel lock when a
 * connection is attached.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
    struct l2cap_conn *conn = chan->conn;

    __clear_chan_timer(chan);

    BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

    chan->ops->teardown(chan, err);

    if (conn) {
        struct amp_mgr *mgr = conn->hcon->amp_mgr;
        /* Delete from channel list */
        list_del(&chan->list);

        l2cap_chan_put(chan);

        chan->conn = NULL;

        /* Reference was only held for non-fixed channels or
         * fixed channels that explicitly requested it using the
         * FLAG_HOLD_HCI_CONN flag.
         */
        if (chan->chan_type != L2CAP_CHAN_FIXED ||
            test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
            hci_conn_drop(conn->hcon);

        if (mgr && mgr->bredr_chan == chan)
            mgr->bredr_chan = NULL;
    }

    if (chan->hs_hchan) {
        struct hci_chan *hs_hchan = chan->hs_hchan;

        BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
        amp_disconnect_logical_link(hs_hchan);
    }

    /* Nothing below was ever set up if configuration never completed */
    if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
        return;

    switch(chan->mode) {
    case L2CAP_MODE_BASIC:
        break;

    case L2CAP_MODE_LE_FLOWCTL:
        skb_queue_purge(&chan->tx_q);
        break;

    case L2CAP_MODE_ERTM:
        __clear_retrans_timer(chan);
        __clear_monitor_timer(chan);
        __clear_ack_timer(chan);

        skb_queue_purge(&chan->srej_q);

        l2cap_seq_list_free(&chan->srej_list);
        l2cap_seq_list_free(&chan->retrans_list);

        /* fall through */

    case L2CAP_MODE_STREAMING:
        skb_queue_purge(&chan->tx_q);
        break;
    }

    return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
636
/* Work handler: propagate the HCI connection's (possibly resolved)
 * destination address and type to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
    struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
                           id_addr_update_work);
    struct hci_conn *hcon = conn->hcon;
    struct l2cap_chan *chan;

    mutex_lock(&conn->chan_lock);

    list_for_each_entry(chan, &conn->chan_l, list) {
        /* Each channel is locked while its address is rewritten */
        l2cap_chan_lock(chan);
        bacpy(&chan->dst, &hcon->dst);
        chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
        l2cap_chan_unlock(chan);
    }

    mutex_unlock(&conn->chan_lock);
}
655
656 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
657 {
658 struct l2cap_conn *conn = chan->conn;
659 struct l2cap_le_conn_rsp rsp;
660 u16 result;
661
662 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
663 result = L2CAP_CR_AUTHORIZATION;
664 else
665 result = L2CAP_CR_BAD_PSM;
666
667 l2cap_state_change(chan, BT_DISCONN);
668
669 rsp.dcid = cpu_to_le16(chan->scid);
670 rsp.mtu = cpu_to_le16(chan->imtu);
671 rsp.mps = cpu_to_le16(chan->mps);
672 rsp.credits = cpu_to_le16(chan->rx_credits);
673 rsp.result = cpu_to_le16(result);
674
675 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
676 &rsp);
677 }
678
679 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
680 {
681 struct l2cap_conn *conn = chan->conn;
682 struct l2cap_conn_rsp rsp;
683 u16 result;
684
685 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
686 result = L2CAP_CR_SEC_BLOCK;
687 else
688 result = L2CAP_CR_BAD_PSM;
689
690 l2cap_state_change(chan, BT_DISCONN);
691
692 rsp.scid = cpu_to_le16(chan->dcid);
693 rsp.dcid = cpu_to_le16(chan->scid);
694 rsp.result = cpu_to_le16(result);
695 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
696
697 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
698 }
699
/* Close @chan with the given @reason, choosing the appropriate shutdown
 * path for its current state: send a disconnect request for established
 * connection-oriented channels, reject a half-open incoming connection,
 * or simply delete/tear down otherwise.
 *
 * Caller is expected to hold the channel lock (and conn->chan_lock on
 * paths that may delete the channel).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
    struct l2cap_conn *conn = chan->conn;

    BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

    switch (chan->state) {
    case BT_LISTEN:
        chan->ops->teardown(chan, 0);
        break;

    case BT_CONNECTED:
    case BT_CONFIG:
        if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
            /* Give the peer time to answer the disconnect
             * request before the channel timer fires.
             */
            __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
            l2cap_send_disconn_req(chan, reason);
        } else
            l2cap_chan_del(chan, reason);
        break;

    case BT_CONNECT2:
        /* Incoming connection not yet accepted: reject it with the
         * transport-specific response before deleting the channel.
         */
        if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
            if (conn->hcon->type == ACL_LINK)
                l2cap_chan_connect_reject(chan);
            else if (conn->hcon->type == LE_LINK)
                l2cap_chan_le_connect_reject(chan);
        }

        l2cap_chan_del(chan, reason);
        break;

    case BT_CONNECT:
    case BT_DISCONN:
        l2cap_chan_del(chan, reason);
        break;

    default:
        chan->ops->teardown(chan, 0);
        break;
    }
}
EXPORT_SYMBOL(l2cap_chan_close);
742
/* Map the channel type and security level to the HCI authentication
 * requirement used when securing the link.  As a side effect, SDP-class
 * channels (SDP PSM, 3DSP PSM) at BT_SECURITY_LOW are raised to
 * BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
    switch (chan->chan_type) {
    case L2CAP_CHAN_RAW:
        switch (chan->sec_level) {
        case BT_SECURITY_HIGH:
        case BT_SECURITY_FIPS:
            return HCI_AT_DEDICATED_BONDING_MITM;
        case BT_SECURITY_MEDIUM:
            return HCI_AT_DEDICATED_BONDING;
        default:
            return HCI_AT_NO_BONDING;
        }
        break;
    case L2CAP_CHAN_CONN_LESS:
        if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
            if (chan->sec_level == BT_SECURITY_LOW)
                chan->sec_level = BT_SECURITY_SDP;
        }
        if (chan->sec_level == BT_SECURITY_HIGH ||
            chan->sec_level == BT_SECURITY_FIPS)
            return HCI_AT_NO_BONDING_MITM;
        else
            return HCI_AT_NO_BONDING;
        break;
    case L2CAP_CHAN_CONN_ORIENTED:
        if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
            if (chan->sec_level == BT_SECURITY_LOW)
                chan->sec_level = BT_SECURITY_SDP;

            if (chan->sec_level == BT_SECURITY_HIGH ||
                chan->sec_level == BT_SECURITY_FIPS)
                return HCI_AT_NO_BONDING_MITM;
            else
                return HCI_AT_NO_BONDING;
        }
        /* fall through */
    default:
        switch (chan->sec_level) {
        case BT_SECURITY_HIGH:
        case BT_SECURITY_FIPS:
            return HCI_AT_GENERAL_BONDING_MITM;
        case BT_SECURITY_MEDIUM:
            return HCI_AT_GENERAL_BONDING;
        default:
            return HCI_AT_NO_BONDING;
        }
        break;
    }
}
793
/* Service level security */
/* Request that the underlying link satisfy the channel's security level.
 * LE links are handled by SMP; BR/EDR links go through HCI with the
 * authentication requirement derived from the channel type.
 * Returns the result of the underlying security call.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
    struct l2cap_conn *conn = chan->conn;
    __u8 auth_type;

    if (conn->hcon->type == LE_LINK)
        return smp_conn_security(conn->hcon, chan->sec_level);

    auth_type = l2cap_get_auth_type(chan);

    return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
                 initiator);
}
808
809 static u8 l2cap_get_ident(struct l2cap_conn *conn)
810 {
811 u8 id;
812
813 /* Get next available identificator.
814 * 1 - 128 are used by kernel.
815 * 129 - 199 are reserved.
816 * 200 - 254 are used by utilities like l2ping, etc.
817 */
818
819 mutex_lock(&conn->ident_lock);
820
821 if (++conn->tx_ident > 128)
822 conn->tx_ident = 1;
823
824 id = conn->tx_ident;
825
826 mutex_unlock(&conn->ident_lock);
827
828 return id;
829 }
830
/* Build and transmit an L2CAP signalling command on @conn's signalling
 * channel.  Silently drops the command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
               void *data)
{
    struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
    u8 flags;

    BT_DBG("code 0x%2.2x", code);

    if (!skb)
        return;

    /* Signalling traffic is sent non-flushable when the controller
     * supports it.
     */
    if (lmp_no_flush_capable(conn->hcon->hdev))
        flags = ACL_START_NO_FLUSH;
    else
        flags = ACL_START;

    bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
    skb->priority = HCI_PRIO_MAX;

    hci_send_acl(conn->hchan, skb, flags);
}
852
853 static bool __chan_is_moving(struct l2cap_chan *chan)
854 {
855 return chan->move_state != L2CAP_MOVE_STABLE &&
856 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
857 }
858
/* Transmit @skb on the channel's current transport: the high-speed AMP
 * link when one is attached and no move is in progress, otherwise the
 * BR/EDR ACL link.  Consumes @skb in all cases.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
    struct hci_conn *hcon = chan->conn->hcon;
    u16 flags;

    BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
           skb->priority);

    if (chan->hs_hcon && !__chan_is_moving(chan)) {
        if (chan->hs_hchan)
            hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
        else
            /* AMP link announced but no logical channel yet */
            kfree_skb(skb);

        return;
    }

    /* Non-flushable only when the channel did not ask to be flushable
     * and the controller supports it.
     */
    if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
        lmp_no_flush_capable(hcon->hdev))
        flags = ACL_START_NO_FLUSH;
    else
        flags = ACL_START;

    bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
    hci_send_acl(chan->conn->hchan, skb, flags);
}
885
886 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
887 {
888 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
889 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
890
891 if (enh & L2CAP_CTRL_FRAME_TYPE) {
892 /* S-Frame */
893 control->sframe = 1;
894 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
895 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
896
897 control->sar = 0;
898 control->txseq = 0;
899 } else {
900 /* I-Frame */
901 control->sframe = 0;
902 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
903 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
904
905 control->poll = 0;
906 control->super = 0;
907 }
908 }
909
910 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
911 {
912 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
913 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
914
915 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
916 /* S-Frame */
917 control->sframe = 1;
918 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
919 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
920
921 control->sar = 0;
922 control->txseq = 0;
923 } else {
924 /* I-Frame */
925 control->sframe = 0;
926 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
927 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
928
929 control->poll = 0;
930 control->super = 0;
931 }
932 }
933
/* Decode and strip the control field at the front of @skb, using the
 * extended (32-bit) or enhanced (16-bit) layout depending on the
 * channel's FLAG_EXT_CTRL setting.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
                    struct sk_buff *skb)
{
    if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
        __unpack_extended_control(get_unaligned_le32(skb->data),
                      &bt_cb(skb)->control);
        skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
    } else {
        __unpack_enhanced_control(get_unaligned_le16(skb->data),
                      &bt_cb(skb)->control);
        skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
    }
}
947
948 static u32 __pack_extended_control(struct l2cap_ctrl *control)
949 {
950 u32 packed;
951
952 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
953 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
954
955 if (control->sframe) {
956 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
957 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
958 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
959 } else {
960 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
961 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
962 }
963
964 return packed;
965 }
966
967 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
968 {
969 u16 packed;
970
971 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
972 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
973
974 if (control->sframe) {
975 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
976 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
977 packed |= L2CAP_CTRL_FRAME_TYPE;
978 } else {
979 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
980 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
981 }
982
983 return packed;
984 }
985
/* Write the encoded control field into @skb immediately after the basic
 * L2CAP header, using the layout selected by FLAG_EXT_CTRL.  The skb must
 * already have room reserved at that offset.
 */
static inline void __pack_control(struct l2cap_chan *chan,
                  struct l2cap_ctrl *control,
                  struct sk_buff *skb)
{
    if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
        put_unaligned_le32(__pack_extended_control(control),
                   skb->data + L2CAP_HDR_SIZE);
    } else {
        put_unaligned_le16(__pack_enhanced_control(control),
                   skb->data + L2CAP_HDR_SIZE);
    }
}
998
999 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1000 {
1001 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1002 return L2CAP_EXT_HDR_SIZE;
1003 else
1004 return L2CAP_ENH_HDR_SIZE;
1005 }
1006
/* Build a complete S-frame PDU for @chan carrying the already-encoded
 * @control field, appending an FCS when CRC16 is in use.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
                           u32 control)
{
    struct sk_buff *skb;
    struct l2cap_hdr *lh;
    int hlen = __ertm_hdr_size(chan);

    if (chan->fcs == L2CAP_FCS_CRC16)
        hlen += L2CAP_FCS_SIZE;

    skb = bt_skb_alloc(hlen, GFP_KERNEL);

    if (!skb)
        return ERR_PTR(-ENOMEM);

    /* Basic header: payload length excludes the header itself */
    lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
    lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
    lh->cid = cpu_to_le16(chan->dcid);

    if (test_bit(FLAG_EXT_CTRL, &chan->flags))
        put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
    else
        put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

    /* FCS covers everything written so far (header + control) */
    if (chan->fcs == L2CAP_FCS_CRC16) {
        u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
        put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
    }

    skb->priority = HCI_PRIO_MAX;
    return skb;
}
1039
/* Send the supervisory frame described by @control, updating ERTM
 * bookkeeping (F-bit, RNR-sent flag, last acked sequence, ack timer) as a
 * side effect.  Does nothing for non-S-frames or while a channel move is
 * in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
                  struct l2cap_ctrl *control)
{
    struct sk_buff *skb;
    u32 control_field;

    BT_DBG("chan %p, control %p", chan, control);

    if (!control->sframe)
        return;

    if (__chan_is_moving(chan))
        return;

    /* A pending F-bit is carried on the next non-poll S-frame */
    if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
        !control->poll)
        control->final = 1;

    if (control->super == L2CAP_SUPER_RR)
        clear_bit(CONN_RNR_SENT, &chan->conn_state);
    else if (control->super == L2CAP_SUPER_RNR)
        set_bit(CONN_RNR_SENT, &chan->conn_state);

    /* RR/RNR acknowledge up to reqseq, so the ack timer can be
     * cancelled; SREJ does not acknowledge anything.
     */
    if (control->super != L2CAP_SUPER_SREJ) {
        chan->last_acked_seq = control->reqseq;
        __clear_ack_timer(chan);
    }

    BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
           control->final, control->poll, control->super);

    if (test_bit(FLAG_EXT_CTRL, &chan->flags))
        control_field = __pack_extended_control(control);
    else
        control_field = __pack_enhanced_control(control);

    skb = l2cap_create_sframe_pdu(chan, control_field);
    if (!IS_ERR(skb))
        l2cap_do_send(chan, skb);
}
1080
1081 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1082 {
1083 struct l2cap_ctrl control;
1084
1085 BT_DBG("chan %p, poll %d", chan, poll);
1086
1087 memset(&control, 0, sizeof(control));
1088 control.sframe = 1;
1089 control.poll = poll;
1090
1091 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1092 control.super = L2CAP_SUPER_RNR;
1093 else
1094 control.super = L2CAP_SUPER_RR;
1095
1096 control.reqseq = chan->buffer_seq;
1097 l2cap_send_sframe(chan, &control);
1098 }
1099
1100 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1101 {
1102 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1103 return true;
1104
1105 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1106 }
1107
/* Decide whether this channel may be created on (or moved to) an AMP
 * controller: high-speed must be enabled, the remote must support the
 * A2MP fixed channel, at least one non-BR/EDR AMP controller must be up,
 * and the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
    struct l2cap_conn *conn = chan->conn;
    struct hci_dev *hdev;
    bool amp_available = false;

    if (!conn->hs_enabled)
        return false;

    if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
        return false;

    read_lock(&hci_dev_list_lock);
    list_for_each_entry(hdev, &hci_dev_list, list) {
        if (hdev->amp_type != AMP_TYPE_BREDR &&
            test_bit(HCI_UP, &hdev->flags)) {
            amp_available = true;
            break;
        }
    }
    read_unlock(&hci_dev_list_lock);

    if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
        return amp_available;

    return false;
}
1135
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
    /* Check EFS parameters */
    /* TODO: no validation of the extended flow specification is
     * implemented yet; every configuration is currently accepted.
     */
    return true;
}
1141
/* Send an L2CAP Connection Request for @chan, allocating a fresh command
 * identifier and marking the connect as pending.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
    struct l2cap_conn *conn = chan->conn;
    struct l2cap_conn_req req;

    req.scid = cpu_to_le16(chan->scid);
    req.psm = chan->psm;

    chan->ident = l2cap_get_ident(conn);

    set_bit(CONF_CONNECT_PEND, &chan->conf_state);

    l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1156
/* Send a Create Channel Request asking the peer to create the channel on
 * AMP controller @amp_id, using a freshly allocated command identifier.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
    struct l2cap_create_chan_req req;
    req.scid = cpu_to_le16(chan->scid);
    req.psm = chan->psm;
    req.amp_id = amp_id;

    chan->ident = l2cap_get_ident(chan->conn);

    l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
               sizeof(req), &req);
}
1169
/* Prepare an ERTM channel for an AMP move: stop all timers, mark
 * already-sent frames for retransmission, reset sequencing and SREJ
 * state, and park TX behind the REMOTE_BUSY flag until the move
 * completes.  No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
    struct sk_buff *skb;

    BT_DBG("chan %p", chan);

    if (chan->mode != L2CAP_MODE_ERTM)
        return;

    __clear_retrans_timer(chan);
    __clear_monitor_timer(chan);
    __clear_ack_timer(chan);

    chan->retry_count = 0;
    /* Frames at the head of tx_q that were already transmitted
     * (retries != 0) must be resent once on the new link; the first
     * untransmitted frame ends the scan.
     */
    skb_queue_walk(&chan->tx_q, skb) {
        if (bt_cb(skb)->control.retries)
            bt_cb(skb)->control.retries = 1;
        else
            break;
    }

    chan->expected_tx_seq = chan->buffer_seq;

    clear_bit(CONN_REJ_ACT, &chan->conn_state);
    clear_bit(CONN_SREJ_ACT, &chan->conn_state);
    l2cap_seq_list_clear(&chan->retrans_list);
    l2cap_seq_list_clear(&chan->srej_list);
    skb_queue_purge(&chan->srej_q);

    chan->tx_state = L2CAP_TX_STATE_XMIT;
    chan->rx_state = L2CAP_RX_STATE_MOVE;

    /* Block transmission until the move has finished */
    set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1204
1205 static void l2cap_move_done(struct l2cap_chan *chan)
1206 {
1207 u8 move_role = chan->move_role;
1208 BT_DBG("chan %p", chan);
1209
1210 chan->move_state = L2CAP_MOVE_STABLE;
1211 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1212
1213 if (chan->mode != L2CAP_MODE_ERTM)
1214 return;
1215
1216 switch (move_role) {
1217 case L2CAP_MOVE_ROLE_INITIATOR:
1218 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1219 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1220 break;
1221 case L2CAP_MOVE_ROLE_RESPONDER:
1222 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1223 break;
1224 }
1225 }
1226
/* Transition a channel to BT_CONNECTED and notify its owner via the
 * ready() callback.  Also cancels the channel timer.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* An LE flow-control channel with no tx credits cannot send yet */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1240
1241 static void l2cap_le_connect(struct l2cap_chan *chan)
1242 {
1243 struct l2cap_conn *conn = chan->conn;
1244 struct l2cap_le_conn_req req;
1245
1246 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1247 return;
1248
1249 req.psm = chan->psm;
1250 req.scid = cpu_to_le16(chan->scid);
1251 req.mtu = cpu_to_le16(chan->imtu);
1252 req.mps = cpu_to_le16(chan->mps);
1253 req.credits = cpu_to_le16(chan->rx_credits);
1254
1255 chan->ident = l2cap_get_ident(conn);
1256
1257 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1258 sizeof(req), &req);
1259 }
1260
/* Start an LE channel once link security allows it.
 *
 * Channels without a PSM become ready immediately; connection
 * oriented channels still in BT_CONNECT send an LE connect request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* Wait until the link reaches the required security level */
	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}
1276
1277 static void l2cap_start_connection(struct l2cap_chan *chan)
1278 {
1279 if (__amp_capable(chan)) {
1280 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1281 a2mp_discover_amp(chan);
1282 } else if (chan->conn->hcon->type == LE_LINK) {
1283 l2cap_le_start(chan);
1284 } else {
1285 l2cap_send_conn_req(chan);
1286 }
1287 }
1288
/* Request the peer's feature mask via an Information Request, at most
 * once per connection, and arm the info timer to bound the wait.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1306
/* Drive channel establishment on @chan.
 *
 * LE links go straight to the LE flow.  On BR/EDR the peer's feature
 * mask must first be fetched (l2cap_request_info); the actual connect
 * happens once the info exchange is done, security passes, and no
 * connect request is already pending.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* Still waiting for the feature mask response or timeout */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (l2cap_chan_check_security(chan, true) &&
	    __l2cap_no_conn_pending(chan))
		l2cap_start_connection(chan);
}
1328
1329 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1330 {
1331 u32 local_feat_mask = l2cap_feat_mask;
1332 if (!disable_ertm)
1333 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1334
1335 switch (mode) {
1336 case L2CAP_MODE_ERTM:
1337 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1338 case L2CAP_MODE_STREAMING:
1339 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1340 default:
1341 return 0x00;
1342 }
1343 }
1344
/* Send a Disconnection Request for @chan and move it to BT_DISCONN,
 * recording @err as the channel error.  A2MP channels only change
 * state; no request is sent for them.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* A connected ERTM channel must stop its timers before teardown */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1371
1372 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and push its connection state machine
 * forward: non-connection-oriented channels become ready, BT_CONNECT
 * channels start the connect procedure, and BT_CONNECT2 channels get
 * a connect response (and, on success, an initial configure request).
 *
 * Called with no locks held; takes conn->chan_lock and each channel's
 * lock in turn.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait for security and for any already-pending
			 * connect request to resolve first.
			 */
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State-2 devices require a supported mode; if
			 * neither side supports it, close the channel.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Owner must authorize before accept */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Accepted: start configuration right away */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1449
/* Post-connect processing specific to LE links: trigger pairing for
 * outgoing connections and, as slave, request a connection parameter
 * update when the current interval is outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1482
/* Handle a newly-established link: start the info exchange on ACL,
 * advance every attached channel, run LE-specific setup, and finally
 * schedule processing of any frames that arrived early.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed separately */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels wait for the info exchange */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Drain frames that were queued before the conn became ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1523
/* Notify sockets that we cannot guarantee reliability anymore.
 *
 * Only channels that asked for forced reliability (FLAG_FORCE_RELIABLE)
 * receive the error @err.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1540
/* Info-request timer expiry: give up on the peer's feature mask reply,
 * mark the exchange as done, and continue starting channels anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1551
1552 /*
1553 * l2cap_user
1554 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1555 * callback is called during registration. The ->remove callback is called
1556 * during unregistration.
1557 * An l2cap_user object can either be explicitly unregistered or when the
1558 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1559 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1560 * External modules must own a reference to the l2cap_conn object if they intend
1561 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1562 * any time if they don't.
1563 */
1564
/* Register an external l2cap_user on @conn.
 *
 * Returns 0 on success, -EINVAL if @user is already linked into a
 * list, -ENODEV if the connection has already been torn down, or the
 * error returned by the user's probe() callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1602
/* Explicitly unregister @user from @conn and invoke its remove()
 * callback.  Safe to call for a user that was never registered (the
 * NULL list pointers make it a no-op).
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	/* Clear the pointers so re-registration checks work */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1621
/* Unlink and notify every registered l2cap_user on @conn, popping the
 * head of the list until it is empty.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		/* Clear the pointers so re-registration checks work */
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1634
/* Tear down the L2CAP connection attached to @hcon.
 *
 * Cancels pending work, unregisters users, closes every channel with
 * error @err, detaches the hci_chan, and finally drops the l2cap_conn
 * reference.  Callers rely on the parent hci_conn locking (see the
 * comment in l2cap_register_user).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives until after close() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1690
/* kref release callback: drop the hci_conn reference and free the
 * l2cap_conn itself.  Called when the last l2cap_conn_put() runs.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1698
/* Take a reference on @conn and return it, for call-chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1705
/* Drop a reference on @conn; frees it via l2cap_conn_free when last. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1711
1712 /* ---- Socket interface ---- */
1713
1714 /* Find socket with psm and source / destination bdaddr.
1715 * Returns closest match.
1716 */
1717 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1718 bdaddr_t *src,
1719 bdaddr_t *dst,
1720 u8 link_type)
1721 {
1722 struct l2cap_chan *c, *c1 = NULL;
1723
1724 read_lock(&chan_list_lock);
1725
1726 list_for_each_entry(c, &chan_list, global_l) {
1727 if (state && c->state != state)
1728 continue;
1729
1730 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1731 continue;
1732
1733 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1734 continue;
1735
1736 if (c->psm == psm) {
1737 int src_match, dst_match;
1738 int src_any, dst_any;
1739
1740 /* Exact match. */
1741 src_match = !bacmp(&c->src, src);
1742 dst_match = !bacmp(&c->dst, dst);
1743 if (src_match && dst_match) {
1744 l2cap_chan_hold(c);
1745 read_unlock(&chan_list_lock);
1746 return c;
1747 }
1748
1749 /* Closest match */
1750 src_any = !bacmp(&c->src, BDADDR_ANY);
1751 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1752 if ((src_match && dst_any) || (src_any && dst_match) ||
1753 (src_any && dst_any))
1754 c1 = c;
1755 }
1756 }
1757
1758 if (c1)
1759 l2cap_chan_hold(c1);
1760
1761 read_unlock(&chan_list_lock);
1762
1763 return c1;
1764 }
1765
/* ERTM monitor timer expiry: feed L2CAP_EV_MONITOR_TO into the tx
 * state machine.  The work item holds a channel reference which is
 * dropped on every exit path.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel may have been disconnected while the work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1786
/* ERTM retransmission timer expiry: feed L2CAP_EV_RETRANS_TO into the
 * tx state machine.  The work item holds a channel reference which is
 * dropped on every exit path.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel may have been disconnected while the work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1806
/* Transmit queued frames in streaming mode.
 *
 * Splices @skbs onto the channel tx queue, then stamps each frame
 * with sequence numbers and (optionally) a CRC16 FCS before sending.
 * Streaming mode has no retransmission, so frames are dequeued for
 * good.  Suppressed entirely while a channel move is in progress.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames never acknowledge anything */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1845
/* Transmit as many pending I-frames as the remote tx window allows.
 *
 * Each frame is stamped with the current reqseq/txseq (piggy-backing
 * an ack), optionally protected with a CRC16 FCS, and sent as a clone
 * so the original stays queued for possible retransmission.
 *
 * Returns the number of frames sent, 0 when nothing could be sent
 * (remote busy, mid-move, window full), or -ENOTCONN if the channel
 * is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Set the F-bit once if a poll response is owed */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL marks queue exhausted */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1915
/* Retransmit every frame whose sequence number is on the channel's
 * retrans_list.
 *
 * For each sequence number the matching frame is located in the tx
 * queue, its control field and FCS are rebuilt with the current
 * reqseq (and F-bit if owed), and a fresh copy/clone is sent.  Gives
 * up and clears the list when the retry limit is exceeded (sending a
 * disconnect request) or when allocation fails.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		/* Work on a stack copy of the control data */
		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1996
/* Queue the single frame named by @control->reqseq for retransmission
 * and transmit it immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2005
/* Retransmit every unacked frame starting at @control->reqseq.
 *
 * The retrans_list is rebuilt from scratch: frames before reqseq are
 * skipped, then every frame up to (but excluding) tx_send_head is
 * queued for resend.  Honors the remote-busy state and the P-bit in
 * @control (which arms the F-bit for the resends).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the peer has already acknowledged */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything sent-but-unacked for retransmission */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2039
/* Acknowledge received I-frames.
 *
 * When locally busy, sends an RNR S-frame immediately.  Otherwise
 * tries to piggy-back the ack on outgoing I-frames; failing that,
 * sends an explicit RR once the unacked window is 3/4 full, or arms
 * the ack timer to send one later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2089
/* Copy @len bytes of user data from @msg into @skb.
 *
 * The first @count bytes go into @skb's linear area (headers were
 * already written by the caller); any remainder is split into
 * HCI-MTU-sized continuation fragments chained on skb's frag_list.
 *
 * Returns the number of bytes copied or a negative error
 * (-EFAULT on copy failure, or the allocator's error).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
					msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
						msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account the fragment in the parent skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2134
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed
 * by the PSM and the user payload from @msg.
 *
 * Returns the skb on success or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear area is capped by the HCI MTU; the rest is fragmented */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2166
/* Build a basic-mode (B-frame) PDU: L2CAP header plus the user
 * payload from @msg, no PSM or control fields.
 *
 * Returns the skb on success or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Linear area is capped by the HCI MTU; the rest is fragmented */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2196
/* Build an ERTM/streaming I-frame PDU.
 *
 * Reserves space for the (enhanced or extended) control field, the
 * SDU length field when @sdulen is non-zero (i.e. a SAR start frame),
 * and the FCS when CRC16 is in use.  The control field itself is
 * zeroed here and filled in at transmit time.
 *
 * Returns the skb on success or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2250
/* Segment an SDU from @msg into ERTM/streaming I-frame PDUs queued on
 * @seg_queue.
 *
 * PDU size is derived from the HCI MTU (capped for BR/EDR links),
 * reduced by the largest possible L2CAP overhead, and further capped
 * by the remote MPS.  Each PDU is tagged with the appropriate SAR
 * value (unsegmented, start, continue, end).
 *
 * Returns 0 on success; on failure the queue is purged and the
 * PDU-builder's error is returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* The whole SDU fits in a single PDU */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* Only the start frame carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2317
/* Build an LE flow-control (K-frame) PDU: L2CAP header, the SDU
 * length field when @sdulen is non-zero (first PDU of an SDU), then
 * the user payload from @msg.
 *
 * Returns the skb on success or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2360
/* Segment an SDU from @msg into LE flow-control PDUs on @seg_queue.
 *
 * Only the first PDU carries the SDU length field, so it has
 * L2CAP_SDULEN_SIZE less payload than the following PDUs.
 *
 * Returns 0 on success; on failure the queue is purged and the
 * PDU-builder's error is returned.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU, drop the SDU length field and
		 * reclaim its space for payload.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2396
/* Transmit an outgoing SDU on @chan according to the channel type/mode.
 *
 * Returns the number of bytes accepted on success or a negative errno
 * (-ENOTCONN, -EMSGSIZE, -EAGAIN, -EBADFD, or a PDU construction error).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Without credits the peer cannot accept more PDUs */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel state may have changed while segmenting (the
		 * channel lock can be dropped during skb allocation).
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many queued PDUs as remaining credits allow */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2529
2530 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2531 {
2532 struct l2cap_ctrl control;
2533 u16 seq;
2534
2535 BT_DBG("chan %p, txseq %u", chan, txseq);
2536
2537 memset(&control, 0, sizeof(control));
2538 control.sframe = 1;
2539 control.super = L2CAP_SUPER_SREJ;
2540
2541 for (seq = chan->expected_tx_seq; seq != txseq;
2542 seq = __next_seq(chan, seq)) {
2543 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2544 control.reqseq = seq;
2545 l2cap_send_sframe(chan, &control);
2546 l2cap_seq_list_append(&chan->srej_list, seq);
2547 }
2548 }
2549
2550 chan->expected_tx_seq = __next_seq(chan, txseq);
2551 }
2552
2553 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2554 {
2555 struct l2cap_ctrl control;
2556
2557 BT_DBG("chan %p", chan);
2558
2559 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2560 return;
2561
2562 memset(&control, 0, sizeof(control));
2563 control.sframe = 1;
2564 control.super = L2CAP_SUPER_SREJ;
2565 control.reqseq = chan->srej_list.tail;
2566 l2cap_send_sframe(chan, &control);
2567 }
2568
/* Re-send SREJ S-frames for every sequence number on the srej_list,
 * making exactly one pass.  Each popped entry (except @txseq, which is
 * consumed) is re-appended so the list contents survive the pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Requeue so the entry remains tracked as missing */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2594
/* Process an acknowledgment up to (but not including) @reqseq: free the
 * acked I-frames from the tx queue, advance expected_ack_seq, and stop
 * the retransmission timer once nothing remains unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or no new frames are being acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* Frame may already have been dropped (e.g. retransmitted) */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2626
2627 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2628 {
2629 BT_DBG("chan %p", chan);
2630
2631 chan->expected_tx_seq = chan->buffer_seq;
2632 l2cap_seq_list_clear(&chan->srej_list);
2633 skb_queue_purge(&chan->srej_q);
2634 chan->rx_state = L2CAP_RX_STATE_RECV;
2635 }
2636
/* ERTM transmit state machine handler for the XMIT state.
 * @control may be NULL for events that carry no control field
 * (e.g. L2CAP_EV_DATA_REQUEST); unknown events are ignored.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new segments and transmit immediately */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR was sent; poll the peer with RR so it knows
			 * we can receive again, and await the F-bit reply.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and wait for F-bit */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2708
2709 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2710 struct l2cap_ctrl *control,
2711 struct sk_buff_head *skbs, u8 event)
2712 {
2713 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2714 event);
2715
2716 switch (event) {
2717 case L2CAP_EV_DATA_REQUEST:
2718 if (chan->tx_send_head == NULL)
2719 chan->tx_send_head = skb_peek(skbs);
2720 /* Queue data, but don't send. */
2721 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2722 break;
2723 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2724 BT_DBG("Enter LOCAL_BUSY");
2725 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2726
2727 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2728 /* The SREJ_SENT state must be aborted if we are to
2729 * enter the LOCAL_BUSY state.
2730 */
2731 l2cap_abort_rx_srej_sent(chan);
2732 }
2733
2734 l2cap_send_ack(chan);
2735
2736 break;
2737 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2738 BT_DBG("Exit LOCAL_BUSY");
2739 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2740
2741 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2742 struct l2cap_ctrl local_control;
2743 memset(&local_control, 0, sizeof(local_control));
2744 local_control.sframe = 1;
2745 local_control.super = L2CAP_SUPER_RR;
2746 local_control.poll = 1;
2747 local_control.reqseq = chan->buffer_seq;
2748 l2cap_send_sframe(chan, &local_control);
2749
2750 chan->retry_count = 1;
2751 __set_monitor_timer(chan);
2752 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2753 }
2754 break;
2755 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2756 l2cap_process_reqseq(chan, control->reqseq);
2757
2758 /* Fall through */
2759
2760 case L2CAP_EV_RECV_FBIT:
2761 if (control && control->final) {
2762 __clear_monitor_timer(chan);
2763 if (chan->unacked_frames > 0)
2764 __set_retrans_timer(chan);
2765 chan->retry_count = 0;
2766 chan->tx_state = L2CAP_TX_STATE_XMIT;
2767 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2768 }
2769 break;
2770 case L2CAP_EV_EXPLICIT_POLL:
2771 /* Ignore */
2772 break;
2773 case L2CAP_EV_MONITOR_TO:
2774 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2775 l2cap_send_rr_or_rnr(chan, 1);
2776 __set_monitor_timer(chan);
2777 chan->retry_count++;
2778 } else {
2779 l2cap_send_disconn_req(chan, ECONNABORTED);
2780 }
2781 break;
2782 default:
2783 break;
2784 }
2785 }
2786
2787 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2788 struct sk_buff_head *skbs, u8 event)
2789 {
2790 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2791 chan, control, skbs, event, chan->tx_state);
2792
2793 switch (chan->tx_state) {
2794 case L2CAP_TX_STATE_XMIT:
2795 l2cap_tx_state_xmit(chan, control, skbs, event);
2796 break;
2797 case L2CAP_TX_STATE_WAIT_F:
2798 l2cap_tx_state_wait_f(chan, control, skbs, event);
2799 break;
2800 default:
2801 /* Ignore event */
2802 break;
2803 }
2804 }
2805
2806 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2807 struct l2cap_ctrl *control)
2808 {
2809 BT_DBG("chan %p, control %p", chan, control);
2810 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2811 }
2812
2813 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2814 struct l2cap_ctrl *control)
2815 {
2816 BT_DBG("chan %p, control %p", chan, control);
2817 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2818 }
2819
2820 /* Copy frame to all raw sockets on that connection */
2821 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2822 {
2823 struct sk_buff *nskb;
2824 struct l2cap_chan *chan;
2825
2826 BT_DBG("conn %p", conn);
2827
2828 mutex_lock(&conn->chan_lock);
2829
2830 list_for_each_entry(chan, &conn->chan_l, list) {
2831 if (chan->chan_type != L2CAP_CHAN_RAW)
2832 continue;
2833
2834 /* Don't send frame to the channel it came from */
2835 if (bt_cb(skb)->chan == chan)
2836 continue;
2837
2838 nskb = skb_clone(skb, GFP_KERNEL);
2839 if (!nskb)
2840 continue;
2841 if (chan->ops->recv(chan, nskb))
2842 kfree_skb(nskb);
2843 }
2844
2845 mutex_unlock(&conn->chan_lock);
2846 }
2847
2848 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header + command header followed
 * by @dlen bytes of @data.  Payload exceeding the connection MTU is
 * carried in continuation fragments chained on frag_list.
 * Returns NULL on allocation failure or if the MTU cannot even hold the
 * headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill whatever fits of the payload into the first skb */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb also frees the already-chained fragments */
	kfree_skb(skb);
	return NULL;
}
2914
/* Parse one configuration option at *ptr, returning its total size and
 * advancing *ptr past it.  Options of 1/2/4 bytes are decoded into *val;
 * for any other length *val is a pointer into the option buffer itself.
 *
 * NOTE(review): there is no check that opt->len stays within the
 * remaining request buffer — the caller's length accounting is the only
 * guard.  Upstream later hardened this path by passing an explicit
 * buffer length; verify callers bound their loops correctly.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer, not a value */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2948
/* Append one configuration option at *ptr and advance *ptr past it.
 * @val is either the value itself (len 1/2/4) or a pointer to the data
 * to copy (any other len).
 *
 * NOTE(review): this writes blindly at *ptr with no output-buffer bound;
 * callers must guarantee enough room remains (this is the area hardened
 * upstream for CVE-2017-1000251 by threading a size limit through).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2978
2979 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2980 {
2981 struct l2cap_conf_efs efs;
2982
2983 switch (chan->mode) {
2984 case L2CAP_MODE_ERTM:
2985 efs.id = chan->local_id;
2986 efs.stype = chan->local_stype;
2987 efs.msdu = cpu_to_le16(chan->local_msdu);
2988 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2989 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2990 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2991 break;
2992
2993 case L2CAP_MODE_STREAMING:
2994 efs.id = 1;
2995 efs.stype = L2CAP_SERV_BESTEFFORT;
2996 efs.msdu = cpu_to_le16(chan->local_msdu);
2997 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2998 efs.acc_lat = 0;
2999 efs.flush_to = 0;
3000 break;
3001
3002 default:
3003 return;
3004 }
3005
3006 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3007 (unsigned long) &efs);
3008 }
3009
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR acknowledgment.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drops the reference presumably taken when the ack timer was
	 * armed — TODO confirm against the timer-arming macro.
	 */
	l2cap_chan_put(chan);
}
3029
/* Reset per-channel transmit/receive state and, for ERTM channels,
 * initialize the timers, queues and sequence lists.  Returns 0 on
 * success or a negative errno if a sequence list allocation fails.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Everything below is ERTM-only */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on failure of the second */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3074
3075 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3076 {
3077 switch (mode) {
3078 case L2CAP_MODE_STREAMING:
3079 case L2CAP_MODE_ERTM:
3080 if (l2cap_mode_supported(mode, remote_feat_mask))
3081 return mode;
3082 /* fall through */
3083 default:
3084 return L2CAP_MODE_BASIC;
3085 }
3086 }
3087
3088 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3089 {
3090 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3091 }
3092
3093 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3094 {
3095 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3096 }
3097
/* Fill in the ERTM retransmission and monitor timeouts in @rfc.
 * AMP channels derive them from the controller's best-effort flush
 * timeout; BR/EDR channels use the spec defaults.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3135
3136 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3137 {
3138 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3139 __l2cap_ews_supported(chan->conn)) {
3140 /* use extended control field */
3141 set_bit(FLAG_EXT_CTRL, &chan->flags);
3142 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3143 } else {
3144 chan->tx_win = min_t(u16, chan->tx_win,
3145 L2CAP_DEFAULT_TX_WINDOW);
3146 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3147 }
3148 chan->ack_win = chan->tx_win;
3149 }
3150
/* Build an outgoing Configure Request for @chan into @data.
 * On the first request the channel mode may be (re)negotiated based on
 * the remote feature mask.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep their requested mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Skip the RFC option if the remote knows neither mode */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size is capped by the HCI MTU minus worst-case
		 * extended header, SDU length and FCS overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Windows above the default are signalled via EWS */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3271
/* Parse the peer's Configure Request stored in chan->conf_req and build
 * the Configure Response into @data.  Returns the number of response
 * bytes written, or -ECONNREFUSED if the requested configuration cannot
 * be negotiated.
 *
 * NOTE(review): the option values come from the remote peer.  Option
 * bodies are only copied when olen matches the expected struct size,
 * but `efs` may later be read with remote_efs set even if the copy was
 * skipped (olen mismatch) — TODO confirm; upstream hardened this path.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option in the request */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window only valid with high speed */
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Hints may be silently ignored; real options
			 * must be echoed back as unknown.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 devices cannot accept a different mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Mode mismatch after a full round of negotiation is fatal */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3485
/* Parse the peer's Configure Response (@rsp, @len bytes) and build our
 * follow-up Configure Request into @data, updating *result as needed.
 * Returns the number of request bytes written or -ECONNREFUSED.
 *
 * NOTE(review): options are echoed into @data without an output-buffer
 * bound — this is the code path hardened upstream for CVE-2017-1000251;
 * callers must size @data for the worst case.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Reject MTUs below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State-2 devices cannot accept a mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel cannot be upgraded by the response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3594
3595 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3596 u16 result, u16 flags)
3597 {
3598 struct l2cap_conf_rsp *rsp = data;
3599 void *ptr = rsp->data;
3600
3601 BT_DBG("chan %p", chan);
3602
3603 rsp->scid = cpu_to_le16(chan->dcid);
3604 rsp->result = cpu_to_le16(result);
3605 rsp->flags = cpu_to_le16(flags);
3606
3607 return ptr - data;
3608 }
3609
3610 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3611 {
3612 struct l2cap_le_conn_rsp rsp;
3613 struct l2cap_conn *conn = chan->conn;
3614
3615 BT_DBG("chan %p", chan);
3616
3617 rsp.dcid = cpu_to_le16(chan->scid);
3618 rsp.mtu = cpu_to_le16(chan->imtu);
3619 rsp.mps = cpu_to_le16(chan->mps);
3620 rsp.credits = cpu_to_le16(chan->rx_credits);
3621 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3622
3623 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3624 &rsp);
3625 }
3626
/* Send the deferred BR/EDR (or AMP) connection response for a channel
 * whose setup was postponed, then start configuration if needed.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	/* scid/dcid are swapped relative to our local view: the peer's
	 * source CID is what we store as dcid.
	 */
	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	/* Channels created over an AMP link answer with Create Channel
	 * Response instead of Connection Response.
	 */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only send the initial config request once */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3655
3656 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3657 {
3658 int type, olen;
3659 unsigned long val;
3660 /* Use sane default values in case a misbehaving remote device
3661 * did not send an RFC or extended window size option.
3662 */
3663 u16 txwin_ext = chan->ack_win;
3664 struct l2cap_conf_rfc rfc = {
3665 .mode = chan->mode,
3666 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3667 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3668 .max_pdu_size = cpu_to_le16(chan->imtu),
3669 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3670 };
3671
3672 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3673
3674 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3675 return;
3676
3677 while (len >= L2CAP_CONF_OPT_SIZE) {
3678 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3679
3680 switch (type) {
3681 case L2CAP_CONF_RFC:
3682 if (olen == sizeof(rfc))
3683 memcpy(&rfc, (void *)val, olen);
3684 break;
3685 case L2CAP_CONF_EWS:
3686 txwin_ext = val;
3687 break;
3688 }
3689 }
3690
3691 switch (rfc.mode) {
3692 case L2CAP_MODE_ERTM:
3693 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3694 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3695 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3696 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3697 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3698 else
3699 chan->ack_win = min_t(u16, chan->ack_win,
3700 rfc.txwin_size);
3701 break;
3702 case L2CAP_MODE_STREAMING:
3703 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3704 }
3705 }
3706
3707 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3708 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3709 u8 *data)
3710 {
3711 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3712
3713 if (cmd_len < sizeof(*rej))
3714 return -EPROTO;
3715
3716 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3717 return 0;
3718
3719 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3720 cmd->ident == conn->info_ident) {
3721 cancel_delayed_work(&conn->info_timer);
3722
3723 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3724 conn->info_ident = 0;
3725
3726 l2cap_conn_start(conn);
3727 }
3728
3729 return 0;
3730 }
3731
/* Handle an incoming Connection Request (or an AMP Create Channel
 * request, depending on @rsp_code) against a listening PSM.  Builds and
 * sends the response itself, may kick off feature discovery and/or the
 * first configuration request, and returns the new channel or NULL if
 * none was created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Defer to userspace for authorization */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to finish first */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Remote feature mask still unknown; answer pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	/* Balance the reference taken by the PSM lookup above */
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* If we answered pending for lack of information, start the
	 * feature-mask information exchange now.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, send the first configuration request */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3866
/* Handle an incoming L2CAP Connection Request signalling command */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	/* Notify the management interface exactly once, the first time
	 * an L2CAP connect is seen on a managed controller.
	 */
	hci_dev_lock(hdev);
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3885
/* Handle a Connection Response (or AMP Create Channel Response): look
 * up the channel by scid, or by command ident when scid is not yet
 * assigned, and advance its state according to the result code.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No scid yet; match the response by command ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial config request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal; tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3958
3959 static inline void set_default_fcs(struct l2cap_chan *chan)
3960 {
3961 /* FCS is enabled only in ERTM or streaming mode, if one or both
3962 * sides request it.
3963 */
3964 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3965 chan->fcs = L2CAP_FCS_NONE;
3966 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3967 chan->fcs = L2CAP_FCS_CRC16;
3968 }
3969
3970 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3971 u8 ident, u16 flags)
3972 {
3973 struct l2cap_conn *conn = chan->conn;
3974
3975 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3976 flags);
3977
3978 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3979 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3980
3981 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3982 l2cap_build_conf_rsp(chan, data,
3983 L2CAP_CONF_SUCCESS, flags), data);
3984 }
3985
3986 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3987 u16 scid, u16 dcid)
3988 {
3989 struct l2cap_cmd_rej_cid rej;
3990
3991 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3992 rej.scid = __cpu_to_le16(scid);
3993 rej.dcid = __cpu_to_le16(dcid);
3994
3995 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3996 }
3997
/* Handle a Configuration Request.  Option data may arrive split over
 * several requests (continuation flag); fragments accumulate in
 * chan->conf_req until the final one, which is then parsed as a whole.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked; unlocked at the unlock label */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration only makes sense in CONFIG or CONNECT2 state */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We have not sent our own config request yet; do it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4106
/* Handle a Configuration Response and drive the channel towards ready
 * (or disconnect) depending on the result code.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked; unlocked at the done label */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* AMP channels wait for the logical link before
			 * the final response can be sent.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}

		/* fall through: too many unacceptable responses, give up */
	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4218
/* Handle a Disconnection Request: acknowledge it and tear down the
 * matching channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a reference so the channel survives l2cap_chan_del()
	 * until close() below has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4265
/* Handle a Disconnection Response: the remote confirmed our disconnect
 * request, so tear the channel down locally.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so the channel survives l2cap_chan_del()
	 * until close() below has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4304
/* Handle an Information Request: answer with our feature mask, our
 * fixed channel map, or "not supported" for unknown types.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte response header plus the 32-bit feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE: this toggles the A2MP bit in the file-global
		 * l2cap_fixed_chan map, affecting all connections.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		/* Unknown information type */
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4359
/* Handle an Information Response from the remote and continue the
 * feature discovery sequence (feature mask first, then fixed channels),
 * starting queued channels once discovery completes.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote could not answer; finish discovery regardless */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If fixed channels are supported, query them next */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4422
/* Handle an AMP Create Channel Request.  Controller id 0 means the
 * channel goes over BR/EDR; otherwise the AMP controller is validated
 * and the channel is bound to its logical link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP link to the peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* No FCS over AMP links */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	/* Unknown or unusable AMP controller: reject the request */
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4499
4500 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4501 {
4502 struct l2cap_move_chan_req req;
4503 u8 ident;
4504
4505 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4506
4507 ident = l2cap_get_ident(chan->conn);
4508 chan->ident = ident;
4509
4510 req.icid = cpu_to_le16(chan->scid);
4511 req.dest_amp_id = dest_amp_id;
4512
4513 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4514 &req);
4515
4516 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4517 }
4518
4519 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4520 {
4521 struct l2cap_move_chan_rsp rsp;
4522
4523 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4524
4525 rsp.icid = cpu_to_le16(chan->dcid);
4526 rsp.result = cpu_to_le16(result);
4527
4528 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4529 sizeof(rsp), &rsp);
4530 }
4531
4532 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4533 {
4534 struct l2cap_move_chan_cfm cfm;
4535
4536 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4537
4538 chan->ident = l2cap_get_ident(chan->conn);
4539
4540 cfm.icid = cpu_to_le16(chan->scid);
4541 cfm.result = cpu_to_le16(result);
4542
4543 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4544 sizeof(cfm), &cfm);
4545
4546 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4547 }
4548
4549 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4550 {
4551 struct l2cap_move_chan_cfm cfm;
4552
4553 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4554
4555 cfm.icid = cpu_to_le16(icid);
4556 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4557
4558 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4559 sizeof(cfm), &cfm);
4560 }
4561
4562 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4563 u16 icid)
4564 {
4565 struct l2cap_move_chan_cfm_rsp rsp;
4566
4567 BT_DBG("icid 0x%4.4x", icid);
4568
4569 rsp.icid = cpu_to_le16(icid);
4570 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4571 }
4572
4573 static void __release_logical_link(struct l2cap_chan *chan)
4574 {
4575 chan->hs_hchan = NULL;
4576 chan->hs_hcon = NULL;
4577
4578 /* Placeholder - release the logical link */
4579 }
4580
/* React to a failed AMP logical link setup: disconnect a channel still
 * being created, or unwind an in-progress channel move.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Tell the initiator the move is not possible */
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4611
/* Logical link is up for a channel being created on an AMP controller:
 * send the pending config response and finish setup if configuration
 * in both directions is already done.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Send the config response that was deferred until the logical
	 * link came up (chan->ident was saved for this purpose).
	 */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4634
/* Logical link is up for an in-progress channel move: advance the move
 * state machine according to our role and what we are waiting for.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local busy condition clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4668
/* Call with chan locked */
/* Completion callback for AMP logical link setup: dispatch to failure
 * handling, channel-creation completion, or move completion depending
 * on @status and the channel's current state.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4689
/* Begin moving a channel between BR/EDR and an AMP controller.  On
 * BR/EDR a move only starts when policy prefers AMP; moving back to
 * BR/EDR (move_id 0) can start right away.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		/* move_id 0 requests a move back to BR/EDR */
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4708
/* Continue channel creation once the AMP physical link attempt has a
 * result: either proceed over AMP, fall back to BR/EDR (outgoing), or
 * answer the peer's pending create request (incoming).
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No FCS over AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		/* On success, move straight into configuration */
		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4760
4761 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4762 u8 remote_amp_id)
4763 {
4764 l2cap_move_setup(chan);
4765 chan->move_id = local_amp_id;
4766 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4767
4768 l2cap_send_move_chan_req(chan, remote_amp_id);
4769 }
4770
/* Respond to an incoming channel move once the physical link result is
 * known.
 *
 * NOTE(review): 'result' is currently unused and hchan is a placeholder
 * that is always NULL until logical-link lookup is implemented, so only
 * the "not allowed" branch can execute today - confirm when the
 * placeholder is filled in.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4795
4796 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4797 {
4798 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4799 u8 rsp_result;
4800 if (result == -EINVAL)
4801 rsp_result = L2CAP_MR_BAD_ID;
4802 else
4803 rsp_result = L2CAP_MR_NOT_ALLOWED;
4804
4805 l2cap_send_move_chan_rsp(chan, rsp_result);
4806 }
4807
4808 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4809 chan->move_state = L2CAP_MOVE_STABLE;
4810
4811 /* Restart data transmission */
4812 l2cap_ertm_send(chan);
4813 }
4814
/* Invoke with locked chan
 *
 * Physical (AMP) link setup confirmation. Dispatches to channel
 * creation, move initiation/response, or move cancellation depending on
 * channel state, result, and our move role.
 *
 * NOTE(review): the channel is unlocked here only on the early
 * DISCONN/CLOSED return; on every other path the caller retains the
 * lock - confirm against the callers in amp code.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Channel not up yet: this completes channel creation */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4848
/* Handle an incoming Move Channel Request.
 *
 * Validates the channel (dynamic CID, ERTM/streaming mode, policy),
 * checks the destination controller id, resolves move collisions by
 * bd_addr comparison, and then accepts the move as responder. A Move
 * Channel Response is sent in every case where the channel exists.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves are only meaningful with high-speed support enabled */
	if (!conn->hs_enabled)
		return -EINVAL;

	/* NOTE(review): chan appears to be returned locked and is
	 * unlocked before returning - confirm l2cap_get_chan_by_dcid
	 * semantics.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels may change controllers */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A non-BR/EDR destination must be a powered-up AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4946
/* Continue the initiator side of a channel move after a successful or
 * pending Move Channel Response, advancing the per-channel move state
 * machine.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* NOTE(review): chan appears to be returned locked and is
	 * unlocked at the end - confirm l2cap_get_chan_by_scid semantics.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Channel is gone; confirm using the icid only */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Peer needs more time: re-arm the extended timeout */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5036
/* Handle a failed Move Channel Response. On a collision the initiator
 * switches to the responder role; otherwise the move is rolled back.
 * An "unconfirmed" Move Channel Confirmation is always sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision: let the peer drive the move */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5065
5066 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5067 struct l2cap_cmd_hdr *cmd,
5068 u16 cmd_len, void *data)
5069 {
5070 struct l2cap_move_chan_rsp *rsp = data;
5071 u16 icid, result;
5072
5073 if (cmd_len != sizeof(*rsp))
5074 return -EPROTO;
5075
5076 icid = le16_to_cpu(rsp->icid);
5077 result = le16_to_cpu(rsp->result);
5078
5079 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5080
5081 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5082 l2cap_move_continue(conn, icid, result);
5083 else
5084 l2cap_move_fail(conn, cmd->ident, icid, result);
5085
5086 return 0;
5087 }
5088
/* Handle a Move Channel Confirmation: commit (adopt the new controller
 * id) or roll back the move, then always acknowledge with a Move
 * Channel Confirmation Response.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit: switch to the destination controller */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Roll back to the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5130
/* Handle a Move Channel Confirmation Response: the final handshake step
 * for a confirmed move. Commits the controller switch, releases the
 * logical link if we moved back to BR/EDR, and finishes the move.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Logical link is no longer needed on BR/EDR */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5165
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are master of the LE link. A response is always
 * sent; accepted parameters are applied to the link and reported to
 * mgmt (userspace).
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may be asked to update connection parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the accepted parameters and notify userspace */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5215
/* Handle an LE Credit Based Connection Response for a pending outgoing
 * LE connection-oriented channel identified by the request ident.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* Reject MTU/MPS below 23 (the LE minimum) on a success result */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* The pending channel is matched by the request's ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Adopt the peer's parameters and mark the channel ready */
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		/* Any non-success result refuses the connection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5274
5275 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5276 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5277 u8 *data)
5278 {
5279 int err = 0;
5280
5281 switch (cmd->code) {
5282 case L2CAP_COMMAND_REJ:
5283 l2cap_command_rej(conn, cmd, cmd_len, data);
5284 break;
5285
5286 case L2CAP_CONN_REQ:
5287 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5288 break;
5289
5290 case L2CAP_CONN_RSP:
5291 case L2CAP_CREATE_CHAN_RSP:
5292 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5293 break;
5294
5295 case L2CAP_CONF_REQ:
5296 err = l2cap_config_req(conn, cmd, cmd_len, data);
5297 break;
5298
5299 case L2CAP_CONF_RSP:
5300 l2cap_config_rsp(conn, cmd, cmd_len, data);
5301 break;
5302
5303 case L2CAP_DISCONN_REQ:
5304 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5305 break;
5306
5307 case L2CAP_DISCONN_RSP:
5308 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5309 break;
5310
5311 case L2CAP_ECHO_REQ:
5312 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5313 break;
5314
5315 case L2CAP_ECHO_RSP:
5316 break;
5317
5318 case L2CAP_INFO_REQ:
5319 err = l2cap_information_req(conn, cmd, cmd_len, data);
5320 break;
5321
5322 case L2CAP_INFO_RSP:
5323 l2cap_information_rsp(conn, cmd, cmd_len, data);
5324 break;
5325
5326 case L2CAP_CREATE_CHAN_REQ:
5327 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5328 break;
5329
5330 case L2CAP_MOVE_CHAN_REQ:
5331 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5332 break;
5333
5334 case L2CAP_MOVE_CHAN_RSP:
5335 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5336 break;
5337
5338 case L2CAP_MOVE_CHAN_CFM:
5339 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5340 break;
5341
5342 case L2CAP_MOVE_CHAN_CFM_RSP:
5343 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5344 break;
5345
5346 default:
5347 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5348 err = -EINVAL;
5349 break;
5350 }
5351
5352 return err;
5353 }
5354
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Looks up a listening channel for the PSM, checks security and
 * duplicate CIDs, creates the new channel and either defers the
 * decision to userspace (L2CAP_CR_PEND) or responds immediately.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* Reject MTU/MPS below 23 (the LE minimum) */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* The link must already meet the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Adopt link addresses and the peer's channel parameters */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent later by userspace accept */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5472
/* Handle an LE Flow Control Credit packet: add the peer's new credits
 * and transmit as many queued frames as the credits allow.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The running total must never exceed LE_FLOWCTL_MAX_CREDITS;
	 * a peer that overflows it is disconnected.
	 */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Drain queued frames while credits remain */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	/* Credits left over: let the owner resume sending */
	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5520
5521 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5522 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5523 u8 *data)
5524 {
5525 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5526 struct l2cap_chan *chan;
5527
5528 if (cmd_len < sizeof(*rej))
5529 return -EPROTO;
5530
5531 mutex_lock(&conn->chan_lock);
5532
5533 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5534 if (!chan)
5535 goto done;
5536
5537 l2cap_chan_lock(chan);
5538 l2cap_chan_del(chan, ECONNREFUSED);
5539 l2cap_chan_unlock(chan);
5540
5541 done:
5542 mutex_unlock(&conn->chan_lock);
5543 return 0;
5544 }
5545
5546 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5547 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5548 u8 *data)
5549 {
5550 int err = 0;
5551
5552 switch (cmd->code) {
5553 case L2CAP_COMMAND_REJ:
5554 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5555 break;
5556
5557 case L2CAP_CONN_PARAM_UPDATE_REQ:
5558 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5559 break;
5560
5561 case L2CAP_CONN_PARAM_UPDATE_RSP:
5562 break;
5563
5564 case L2CAP_LE_CONN_RSP:
5565 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5566 break;
5567
5568 case L2CAP_LE_CONN_REQ:
5569 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5570 break;
5571
5572 case L2CAP_LE_CREDITS:
5573 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5574 break;
5575
5576 case L2CAP_DISCONN_REQ:
5577 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5578 break;
5579
5580 case L2CAP_DISCONN_RSP:
5581 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5582 break;
5583
5584 default:
5585 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5586 err = -EINVAL;
5587 break;
5588 }
5589
5590 return err;
5591 }
5592
/* Process a PDU from the LE signaling channel. LE signaling carries
 * exactly one command per PDU; a malformed command sends a Command
 * Reject, and the skb is always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Declared length must match exactly; ident 0 is invalid */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5633
/* Process a PDU from the BR/EDR signaling channel. Unlike LE, a single
 * PDU may carry multiple concatenated commands; each is dispatched in
 * turn and a malformed command aborts the loop. The skb is always
 * consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw (sniffer) sockets a copy before processing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Declared length must fit in the remainder; ident 0 is
		 * invalid
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next concatenated command */
		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5682
/* Verify (and strip) the CRC16 FCS on a received ERTM/streaming frame.
 *
 * Returns 0 on success or when the channel uses no FCS, -EBADMSG on
 * checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off the frame first; the two FCS bytes are
		 * still in the buffer and are read from just past the new
		 * length.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The FCS covers the (already pulled) L2CAP header too */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5703
/* Answer a poll (P-bit) with the F-bit set: send RNR if locally busy,
 * flush any pending I-frames (which may carry the F-bit), and fall back
 * to an RR s-frame if nothing else delivered the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote is no longer busy: restart the retransmission timer if
	 * frames are still outstanding.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5737
5738 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5739 struct sk_buff **last_frag)
5740 {
5741 /* skb->len reflects data in skb as well as all fragments
5742 * skb->data_len reflects only data in fragments
5743 */
5744 if (!skb_has_frag_list(skb))
5745 skb_shinfo(skb)->frag_list = new_frag;
5746
5747 new_frag->next = NULL;
5748
5749 (*last_frag)->next = new_frag;
5750 *last_frag = new_frag;
5751
5752 skb->len += new_frag->len;
5753 skb->data_len += new_frag->len;
5754 skb->truesize += new_frag->truesize;
5755 }
5756
/* Reassemble a segmented SDU from incoming ERTM/streaming I-frames.
 *
 * On success the skb is consumed (delivered or queued onto chan->sdu);
 * on error both the skb and any partially assembled SDU are freed.
 * Returns 0 on success, -EINVAL on a SAR sequencing violation, or the
 * error from the recv callback / -EMSGSIZE on an oversized SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while reassembling is a violation */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new start while reassembling is a violation */
		if (chan->sdu)
			break;

		/* First two payload bytes carry the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* Start segment claiming the whole SDU is a violation */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a start is a violation */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced SDU length is a violation */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End without a start is a violation */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total must match the announced SDU length exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame and any partial SDU on any failure */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5838
/* Re-segment queued frames after a channel move changes the MPS.
 * Placeholder: no resegmentation is implemented yet, so report success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5844
5845 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5846 {
5847 u8 event;
5848
5849 if (chan->mode != L2CAP_MODE_ERTM)
5850 return;
5851
5852 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5853 l2cap_tx(chan, NULL, NULL, event);
5854 }
5855
/* Deliver I-frames buffered in the SREJ queue that are now in sequence.
 *
 * Returns 0 on success or the first reassembly error encountered.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: stop until the missing frame arrives */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	/* Queue fully drained: leave SREJ recovery and ack the peer */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5889
/* Handle a received SREJ s-frame: retransmit the single requested
 * I-frame, with P/F-bit bookkeeping to avoid duplicate retransmissions
 * (CONN_SREJ_ACT / srej_save_reqseq).
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* SREJ for the next unsent sequence number is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll demands a final bit on our retransmission */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Remember this SREJ so the matching F-bit response does
		 * not trigger a second retransmission.
		 */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if it already happened in
			 * response to the earlier poll SREJ.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5947
/* Handle a received REJ (Reject) S-frame in ERTM mode.
 *
 * The peer rejects everything from control->reqseq onward, so every
 * unacknowledged frame starting there is retransmitted (go-back-n).
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the next, never-sent sequence number is invalid. */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Enforce the retry limit (max_tx == 0 means unlimited). */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1: retransmit only if this is not the response to a
		 * poll that was already handled.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5984
/* Classify the txseq of an incoming I-frame relative to the receive
 * window and the SREJ recovery state.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications that drive the
 * receive state machines: EXPECTED, EXPECTED_SREJ, UNEXPECTED,
 * UNEXPECTED_SREJ, DUPLICATE, DUPLICATE_SREJ, INVALID, or
 * INVALID_IGNORE.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the retransmission we are
		 * waiting for next.
		 */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		/* Requested via SREJ but arriving out of the order we
		 * asked for.
		 */
		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq (modulo the sequence space) means
	 * we have already received this frame.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6070
/* ERTM receive state machine handler for the RECV (normal) state.
 *
 * Processes I-frames and S-frame events while no SREJ recovery is in
 * progress.  An unexpected txseq gap switches the channel into
 * SREJ_SENT state.  The skb is consumed (queued or reassembled) only
 * when skb_in_use is set; otherwise it is freed here.
 *
 * Returns 0 on success or a negative error from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy, expected frames are
			 * dropped; the peer will retransmit them once
			 * the busy condition clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; still process the embedded
			 * acknowledgement.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Skip retransmission while an AMP channel move
			 * is in progress.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just left the busy state: restart the
			 * retransmission timer if frames are outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Free the skb unless ownership was transferred above. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6204
/* ERTM receive state machine handler for the SREJ_SENT state.
 *
 * Active while one or more SREJ requests are outstanding.  Incoming
 * I-frames are queued on srej_q until the missing frames arrive and
 * the sequence can be replayed to the reassembler (the queued-iframes
 * pass transitions back to RECV when srej_q drains).
 *
 * Returns 0 on success or a negative error from reassembly.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we were waiting for next;
			 * pop it off the SREJ list and try to flush the
			 * queued frames to the reassembler.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with an F=1 SREJ for the tail
			 * of the outstanding SREJ list.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Plain RNR: acknowledge with an RR carrying the
			 * current buffer_seq.
			 */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Free the skb unless ownership was transferred above. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6347
6348 static int l2cap_finish_move(struct l2cap_chan *chan)
6349 {
6350 BT_DBG("chan %p", chan);
6351
6352 chan->rx_state = L2CAP_RX_STATE_RECV;
6353
6354 if (chan->hs_hcon)
6355 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6356 else
6357 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6358
6359 return l2cap_resegment(chan);
6360 }
6361
/* ERTM receive state machine handler for WAIT_P (channel move,
 * waiting for a poll from the peer).
 *
 * On receiving the P=1 frame, rewind the transmit side to the point
 * the peer acknowledges, finish the move, answer with F=1, and fall
 * through to normal RECV handling for S-frame events.
 *
 * Returns 0 on success, -EPROTO on protocol violation, or a negative
 * error from resegmentation.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a poll is acceptable in this state. */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames must not carry the poll bit. */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6399
/* ERTM receive state machine handler for WAIT_F (channel move,
 * waiting for the final response to our poll).
 *
 * On receiving the F=1 frame, rewind the transmit side to the point
 * the peer acknowledges, adopt the new controller's MTU, resegment,
 * and process the frame through normal RECV handling.
 *
 * Returns 0 on success, -EPROTO on protocol violation, or a negative
 * error from resegmentation/reassembly.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only a final response is acceptable in this state. */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the AMP link's block MTU if moved to high speed,
	 * otherwise the BR/EDR ACL MTU.
	 */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6437
6438 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6439 {
6440 /* Make sure reqseq is for a packet that has been sent but not acked */
6441 u16 unacked;
6442
6443 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6444 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6445 }
6446
6447 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6448 struct sk_buff *skb, u8 event)
6449 {
6450 int err = 0;
6451
6452 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6453 control, skb, event, chan->rx_state);
6454
6455 if (__valid_reqseq(chan, control->reqseq)) {
6456 switch (chan->rx_state) {
6457 case L2CAP_RX_STATE_RECV:
6458 err = l2cap_rx_state_recv(chan, control, skb, event);
6459 break;
6460 case L2CAP_RX_STATE_SREJ_SENT:
6461 err = l2cap_rx_state_srej_sent(chan, control, skb,
6462 event);
6463 break;
6464 case L2CAP_RX_STATE_WAIT_P:
6465 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6466 break;
6467 case L2CAP_RX_STATE_WAIT_F:
6468 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6469 break;
6470 default:
6471 /* shut it down */
6472 break;
6473 }
6474 } else {
6475 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6476 control->reqseq, chan->next_tx_seq,
6477 chan->expected_ack_seq);
6478 l2cap_send_disconn_req(chan, ECONNRESET);
6479 }
6480
6481 return err;
6482 }
6483
/* Receive path for streaming mode.
 *
 * Streaming mode has no retransmission: only the exactly-expected
 * txseq is reassembled; anything else discards both the frame and any
 * partially reassembled SDU, then resynchronizes the expected
 * sequence numbers to the received frame.
 *
 * Always returns 0 (streaming mode has no recoverable errors here).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* A gap means the in-progress SDU can never complete;
		 * drop the partial reassembly state and this frame.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on the received frame regardless of outcome. */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6521
/* Parse and validate an ERTM/streaming-mode PDU, then feed it to the
 * appropriate receive path.
 *
 * Performs FCS checking, payload length validation against MPS, and
 * F/P bit consistency checks before dispatching I-frames to
 * l2cap_rx()/l2cap_stream_rx() and S-frames to l2cap_rx().
 *
 * Always returns 0; protocol errors trigger a disconnect internally.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU length field and FCS trailer from the
	 * payload length compared against MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps L2CAP_SUPER_* values to receive events. */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload. */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6609
/* Return receive credits to the peer of an LE credit-based channel.
 *
 * Sends an LE Flow Control Credit packet topping rx_credits back up
 * to le_max_credits, but only once the remaining credits have fallen
 * below half of the initial amount (to avoid a credit packet per
 * received PDU).
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6635
/* Receive path for LE credit-based flow control mode.
 *
 * Each PDU consumes one credit.  The first PDU of an SDU starts with
 * a 2-byte SDU length; subsequent PDUs are appended until sdu_len
 * bytes have accumulated, at which point the SDU is delivered via
 * chan->ops->recv().
 *
 * Returns a negative error only for conditions the caller must react
 * to (no credits / PDU larger than the MTU); reassembly errors are
 * handled internally, including freeing the skb, and report 0 so the
 * caller does not double-free.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* The peer must not send without credits - protocol error. */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the SDU length. */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver immediately. */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* Ownership of skb moves into the SDU chain. */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			/* recv() took ownership of the SDU. */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6725
/* Dispatch an incoming data packet to the channel identified by cid.
 *
 * Looks up the channel (creating an A2MP channel on demand for
 * L2CAP_CID_A2MP), then routes the skb by channel mode.  The channel
 * is returned locked by l2cap_get_chan_by_scid() and unlocked at
 * "done"; the skb is consumed on every path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			/* Lock manually to match the locked-lookup path. */
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* LE mode frees the skb internally on error. */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb. */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6792
/* Deliver a connectionless (UCD) data packet to a matching PSM
 * listener.
 *
 * Only valid on ACL links.  The channel reference taken by
 * l2cap_global_chan_by_psm() is released on every path; the skb is
 * consumed either by recv() or freed here.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	/* recv() returning 0 means it took ownership of the skb. */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6829
6830 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6831 {
6832 struct l2cap_hdr *lh = (void *) skb->data;
6833 struct hci_conn *hcon = conn->hcon;
6834 u16 cid, len;
6835 __le16 psm;
6836
6837 if (hcon->state != BT_CONNECTED) {
6838 BT_DBG("queueing pending rx skb");
6839 skb_queue_tail(&conn->pending_rx, skb);
6840 return;
6841 }
6842
6843 skb_pull(skb, L2CAP_HDR_SIZE);
6844 cid = __le16_to_cpu(lh->cid);
6845 len = __le16_to_cpu(lh->len);
6846
6847 if (len != skb->len) {
6848 kfree_skb(skb);
6849 return;
6850 }
6851
6852 /* Since we can't actively block incoming LE connections we must
6853 * at least ensure that we ignore incoming data from them.
6854 */
6855 if (hcon->type == LE_LINK &&
6856 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6857 bdaddr_type(hcon, hcon->dst_type))) {
6858 kfree_skb(skb);
6859 return;
6860 }
6861
6862 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6863
6864 switch (cid) {
6865 case L2CAP_CID_SIGNALING:
6866 l2cap_sig_channel(conn, skb);
6867 break;
6868
6869 case L2CAP_CID_CONN_LESS:
6870 psm = get_unaligned((__le16 *) skb->data);
6871 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6872 l2cap_conless_channel(conn, psm, skb);
6873 break;
6874
6875 case L2CAP_CID_LE_SIGNALING:
6876 l2cap_le_sig_channel(conn, skb);
6877 break;
6878
6879 default:
6880 l2cap_data_channel(conn, cid, skb);
6881 break;
6882 }
6883 }
6884
6885 static void process_pending_rx(struct work_struct *work)
6886 {
6887 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6888 pending_rx_work);
6889 struct sk_buff *skb;
6890
6891 BT_DBG("");
6892
6893 while ((skb = skb_dequeue(&conn->pending_rx)))
6894 l2cap_recv_frame(conn, skb);
6895 }
6896
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection.
 *
 * Allocates the l2cap_conn, binds it to a new HCI channel, selects
 * the MTU from the controller's capabilities, and initializes all
 * locks, lists, and work items.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Idempotent: reuse an already-attached connection. */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		/* Use the controller's LE MTU if it advertises one. */
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6956
6957 static bool is_valid_psm(u16 psm, u8 dst_type) {
6958 if (!psm)
6959 return false;
6960
6961 if (bdaddr_type_is_le(dst_type))
6962 return (psm <= 0x00ff);
6963
6964 /* PSM must be odd and lsb of upper byte must be 0 */
6965 return ((psm & 0x0101) == 0x0001);
6966 }
6967
/* Initiate an outgoing L2CAP channel connection.
 *
 * Validates the PSM/CID combination and channel mode, creates or
 * reuses the underlying HCI connection (LE or ACL as dictated by
 * dst_type), attaches the channel to the connection, and starts the
 * L2CAP connect procedure if the link is already up.
 *
 * Locking order: hci_dev lock -> conn->chan_lock -> chan lock.
 *
 * Returns 0 on success (or if already connecting) and a negative
 * errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels may omit both PSM and CID; everything else needs
	 * a valid PSM or an explicit CID.
	 */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we must connect as slave. */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Fixed channels must not collide with an existing DCID. */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7128 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7129
7130 /* ---- L2CAP interface with lower layer (HCI) ---- */
7131
/* HCI callback: decide whether an incoming ACL connection from bdaddr
 * should be accepted on behalf of L2CAP listeners.
 *
 * Scans the global channel list for listening channels bound either
 * to this adapter's address (exact match) or to BDADDR_ANY (wildcard)
 * and accumulates HCI link-mode flags.  Exact matches take precedence
 * over wildcard matches.
 *
 * Returns the accumulated HCI_LM_* flags (0 = reject).
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
7160
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A matching channel is returned with an extra reference held
 * (l2cap_chan_hold); the caller must drop it with l2cap_chan_put().
 * Returns NULL when no further match exists.
 *
 * The chan_list read lock is held only inside this function; that is
 * what lets the caller take the (potentially sleeping) channel lock
 * between successive calls.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  bdaddr_t *src, u8 link_type)
{
	read_lock(&chan_list_lock);

	/* Resume just past @c, or start from the head of the list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Accept exact source-address matches and wildcard binds */
		if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		/* The channel's address type must match the transport */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;
		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		/* Hold the ref before dropping the list lock so the
		 * channel cannot disappear under the caller.
		 */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7196
/* Called by the HCI layer once an ACL or LE link setup completes
 * (@status == 0) or fails (@status != 0).  On success this attaches an
 * l2cap_conn to the hci_conn and gives every listening fixed channel a
 * chance to instantiate itself on the new link.
 */
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		/* Link setup failed: tear down any pending L2CAP state */
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* Clone the listening channel onto this connection */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_type(hcon, hcon->src_type);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Look up the successor before dropping the reference
		 * taken by the lookup, so @pchan stays valid as the
		 * traversal cursor.
		 */
		next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
					       hcon->type);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7255
7256 int l2cap_disconn_ind(struct hci_conn *hcon)
7257 {
7258 struct l2cap_conn *conn = hcon->l2cap_data;
7259
7260 BT_DBG("hcon %p", hcon);
7261
7262 if (!conn)
7263 return HCI_ERROR_REMOTE_USER_TERM;
7264 return conn->disc_reason;
7265 }
7266
7267 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7268 {
7269 BT_DBG("hcon %p reason %d", hcon, reason);
7270
7271 l2cap_conn_del(hcon, bt_to_errno(reason));
7272 }
7273
7274 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7275 {
7276 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7277 return;
7278
7279 if (encrypt == 0x00) {
7280 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7281 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7282 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7283 chan->sec_level == BT_SECURITY_FIPS)
7284 l2cap_chan_close(chan, ECONNREFUSED);
7285 } else {
7286 if (chan->sec_level == BT_SECURITY_MEDIUM)
7287 __clear_chan_timer(chan);
7288 }
7289 }
7290
/* Called by the HCI layer when an authentication/encryption request for
 * the link completes.  Walks every channel on the connection and
 * advances its state machine according to @status (0 = success) and
 * @encrypt (link encryption now on/off).  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP signalling channels ignore link security changes */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Security upgrade succeeded: adopt the link's level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels not waiting on this security procedure */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			/* Already-established channel: resume traffic and
			 * re-evaluate whether the encryption state still
			 * satisfies its security level.
			 */
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was gated on security */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was gated on security: now send
			 * the deferred connection response.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				/* NOTE(review): fixed-size stack buffer with
				 * no size passed to l2cap_build_conf_req();
				 * later kernels added an explicit buffer
				 * length here — confirm 128 bytes bounds all
				 * options this builder can emit.
				 */
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7382
/* Entry point for inbound ACL data from the HCI layer.  Reassembles
 * fragmented L2CAP PDUs: a start fragment carries the Basic L2CAP
 * header announcing the total length; continuation fragments are
 * appended to conn->rx_skb until rx_len reaches zero, at which point
 * the complete frame is handed to l2cap_recv_frame().
 *
 * Consumes @skb in all cases (either by handing it to
 * l2cap_recv_frame(), which takes ownership, or by freeing it at the
 * drop label).  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start fragment while reassembly is in progress
		 * means the previous PDU was truncated: discard it and
		 * mark the connection unreliable.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		/* Fragment claims more data than the header announced */
		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start fragment */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Would overflow the announced PDU length: reset */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7487
7488 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7489 {
7490 struct l2cap_chan *c;
7491
7492 read_lock(&chan_list_lock);
7493
7494 list_for_each_entry(c, &chan_list, global_l) {
7495 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7496 &c->src, &c->dst,
7497 c->state, __le16_to_cpu(c->psm),
7498 c->scid, c->dcid, c->imtu, c->omtu,
7499 c->sec_level, c->mode);
7500 }
7501
7502 read_unlock(&chan_list_lock);
7503
7504 return 0;
7505 }
7506
/* ->open handler for the l2cap debugfs file; hooks up the single_*
 * seq_file helpers around l2cap_debugfs_show().
 */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7511
/* File operations for the read-only l2cap debugfs entry; the seq_file
 * single_* helpers provide read/llseek/release semantics.
 */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the "l2cap" debugfs file, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7520
7521 int __init l2cap_init(void)
7522 {
7523 int err;
7524
7525 err = l2cap_init_sockets();
7526 if (err < 0)
7527 return err;
7528
7529 if (IS_ERR_OR_NULL(bt_debugfs))
7530 return 0;
7531
7532 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7533 NULL, &l2cap_debugfs_fops);
7534
7535 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7536 &le_max_credits);
7537 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7538 &le_default_mps);
7539
7540 return 0;
7541 }
7542
7543 void l2cap_exit(void)
7544 {
7545 debugfs_remove(l2cap_debugfs);
7546 l2cap_cleanup_sockets();
7547 }
7548
/* Expose disable_ertm as a writable module parameter so ERTM can be
 * turned off at load time or via sysfs for interop debugging.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.177193 seconds and 4 git commands to generate.