ab405f0e53cbf17b9f992eb377309e0fab31ce58
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
#define LE_FLOWCTL_MAX_CREDITS 65535

/* When set, ERTM is disabled.  Consumers of this flag are elsewhere in
 * this file (not visible in this chunk); presumably exposed as a module
 * parameter — verify at the bottom of the file.
 */
bool disable_ertm;

/* Locally supported L2CAP feature mask and fixed-channel map.
 * NOTE(review): presumably advertised in information responses — the
 * consumers are outside this chunk.
 */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };

/* Global list of all L2CAP channels, guarded by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Defaults for LE credit-based flow control (see l2cap_le_flowctl_init). */
static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;

/* Forward declarations for signalling helpers used before definition. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
67 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68 {
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
72 else
73 return BDADDR_LE_RANDOM;
74 }
75
76 return BDADDR_BREDR;
77 }
78
79 /* ---- L2CAP channels ---- */
80
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
82 u16 cid)
83 {
84 struct l2cap_chan *c;
85
86 list_for_each_entry(c, &conn->chan_l, list) {
87 if (c->dcid == cid)
88 return c;
89 }
90 return NULL;
91 }
92
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 u16 cid)
95 {
96 struct l2cap_chan *c;
97
98 list_for_each_entry(c, &conn->chan_l, list) {
99 if (c->scid == cid)
100 return c;
101 }
102 return NULL;
103 }
104
/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* conn->chan_lock orders the lookup against concurrent channel
	 * add/remove; the channel mutex is taken before chan_lock is
	 * dropped so the entry cannot be torn down in between.
	 * NOTE(review): no l2cap_chan_hold() is taken here, so the
	 * returned pointer is only safe while the channel remains on the
	 * list — verify the callers' lifetime assumptions.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
120
/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* Same locking scheme as l2cap_get_chan_by_scid(): lookup and
	 * channel lock both happen under conn->chan_lock; no channel
	 * reference is taken.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
137
138 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
139 u8 ident)
140 {
141 struct l2cap_chan *c;
142
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
145 return c;
146 }
147 return NULL;
148 }
149
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	/* As l2cap_get_chan_by_scid(), but keyed on the signalling ident.
	 * Returns the channel locked (or NULL); no reference is taken.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
163
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165 {
166 struct l2cap_chan *c;
167
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&c->src, src))
170 return c;
171 }
172 return NULL;
173 }
174
175 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
176 {
177 int err;
178
179 write_lock(&chan_list_lock);
180
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
182 err = -EADDRINUSE;
183 goto done;
184 }
185
186 if (psm) {
187 chan->psm = psm;
188 chan->sport = psm;
189 err = 0;
190 } else {
191 u16 p;
192
193 err = -EINVAL;
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
198 err = 0;
199 break;
200 }
201 }
202
203 done:
204 write_unlock(&chan_list_lock);
205 return err;
206 }
207 EXPORT_SYMBOL_GPL(l2cap_add_psm);
208
/* Bind chan to a fixed CID.  Converts the channel to the fixed type and
 * resets the outgoing MTU to the default.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	/* Always succeeds; int return kept for symmetry with
	 * l2cap_add_psm().
	 */
	return 0;
}
223
224 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
225 {
226 u16 cid, dyn_end;
227
228 if (conn->hcon->type == LE_LINK)
229 dyn_end = L2CAP_CID_LE_DYN_END;
230 else
231 dyn_end = L2CAP_CID_DYN_END;
232
233 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
234 if (!__l2cap_get_chan_by_scid(conn, cid))
235 return cid;
236 }
237
238 return 0;
239 }
240
241 static void l2cap_state_change(struct l2cap_chan *chan, int state)
242 {
243 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
244 state_to_string(state));
245
246 chan->state = state;
247 chan->ops->state_change(chan, state, 0);
248 }
249
250 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
251 int state, int err)
252 {
253 chan->state = state;
254 chan->ops->state_change(chan, chan->state, err);
255 }
256
257 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
258 {
259 chan->ops->state_change(chan, chan->state, err);
260 }
261
/* (Re)arm the ERTM retransmission timer, unless disabled (timeout 0) or
 * the monitor timer is already pending — __set_monitor_timer() clears
 * the retrans timer, so a pending monitor timer takes precedence.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
270
/* Switch from retransmission to monitoring: any pending retrans timer is
 * cancelled before the monitor timeout is armed (0 disables it).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
279
280 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
281 u16 seq)
282 {
283 struct sk_buff *skb;
284
285 skb_queue_walk(head, skb) {
286 if (bt_cb(skb)->control.txseq == seq)
287 return skb;
288 }
289
290 return NULL;
291 }
292
293 /* ---- L2CAP sequence number lists ---- */
294
295 /* For ERTM, ordered lists of sequence numbers must be tracked for
296 * SREJ requests that are received and for frames that are to be
297 * retransmitted. These seq_list functions implement a singly-linked
298 * list in an array, where membership in the list can also be checked
299 * in constant time. Items can also be added to the tail of the list
300 * and removed from the head in constant time, without further memory
301 * allocs or frees.
302 */
303
/* Allocate and empty a sequence list sized for a transmit window of
 * size entries.  Returns 0 or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	/* No overflow risk: size is u16, so alloc_size <= 2^16. */
	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Start out empty: head/tail cleared, every slot unused. */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
326
/* Release the backing array; kfree(NULL) is a no-op, so this is safe on
 * a list that was never successfully initialized.
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
331
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	/* A slot holds L2CAP_SEQ_LIST_CLEAR only while seq is not queued. */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
338
/* Remove and return the sequence number at the head of the list.
 * NOTE(review): assumes the list is non-empty — on an empty list this
 * indexes with L2CAP_SEQ_LIST_CLEAR; callers must check head first.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and release this slot. */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last entry: reset to the empty state. */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
354
/* Empty the list in O(n); cheap no-op when it is already empty. */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Wipe every slot, then mark head and tail empty. */
	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
368
/* Append seq to the tail of the list; duplicates are ignored (the list
 * behaves as an ordered set).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do. */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Link in at the tail, or become the head of an empty list. */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
386
/* Delayed-work handler for the channel timer: closes the channel with an
 * errno derived from the state it timed out in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock before the channel mutex. */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Established/configuring channels, and connecting channels past
	 * the SDP security level, report ECONNREFUSED; anything else is
	 * a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close runs without the channel lock held. */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the timer's channel reference — presumably taken when the
	 * timer was armed (l2cap_set_timer is outside this chunk; verify).
	 */
	l2cap_chan_put(chan);
}
416
417 struct l2cap_chan *l2cap_chan_create(void)
418 {
419 struct l2cap_chan *chan;
420
421 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
422 if (!chan)
423 return NULL;
424
425 mutex_init(&chan->lock);
426
427 write_lock(&chan_list_lock);
428 list_add(&chan->global_l, &chan_list);
429 write_unlock(&chan_list_lock);
430
431 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
432
433 chan->state = BT_OPEN;
434
435 kref_init(&chan->kref);
436
437 /* This flag is cleared in l2cap_chan_ready() */
438 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
439
440 BT_DBG("chan %p", chan);
441
442 return chan;
443 }
444 EXPORT_SYMBOL_GPL(l2cap_chan_create);
445
/* kref release callback (see l2cap_chan_put): unlink the channel from
 * the global list and free it.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
458
/* Take a channel reference; paired with l2cap_chan_put(). */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
465
/* Drop a channel reference; the last put frees the channel via
 * l2cap_chan_destroy().
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
473
474 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
475 {
476 chan->fcs = L2CAP_FCS_CRC16;
477 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
478 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
479 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
480 chan->remote_max_tx = chan->max_tx;
481 chan->remote_tx_win = chan->tx_win;
482 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
483 chan->sec_level = BT_SECURITY_LOW;
484 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
485 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
486 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
487 chan->conf_state = 0;
488
489 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
490 }
491 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
492
493 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
494 {
495 chan->sdu = NULL;
496 chan->sdu_last_frag = NULL;
497 chan->sdu_len = 0;
498 chan->tx_credits = 0;
499 chan->rx_credits = le_max_credits;
500 chan->mps = min_t(u16, chan->imtu, le_default_mps);
501
502 skb_queue_head_init(&chan->tx_q);
503 }
504
/* Attach chan to conn, assigning CIDs/MTU according to the channel type,
 * and link it onto the connection's channel list.  Caller must hold
 * conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until signalling says otherwise. */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow spec parameters. */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* The connection's channel list owns a reference; it is dropped
	 * in l2cap_chan_del().
	 */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
556
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
563
/* Detach chan from its connection and release mode-specific resources.
 * err is forwarded to the owner's teardown callback.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	/* Let the owner (e.g. the socket layer) tear down its side first. */
	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add(). */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	/* Tear down any logical link on the AMP controller as well. */
	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Mode-specific state below only exists once configuration has
	 * completed.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
633
/* Copy the connection's (possibly newly resolved) destination address
 * and address type into every channel, each under its own lock.
 */
void l2cap_conn_update_id_addr(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
650
651 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
652 {
653 struct l2cap_conn *conn = chan->conn;
654 struct l2cap_le_conn_rsp rsp;
655 u16 result;
656
657 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
658 result = L2CAP_CR_AUTHORIZATION;
659 else
660 result = L2CAP_CR_BAD_PSM;
661
662 l2cap_state_change(chan, BT_DISCONN);
663
664 rsp.dcid = cpu_to_le16(chan->scid);
665 rsp.mtu = cpu_to_le16(chan->imtu);
666 rsp.mps = cpu_to_le16(chan->mps);
667 rsp.credits = cpu_to_le16(chan->rx_credits);
668 rsp.result = cpu_to_le16(result);
669
670 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
671 &rsp);
672 }
673
674 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
675 {
676 struct l2cap_conn *conn = chan->conn;
677 struct l2cap_conn_rsp rsp;
678 u16 result;
679
680 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
681 result = L2CAP_CR_SEC_BLOCK;
682 else
683 result = L2CAP_CR_BAD_PSM;
684
685 l2cap_state_change(chan, BT_DISCONN);
686
687 rsp.scid = cpu_to_le16(chan->dcid);
688 rsp.dcid = cpu_to_le16(chan->scid);
689 rsp.result = cpu_to_le16(result);
690 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
691
692 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
693 }
694
/* Close chan according to its current state; reason is the errno
 * reported to the owner.  Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established conn-oriented channels follow the full
		 * disconnect handshake (bounded by the send timeout);
		 * everything else is torn down immediately.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection awaiting our response: reject it
		 * with the transport-appropriate signalling first.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
737
738 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
739 {
740 switch (chan->chan_type) {
741 case L2CAP_CHAN_RAW:
742 switch (chan->sec_level) {
743 case BT_SECURITY_HIGH:
744 case BT_SECURITY_FIPS:
745 return HCI_AT_DEDICATED_BONDING_MITM;
746 case BT_SECURITY_MEDIUM:
747 return HCI_AT_DEDICATED_BONDING;
748 default:
749 return HCI_AT_NO_BONDING;
750 }
751 break;
752 case L2CAP_CHAN_CONN_LESS:
753 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
754 if (chan->sec_level == BT_SECURITY_LOW)
755 chan->sec_level = BT_SECURITY_SDP;
756 }
757 if (chan->sec_level == BT_SECURITY_HIGH ||
758 chan->sec_level == BT_SECURITY_FIPS)
759 return HCI_AT_NO_BONDING_MITM;
760 else
761 return HCI_AT_NO_BONDING;
762 break;
763 case L2CAP_CHAN_CONN_ORIENTED:
764 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
765 if (chan->sec_level == BT_SECURITY_LOW)
766 chan->sec_level = BT_SECURITY_SDP;
767
768 if (chan->sec_level == BT_SECURITY_HIGH ||
769 chan->sec_level == BT_SECURITY_FIPS)
770 return HCI_AT_NO_BONDING_MITM;
771 else
772 return HCI_AT_NO_BONDING;
773 }
774 /* fall through */
775 default:
776 switch (chan->sec_level) {
777 case BT_SECURITY_HIGH:
778 case BT_SECURITY_FIPS:
779 return HCI_AT_GENERAL_BONDING_MITM;
780 case BT_SECURITY_MEDIUM:
781 return HCI_AT_GENERAL_BONDING;
782 default:
783 return HCI_AT_NO_BONDING;
784 }
785 break;
786 }
787 }
788
789 /* Service level security */
790 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
791 {
792 struct l2cap_conn *conn = chan->conn;
793 __u8 auth_type;
794
795 if (conn->hcon->type == LE_LINK)
796 return smp_conn_security(conn->hcon, chan->sec_level);
797
798 auth_type = l2cap_get_auth_type(chan);
799
800 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
801 initiator);
802 }
803
804 static u8 l2cap_get_ident(struct l2cap_conn *conn)
805 {
806 u8 id;
807
808 /* Get next available identificator.
809 * 1 - 128 are used by kernel.
810 * 129 - 199 are reserved.
811 * 200 - 254 are used by utilities like l2ping, etc.
812 */
813
814 mutex_lock(&conn->ident_lock);
815
816 if (++conn->tx_ident > 128)
817 conn->tx_ident = 1;
818
819 id = conn->tx_ident;
820
821 mutex_unlock(&conn->ident_lock);
822
823 return id;
824 }
825
/* Build and transmit one signalling command on this connection. */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	/* Allocation failed: the command is silently dropped. */
	if (!skb)
		return;

	/* Prefer non-flushable ACL packets when the controller supports
	 * them.
	 */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling always goes out active and at maximum priority. */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
847
848 static bool __chan_is_moving(struct l2cap_chan *chan)
849 {
850 return chan->move_state != L2CAP_MOVE_STABLE &&
851 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
852 }
853
/* Transmit one frame on the channel's current link (AMP high-speed link
 * when present and not mid-move, otherwise the BR/EDR ACL link).
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Channel lives on an AMP controller: send on the high-speed
	 * logical link, or drop the frame if that link is not set up.
	 */
	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use non-flushable packets for non-flushable channels when the
	 * controller supports them.
	 */
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
880
881 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
882 {
883 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
884 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
885
886 if (enh & L2CAP_CTRL_FRAME_TYPE) {
887 /* S-Frame */
888 control->sframe = 1;
889 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
890 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
891
892 control->sar = 0;
893 control->txseq = 0;
894 } else {
895 /* I-Frame */
896 control->sframe = 0;
897 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
898 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
899
900 control->poll = 0;
901 control->super = 0;
902 }
903 }
904
905 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
906 {
907 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
908 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
909
910 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
911 /* S-Frame */
912 control->sframe = 1;
913 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
914 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
915
916 control->sar = 0;
917 control->txseq = 0;
918 } else {
919 /* I-Frame */
920 control->sframe = 0;
921 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
922 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
923
924 control->poll = 0;
925 control->super = 0;
926 }
927 }
928
929 static inline void __unpack_control(struct l2cap_chan *chan,
930 struct sk_buff *skb)
931 {
932 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
933 __unpack_extended_control(get_unaligned_le32(skb->data),
934 &bt_cb(skb)->control);
935 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
936 } else {
937 __unpack_enhanced_control(get_unaligned_le16(skb->data),
938 &bt_cb(skb)->control);
939 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
940 }
941 }
942
943 static u32 __pack_extended_control(struct l2cap_ctrl *control)
944 {
945 u32 packed;
946
947 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
948 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
949
950 if (control->sframe) {
951 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
952 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
953 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
954 } else {
955 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
956 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
957 }
958
959 return packed;
960 }
961
962 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
963 {
964 u16 packed;
965
966 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
967 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
968
969 if (control->sframe) {
970 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
971 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
972 packed |= L2CAP_CTRL_FRAME_TYPE;
973 } else {
974 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
975 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
976 }
977
978 return packed;
979 }
980
981 static inline void __pack_control(struct l2cap_chan *chan,
982 struct l2cap_ctrl *control,
983 struct sk_buff *skb)
984 {
985 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
986 put_unaligned_le32(__pack_extended_control(control),
987 skb->data + L2CAP_HDR_SIZE);
988 } else {
989 put_unaligned_le16(__pack_enhanced_control(control),
990 skb->data + L2CAP_HDR_SIZE);
991 }
992 }
993
994 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
995 {
996 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
997 return L2CAP_EXT_HDR_SIZE;
998 else
999 return L2CAP_ENH_HDR_SIZE;
1000 }
1001
/* Build a supervisory (S-frame) PDU carrying the given packed control
 * field.  S-frames have no payload: header + control + optional FCS.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: length excludes the header itself. */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width depends on the negotiated control format. */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything queued so far (header + control). */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1034
/* Update ERTM bookkeeping for an outgoing S-frame, then build and send
 * it.  No-op for non-S-frames or while a channel move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is piggybacked on the first non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last advertised receiver state was "not
	 * ready".
	 */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR/REJ acknowledge reqseq, so the delayed ack timer is no
	 * longer needed; SREJ does not acknowledge.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1075
1076 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1077 {
1078 struct l2cap_ctrl control;
1079
1080 BT_DBG("chan %p, poll %d", chan, poll);
1081
1082 memset(&control, 0, sizeof(control));
1083 control.sframe = 1;
1084 control.poll = poll;
1085
1086 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1087 control.super = L2CAP_SUPER_RNR;
1088 else
1089 control.super = L2CAP_SUPER_RR;
1090
1091 control.reqseq = chan->buffer_seq;
1092 l2cap_send_sframe(chan, &control);
1093 }
1094
1095 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1096 {
1097 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1098 return true;
1099
1100 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1101 }
1102
1103 static bool __amp_capable(struct l2cap_chan *chan)
1104 {
1105 struct l2cap_conn *conn = chan->conn;
1106 struct hci_dev *hdev;
1107 bool amp_available = false;
1108
1109 if (!conn->hs_enabled)
1110 return false;
1111
1112 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1113 return false;
1114
1115 read_lock(&hci_dev_list_lock);
1116 list_for_each_entry(hdev, &hci_dev_list, list) {
1117 if (hdev->amp_type != AMP_TYPE_BREDR &&
1118 test_bit(HCI_UP, &hdev->flags)) {
1119 amp_available = true;
1120 break;
1121 }
1122 }
1123 read_unlock(&hci_dev_list_lock);
1124
1125 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1126 return amp_available;
1127
1128 return false;
1129 }
1130
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	/* TODO: actual validation of the extended flow spec is not
	 * implemented yet — every proposal is currently accepted.
	 */
	return true;
}
1136
1137 void l2cap_send_conn_req(struct l2cap_chan *chan)
1138 {
1139 struct l2cap_conn *conn = chan->conn;
1140 struct l2cap_conn_req req;
1141
1142 req.scid = cpu_to_le16(chan->scid);
1143 req.psm = chan->psm;
1144
1145 chan->ident = l2cap_get_ident(conn);
1146
1147 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1148
1149 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1150 }
1151
1152 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1153 {
1154 struct l2cap_create_chan_req req;
1155 req.scid = cpu_to_le16(chan->scid);
1156 req.psm = chan->psm;
1157 req.amp_id = amp_id;
1158
1159 chan->ident = l2cap_get_ident(chan->conn);
1160
1161 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1162 sizeof(req), &req);
1163 }
1164
/* Prepare an ERTM channel's state machines for a controller move; no-op
 * for other modes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	/* Quiesce all ERTM timers while the channel moves controllers. */
	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;

	/* Frames already sent at least once get their retry count reset
	 * to 1; the first never-sent frame ends the scan.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	/* Discard all reject/selective-reject bookkeeping; it is rebuilt
	 * once the move completes.
	 */
	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Treat the peer as busy until the move handshake clears it. */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1199
1200 static void l2cap_move_done(struct l2cap_chan *chan)
1201 {
1202 u8 move_role = chan->move_role;
1203 BT_DBG("chan %p", chan);
1204
1205 chan->move_state = L2CAP_MOVE_STABLE;
1206 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1207
1208 if (chan->mode != L2CAP_MODE_ERTM)
1209 return;
1210
1211 switch (move_role) {
1212 case L2CAP_MOVE_ROLE_INITIATOR:
1213 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1214 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1215 break;
1216 case L2CAP_MOVE_ROLE_RESPONDER:
1217 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1218 break;
1219 }
1220 }
1221
/* Transition a fully-configured channel to BT_CONNECTED and notify its
 * owner via the ops->ready() callback.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* An LE flow-control channel with no transmit credits cannot send
	 * yet, so start it out suspended.
	 */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1235
1236 static void l2cap_le_connect(struct l2cap_chan *chan)
1237 {
1238 struct l2cap_conn *conn = chan->conn;
1239 struct l2cap_le_conn_req req;
1240
1241 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1242 return;
1243
1244 req.psm = chan->psm;
1245 req.scid = cpu_to_le16(chan->scid);
1246 req.mtu = cpu_to_le16(chan->imtu);
1247 req.mps = cpu_to_le16(chan->mps);
1248 req.credits = cpu_to_le16(chan->rx_credits);
1249
1250 chan->ident = l2cap_get_ident(conn);
1251
1252 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1253 sizeof(req), &req);
1254 }
1255
1256 static void l2cap_le_start(struct l2cap_chan *chan)
1257 {
1258 struct l2cap_conn *conn = chan->conn;
1259
1260 if (!smp_conn_security(conn->hcon, chan->sec_level))
1261 return;
1262
1263 if (!chan->psm) {
1264 l2cap_chan_ready(chan);
1265 return;
1266 }
1267
1268 if (chan->state == BT_CONNECT)
1269 l2cap_le_connect(chan);
1270 }
1271
1272 static void l2cap_start_connection(struct l2cap_chan *chan)
1273 {
1274 if (__amp_capable(chan)) {
1275 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1276 a2mp_discover_amp(chan);
1277 } else if (chan->conn->hcon->type == LE_LINK) {
1278 l2cap_le_start(chan);
1279 } else {
1280 l2cap_send_conn_req(chan);
1281 }
1282 }
1283
/* Drive the connect procedure for a channel. On BR/EDR the feature
 * mask information exchange must complete first; if it has not been
 * started yet, start it and let the response (or the info timeout)
 * resume the connect.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* LE links have no information request phase. */
	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange still in flight; wait for it to finish. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		/* Connect only when security is satisfied and no connect
		 * request is already outstanding.
		 */
		if (l2cap_chan_check_security(chan, true) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Arm the fallback timer in case the peer never answers. */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1314
1315 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1316 {
1317 u32 local_feat_mask = l2cap_feat_mask;
1318 if (!disable_ertm)
1319 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1320
1321 switch (mode) {
1322 case L2CAP_MODE_ERTM:
1323 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1324 case L2CAP_MODE_STREAMING:
1325 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1326 default:
1327 return 0x00;
1328 }
1329 }
1330
/* Tear down a channel: stop ERTM timers, send a Disconnection Request
 * on the air (except for the A2MP fixed channel, which has none) and
 * move the channel to BT_DISCONN with the given error.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* No more frames will be exchanged; stop all ERTM timers. */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* The A2MP channel is torn down purely by state change. */
	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1357
1358 /* ---- L2CAP connections ---- */
/* ---- L2CAP connections ---- */

/* Walk every channel on the connection and push its connect state
 * machine forward: channels in BT_CONNECT are (re)started, channels in
 * BT_CONNECT2 get their pending Connection Response sent. Called after
 * events that may unblock connects (e.g. feature mask exchange done,
 * security level changed).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() below may unlink the entry. */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait until security is done and no connect
			 * request is already outstanding.
			 */
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels whose required mode the peer does
			 * not support and that may not fall back.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner accept/reject first. */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful response moves on to config,
			 * and the config request is sent only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1434
/* LE-link-specific post-connect work: kick off pairing for outgoing
 * connections and, as slave, request a connection parameter update if
 * the current interval is outside the configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1467
/* Called when the underlying link is up: advance every channel on the
 * connection, run LE-specific setup, and release any RX frames that
 * were queued while the link was still coming up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* The A2MP channel has its own lifecycle handling. */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless channels need no connect
			 * handshake; they are ready as soon as the link is.
			 */
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the connection was ready. */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1505
1506 /* Notify sockets that we cannot guaranty reliability anymore */
1507 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1508 {
1509 struct l2cap_chan *chan;
1510
1511 BT_DBG("conn %p", conn);
1512
1513 mutex_lock(&conn->chan_lock);
1514
1515 list_for_each_entry(chan, &conn->chan_l, list) {
1516 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1517 l2cap_chan_set_err(chan, err);
1518 }
1519
1520 mutex_unlock(&conn->chan_lock);
1521 }
1522
1523 static void l2cap_info_timeout(struct work_struct *work)
1524 {
1525 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1526 info_timer.work);
1527
1528 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1529 conn->info_ident = 0;
1530
1531 l2cap_conn_start(conn);
1532 }
1533
1534 /*
1535 * l2cap_user
1536 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1537 * callback is called during registration. The ->remove callback is called
1538 * during unregistration.
 * An l2cap_user object is unregistered either explicitly, or implicitly
 * when the underlying l2cap_conn object is deleted. This guarantees that
 * l2cap->hcon, l2cap->hchan, .. are valid as long as the remove callback
 * hasn't been called.
1542 * External modules must own a reference to the l2cap_conn object if they intend
1543 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1544 * any time if they don't.
1545 */
1546
/* Register an external user on the connection. Returns 0 on success,
 * -EINVAL if the user is already registered, -ENODEV if the connection
 * has already been deleted, or the error from the user's probe()
 * callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not,
	 * we must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead,
	 * it relies on the parent hci_conn object to be locked. This itself
	 * relies on the hci_dev object to be locked. So we must lock the
	 * hci device here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list linkage means the user is already registered. */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1583 EXPORT_SYMBOL(l2cap_register_user);
1584
1585 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1586 {
1587 struct hci_dev *hdev = conn->hcon->hdev;
1588
1589 hci_dev_lock(hdev);
1590
1591 if (!user->list.next || !user->list.prev)
1592 goto out_unlock;
1593
1594 list_del(&user->list);
1595 user->list.next = NULL;
1596 user->list.prev = NULL;
1597 user->remove(conn, user);
1598
1599 out_unlock:
1600 hci_dev_unlock(hdev);
1601 }
1602 EXPORT_SYMBOL(l2cap_unregister_user);
1603
1604 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1605 {
1606 struct l2cap_user *user;
1607
1608 while (!list_empty(&conn->users)) {
1609 user = list_first_entry(&conn->users, struct l2cap_user, list);
1610 list_del(&user->list);
1611 user->list.next = NULL;
1612 user->list.prev = NULL;
1613 user->remove(conn, user);
1614 }
1615 }
1616
/* Tear down the L2CAP connection attached to an hci_conn: flush RX
 * state, unregister users, close every channel with the given error,
 * and drop the connection's reference. The statement order here is
 * deliberate — see the individual comments.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel across ops->close() since
		 * l2cap_chan_del() unlinks it from the connection.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Stop a pending feature-mask info timer, if any. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Clearing conn->hchan marks the connection as deleted for
	 * l2cap_register_user().
	 */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1669
1670 static void l2cap_conn_free(struct kref *ref)
1671 {
1672 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1673
1674 hci_conn_put(conn->hcon);
1675 kfree(conn);
1676 }
1677
1678 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1679 {
1680 kref_get(&conn->ref);
1681 return conn;
1682 }
1683 EXPORT_SYMBOL(l2cap_conn_get);
1684
1685 void l2cap_conn_put(struct l2cap_conn *conn)
1686 {
1687 kref_put(&conn->ref, l2cap_conn_free);
1688 }
1689 EXPORT_SYMBOL(l2cap_conn_put);
1690
1691 /* ---- Socket interface ---- */
1692
1693 /* Find socket with psm and source / destination bdaddr.
1694 * Returns closest match.
1695 */
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		/* state == 0 matches any channel state. */
		if (state && c->state != state)
			continue;

		/* Channel address type must match the link transport. */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Return with a reference held; the caller
				 * must l2cap_chan_put() it.
				 */
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1744
1745 static void l2cap_monitor_timeout(struct work_struct *work)
1746 {
1747 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1748 monitor_timer.work);
1749
1750 BT_DBG("chan %p", chan);
1751
1752 l2cap_chan_lock(chan);
1753
1754 if (!chan->conn) {
1755 l2cap_chan_unlock(chan);
1756 l2cap_chan_put(chan);
1757 return;
1758 }
1759
1760 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1761
1762 l2cap_chan_unlock(chan);
1763 l2cap_chan_put(chan);
1764 }
1765
1766 static void l2cap_retrans_timeout(struct work_struct *work)
1767 {
1768 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1769 retrans_timer.work);
1770
1771 BT_DBG("chan %p", chan);
1772
1773 l2cap_chan_lock(chan);
1774
1775 if (!chan->conn) {
1776 l2cap_chan_unlock(chan);
1777 l2cap_chan_put(chan);
1778 return;
1779 }
1780
1781 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1782 l2cap_chan_unlock(chan);
1783 l2cap_chan_put(chan);
1784 }
1785
/* Transmit queued frames in streaming mode: every frame is sent
 * immediately with a fresh txseq and no retransmission bookkeeping.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Nothing is transmitted while the channel is being moved. */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acknowledges, so reqseq is 0. */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append the frame check sequence when negotiated. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1824
/* Transmit as many pending I-frames as the ERTM TX window and state
 * allow. Returns the number of frames sent, or -ENOTCONN when the
 * channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote receiver is busy (RNR received): hold transmission. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* Nothing is transmitted while the channel is being moved. */
	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback an F-bit if one is owed to the remote. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame acknowledges everything received so far. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original skb stays queued for
		 * potential retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1894
/* Retransmit every sequence number currently queued on retrans_list.
 * Disconnects the channel when a frame exceeds the negotiated max_tx
 * retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Remote receiver is busy: hold all transmission. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	/* Nothing is transmitted while the channel is being moved. */
	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		/* Work on a local copy of the control field; the queued
		 * skb keeps its original control data.
		 */
		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retransmissions. */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgement and F-bit for this resend. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1975
1976 static void l2cap_retransmit(struct l2cap_chan *chan,
1977 struct l2cap_ctrl *control)
1978 {
1979 BT_DBG("chan %p, control %p", chan, control);
1980
1981 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1982 l2cap_ertm_resend(chan);
1983 }
1984
/* Retransmit every unacknowledged frame starting at control->reqseq,
 * replacing whatever was previously queued for retransmission.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll obliges us to answer with the F-bit set. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean retransmission list. */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the first frame at reqseq (or the first
		 * not-yet-sent frame, whichever comes first).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue all transmitted-but-unacked frames from there on. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2018
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * piggyback acks on outgoing I-frames when possible, send an explicit
 * RR once the unacked window is 3/4 full, or re-arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	/* Number of received frames not yet acknowledged to the peer. */
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Tell the peer we cannot receive right now. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack via the ack timer. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2068
/* Copy up to len bytes of user data from msg into skb, placing the
 * first count bytes into skb's linear area and the remainder into a
 * chain of HCI-MTU-sized fragments on skb's frag_list. Returns the
 * number of bytes copied or -EFAULT / an allocation error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
					msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		/* Each fragment is bounded by the HCI MTU. */
		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
						msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account fragment bytes on the head skb. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2113
2114 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2115 struct msghdr *msg, size_t len)
2116 {
2117 struct l2cap_conn *conn = chan->conn;
2118 struct sk_buff *skb;
2119 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2120 struct l2cap_hdr *lh;
2121
2122 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2123 __le16_to_cpu(chan->psm), len);
2124
2125 count = min_t(unsigned int, (conn->mtu - hlen), len);
2126
2127 skb = chan->ops->alloc_skb(chan, hlen, count,
2128 msg->msg_flags & MSG_DONTWAIT);
2129 if (IS_ERR(skb))
2130 return skb;
2131
2132 /* Create L2CAP header */
2133 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2134 lh->cid = cpu_to_le16(chan->dcid);
2135 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2136 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2137
2138 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2139 if (unlikely(err < 0)) {
2140 kfree_skb(skb);
2141 return ERR_PTR(err);
2142 }
2143 return skb;
2144 }
2145
2146 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2147 struct msghdr *msg, size_t len)
2148 {
2149 struct l2cap_conn *conn = chan->conn;
2150 struct sk_buff *skb;
2151 int err, count;
2152 struct l2cap_hdr *lh;
2153
2154 BT_DBG("chan %p len %zu", chan, len);
2155
2156 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2157
2158 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2159 msg->msg_flags & MSG_DONTWAIT);
2160 if (IS_ERR(skb))
2161 return skb;
2162
2163 /* Create L2CAP header */
2164 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2165 lh->cid = cpu_to_le16(chan->dcid);
2166 lh->len = cpu_to_le16(len);
2167
2168 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2169 if (unlikely(err < 0)) {
2170 kfree_skb(skb);
2171 return ERR_PTR(err);
2172 }
2173 return skb;
2174 }
2175
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a placeholder
 * control field (filled in at send time), an optional SDU length for
 * the first segment, then the payload. Room for the FCS is accounted
 * for but the FCS itself is appended later. Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs. extended control field. */
	hlen = __ertm_hdr_size(chan);

	/* Non-zero sdulen marks the first segment of a segmented SDU. */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2229
/* Split an outgoing SDU into I-frame PDUs (SAR segmentation) and queue
 * them on seg_queue. Returns 0 on success or a negative error, in
 * which case seg_queue is left empty.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* The whole SDU fits into a single PDU. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length. */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START segment carries the SDU length. */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2296
2297 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2298 struct msghdr *msg,
2299 size_t len, u16 sdulen)
2300 {
2301 struct l2cap_conn *conn = chan->conn;
2302 struct sk_buff *skb;
2303 int err, count, hlen;
2304 struct l2cap_hdr *lh;
2305
2306 BT_DBG("chan %p len %zu", chan, len);
2307
2308 if (!conn)
2309 return ERR_PTR(-ENOTCONN);
2310
2311 hlen = L2CAP_HDR_SIZE;
2312
2313 if (sdulen)
2314 hlen += L2CAP_SDULEN_SIZE;
2315
2316 count = min_t(unsigned int, (conn->mtu - hlen), len);
2317
2318 skb = chan->ops->alloc_skb(chan, hlen, count,
2319 msg->msg_flags & MSG_DONTWAIT);
2320 if (IS_ERR(skb))
2321 return skb;
2322
2323 /* Create L2CAP header */
2324 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2325 lh->cid = cpu_to_le16(chan->dcid);
2326 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2327
2328 if (sdulen)
2329 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2330
2331 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2332 if (unlikely(err < 0)) {
2333 kfree_skb(skb);
2334 return ERR_PTR(err);
2335 }
2336
2337 return skb;
2338 }
2339
/* Split an outgoing SDU into LE flow-control PDUs sized to the
 * remote's MPS and queue them on seg_queue. Returns 0 on success or a
 * negative error, in which case seg_queue is left empty.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* The first PDU also carries the 2-byte SDU length field. */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* Continuation PDUs omit the SDU length, freeing those two
		 * bytes for payload.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2375
/* Send an SDU on @chan, dispatching on channel type and mode.
 *
 * Returns the number of bytes accepted (@len) on success or a negative
 * errno.  Segmentation/allocation may block and drops the channel lock,
 * which is why the BT_CONNECTED state is rechecked after every
 * PDU-building call below.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Segmentation may have dropped the lock; recheck state */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Each transmitted PDU consumes one credit granted by the
		 * peer; leftover PDUs stay on tx_q until more credits arrive.
		 */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2508
/* Send an SREJ s-frame for every sequence number between the expected
 * TX sequence and @txseq that we have not already buffered in srej_q,
 * recording each requested sequence in srej_list so the retransmitted
 * frames can be matched on arrival.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Frames already held in srej_q need no retransmission */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Everything up to and including txseq is now accounted for */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2531
2532 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2533 {
2534 struct l2cap_ctrl control;
2535
2536 BT_DBG("chan %p", chan);
2537
2538 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2539 return;
2540
2541 memset(&control, 0, sizeof(control));
2542 control.sframe = 1;
2543 control.super = L2CAP_SUPER_SREJ;
2544 control.reqseq = chan->srej_list.tail;
2545 l2cap_send_sframe(chan, &control);
2546 }
2547
/* Re-send SREJ s-frames for every outstanding sequence in srej_list
 * except @txseq (which has just arrived).  Each popped sequence is
 * re-appended, so the head snapshot bounds the walk to a single pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the just-received frame or when the list runs out */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Still outstanding: put it back at the tail */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2573
/* Process an acknowledgement (ReqSeq) from the peer: drop every frame
 * up to (but excluding) @reqseq from the transmit queue and stop the
 * retransmission timer once nothing remains unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2605
2606 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2607 {
2608 BT_DBG("chan %p", chan);
2609
2610 chan->expected_tx_seq = chan->buffer_seq;
2611 l2cap_seq_list_clear(&chan->srej_list);
2612 skb_queue_purge(&chan->srej_q);
2613 chan->rx_state = L2CAP_RX_STATE_RECV;
2614 }
2615
/* ERTM transmit state machine, XMIT state: normal operation where new
 * data may be sent immediately.  Poll-type events transition the
 * machine to WAIT_F until the peer answers with an F-bit frame.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new frames and transmit as the window allows */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy; poll the peer with
			 * RR(P=1) so it learns we can receive again.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2687
2688 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2689 struct l2cap_ctrl *control,
2690 struct sk_buff_head *skbs, u8 event)
2691 {
2692 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2693 event);
2694
2695 switch (event) {
2696 case L2CAP_EV_DATA_REQUEST:
2697 if (chan->tx_send_head == NULL)
2698 chan->tx_send_head = skb_peek(skbs);
2699 /* Queue data, but don't send. */
2700 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2701 break;
2702 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2703 BT_DBG("Enter LOCAL_BUSY");
2704 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2705
2706 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2707 /* The SREJ_SENT state must be aborted if we are to
2708 * enter the LOCAL_BUSY state.
2709 */
2710 l2cap_abort_rx_srej_sent(chan);
2711 }
2712
2713 l2cap_send_ack(chan);
2714
2715 break;
2716 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2717 BT_DBG("Exit LOCAL_BUSY");
2718 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2719
2720 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2721 struct l2cap_ctrl local_control;
2722 memset(&local_control, 0, sizeof(local_control));
2723 local_control.sframe = 1;
2724 local_control.super = L2CAP_SUPER_RR;
2725 local_control.poll = 1;
2726 local_control.reqseq = chan->buffer_seq;
2727 l2cap_send_sframe(chan, &local_control);
2728
2729 chan->retry_count = 1;
2730 __set_monitor_timer(chan);
2731 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2732 }
2733 break;
2734 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2735 l2cap_process_reqseq(chan, control->reqseq);
2736
2737 /* Fall through */
2738
2739 case L2CAP_EV_RECV_FBIT:
2740 if (control && control->final) {
2741 __clear_monitor_timer(chan);
2742 if (chan->unacked_frames > 0)
2743 __set_retrans_timer(chan);
2744 chan->retry_count = 0;
2745 chan->tx_state = L2CAP_TX_STATE_XMIT;
2746 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2747 }
2748 break;
2749 case L2CAP_EV_EXPLICIT_POLL:
2750 /* Ignore */
2751 break;
2752 case L2CAP_EV_MONITOR_TO:
2753 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2754 l2cap_send_rr_or_rnr(chan, 1);
2755 __set_monitor_timer(chan);
2756 chan->retry_count++;
2757 } else {
2758 l2cap_send_disconn_req(chan, ECONNABORTED);
2759 }
2760 break;
2761 default:
2762 break;
2763 }
2764 }
2765
2766 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2767 struct sk_buff_head *skbs, u8 event)
2768 {
2769 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2770 chan, control, skbs, event, chan->tx_state);
2771
2772 switch (chan->tx_state) {
2773 case L2CAP_TX_STATE_XMIT:
2774 l2cap_tx_state_xmit(chan, control, skbs, event);
2775 break;
2776 case L2CAP_TX_STATE_WAIT_F:
2777 l2cap_tx_state_wait_f(chan, control, skbs, event);
2778 break;
2779 default:
2780 /* Ignore event */
2781 break;
2782 }
2783 }
2784
/* Feed a received frame's ReqSeq/F-bit into the TX state machine so
 * acknowledged frames are released (no new data is supplied).
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2791
/* Feed only a received frame's F-bit into the TX state machine (its
 * ReqSeq is not to be processed).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2798
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)
			continue;

		/* Clone per channel; allocation failure just skips this
		 * listener rather than aborting delivery to the others.
		 */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* ops->recv takes ownership on success; free on refusal */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2826
/* ---- L2CAP signalling commands ---- */

/* Build a signalling command skb: L2CAP header + command header +
 * @dlen bytes of @data.  Payload beyond the connection MTU is carried
 * in headerless continuation fragments chained on frag_list.
 *
 * Returns the skb or NULL on allocation failure / undersized MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the first skb with as much payload as fits */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and every fragment chained so far */
	kfree_skb(skb);
	return NULL;
}
2893
/* Decode one configuration option at *ptr, advancing *ptr past it.
 *
 * @type/@olen receive the option header; @val receives the value for
 * 1/2/4-byte options, or a pointer to the in-place value for any other
 * length.
 *
 * Returns the total number of bytes consumed (header + value).
 *
 * NOTE(review): this trusts opt->len; the caller must ensure at least
 * L2CAP_CONF_OPT_SIZE + opt->len bytes remain in the buffer - there is
 * no bound parameter here to check against.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer, not a value */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2927
/* Append one configuration option at *ptr and advance *ptr past it.
 *
 * For 1/2/4-byte options @val is the value itself; for any other
 * length @val is a pointer to @len bytes to copy in.
 *
 * NOTE(review): the output buffer is not bounds-checked here; every
 * caller must size its response buffer for the worst case (later
 * mainline kernels added an explicit size argument for this).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val carries a source pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2957
/* Append an Extended Flow Specification option built from the
 * channel's local QoS parameters.  Streaming mode uses the fixed
 * best-effort service defaults; unsupported modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		/* EFS is only meaningful for ERTM and streaming modes */
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
2988
/* Deferred acknowledgement work: if any received frames have not yet
 * been acknowledged when the ack timer fires, send an RR (or RNR when
 * locally busy).  Drops the channel reference taken when the timer was
 * armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Balances the hold taken when scheduling this work */
	l2cap_chan_put(chan);
}
3008
/* Reset per-channel sequencing/SDU state and, for ERTM mode, set up the
 * state machines, timers and sequence lists.
 *
 * Returns 0 on success or a negative errno from sequence-list
 * allocation (streaming/basic modes return 0 after the common reset).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first list so nothing leaks on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3053
3054 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3055 {
3056 switch (mode) {
3057 case L2CAP_MODE_STREAMING:
3058 case L2CAP_MODE_ERTM:
3059 if (l2cap_mode_supported(mode, remote_feat_mask))
3060 return mode;
3061 /* fall through */
3062 default:
3063 return L2CAP_MODE_BASIC;
3064 }
3065 }
3066
3067 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3068 {
3069 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3070 }
3071
3072 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3073 {
3074 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3075 }
3076
/* Fill in the RFC retransmission/monitor timeouts: derived from the AMP
 * controller's best-effort flush timeout when the channel lives on an
 * AMP link, otherwise the spec defaults.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field in the RFC option */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3114
3115 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3116 {
3117 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3118 __l2cap_ews_supported(chan->conn)) {
3119 /* use extended control field */
3120 set_bit(FLAG_EXT_CTRL, &chan->flags);
3121 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3122 } else {
3123 chan->tx_win = min_t(u16, chan->tx_win,
3124 L2CAP_DEFAULT_TX_WINDOW);
3125 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3126 }
3127 chan->ack_win = chan->tx_win;
3128 }
3129
/* Build an outgoing Configuration Request for @chan into @data.
 *
 * On the first request only, the desired channel mode is (re)selected
 * against the remote feature mask.  Options are then appended for the
 * final mode: MTU, RFC, and optionally EFS/EWS/FCS.
 *
 * Returns the number of bytes written to @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode negotiation happens only on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise an MTU that differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Explicit basic-mode RFC only when the peer knows RFC */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU must fit in the connection MTU with worst-case
		 * extended header, SDU length field and FCS.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* RFC txwin is capped at the classic window; the full
		 * extended window goes in the EWS option below.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3250
/* Parse the peer's Configuration Request (chan->conf_req/conf_len) and
 * build the Configuration Response into @data.
 *
 * Returns the number of bytes written to @data, or -ECONNREFUSED when
 * the request is incompatible and the channel must be refused.
 *
 * Fix: every option's length (olen) is validated before its value is
 * consumed.  Previously a truncated EFS option still set remote_efs
 * while leaving the stack variable `efs` uninitialized, and those
 * uninitialized bytes could later be echoed back to the peer in the
 * response (a remote stack infoleak - same class as CVE-2022-42895);
 * a truncated RFC option was likewise acted upon.  Options carrying a
 * wrong length are now ignored, matching upstream hardening.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			/* Only trust efs once it is fully populated */
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint option: report it back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode: give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's MPS to what fits in our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3464
3465 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3466 void *data, u16 *result)
3467 {
3468 struct l2cap_conf_req *req = data;
3469 void *ptr = req->data;
3470 int type, olen;
3471 unsigned long val;
3472 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3473 struct l2cap_conf_efs efs;
3474
3475 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3476
3477 while (len >= L2CAP_CONF_OPT_SIZE) {
3478 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3479
3480 switch (type) {
3481 case L2CAP_CONF_MTU:
3482 if (val < L2CAP_DEFAULT_MIN_MTU) {
3483 *result = L2CAP_CONF_UNACCEPT;
3484 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3485 } else
3486 chan->imtu = val;
3487 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3488 break;
3489
3490 case L2CAP_CONF_FLUSH_TO:
3491 chan->flush_to = val;
3492 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3493 2, chan->flush_to);
3494 break;
3495
3496 case L2CAP_CONF_RFC:
3497 if (olen == sizeof(rfc))
3498 memcpy(&rfc, (void *)val, olen);
3499
3500 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3501 rfc.mode != chan->mode)
3502 return -ECONNREFUSED;
3503
3504 chan->fcs = 0;
3505
3506 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3507 sizeof(rfc), (unsigned long) &rfc);
3508 break;
3509
3510 case L2CAP_CONF_EWS:
3511 chan->ack_win = min_t(u16, val, chan->ack_win);
3512 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3513 chan->tx_win);
3514 break;
3515
3516 case L2CAP_CONF_EFS:
3517 if (olen == sizeof(efs))
3518 memcpy(&efs, (void *)val, olen);
3519
3520 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3521 efs.stype != L2CAP_SERV_NOTRAFIC &&
3522 efs.stype != chan->local_stype)
3523 return -ECONNREFUSED;
3524
3525 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3526 (unsigned long) &efs);
3527 break;
3528
3529 case L2CAP_CONF_FCS:
3530 if (*result == L2CAP_CONF_PENDING)
3531 if (val == L2CAP_FCS_NONE)
3532 set_bit(CONF_RECV_NO_FCS,
3533 &chan->conf_state);
3534 break;
3535 }
3536 }
3537
3538 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3539 return -ECONNREFUSED;
3540
3541 chan->mode = rfc.mode;
3542
3543 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3544 switch (rfc.mode) {
3545 case L2CAP_MODE_ERTM:
3546 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3547 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3548 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3549 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3550 chan->ack_win = min_t(u16, chan->ack_win,
3551 rfc.txwin_size);
3552
3553 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3554 chan->local_msdu = le16_to_cpu(efs.msdu);
3555 chan->local_sdu_itime =
3556 le32_to_cpu(efs.sdu_itime);
3557 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3558 chan->local_flush_to =
3559 le32_to_cpu(efs.flush_to);
3560 }
3561 break;
3562
3563 case L2CAP_MODE_STREAMING:
3564 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3565 }
3566 }
3567
3568 req->dcid = cpu_to_le16(chan->dcid);
3569 req->flags = cpu_to_le16(0);
3570
3571 return ptr - data;
3572 }
3573
3574 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3575 u16 result, u16 flags)
3576 {
3577 struct l2cap_conf_rsp *rsp = data;
3578 void *ptr = rsp->data;
3579
3580 BT_DBG("chan %p", chan);
3581
3582 rsp->scid = cpu_to_le16(chan->dcid);
3583 rsp->result = cpu_to_le16(result);
3584 rsp->flags = cpu_to_le16(flags);
3585
3586 return ptr - data;
3587 }
3588
3589 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3590 {
3591 struct l2cap_le_conn_rsp rsp;
3592 struct l2cap_conn *conn = chan->conn;
3593
3594 BT_DBG("chan %p", chan);
3595
3596 rsp.dcid = cpu_to_le16(chan->scid);
3597 rsp.mtu = cpu_to_le16(chan->imtu);
3598 rsp.mps = cpu_to_le16(chan->mps);
3599 rsp.credits = cpu_to_le16(chan->rx_credits);
3600 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3601
3602 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3603 &rsp);
3604 }
3605
3606 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3607 {
3608 struct l2cap_conn_rsp rsp;
3609 struct l2cap_conn *conn = chan->conn;
3610 u8 buf[128];
3611 u8 rsp_code;
3612
3613 rsp.scid = cpu_to_le16(chan->dcid);
3614 rsp.dcid = cpu_to_le16(chan->scid);
3615 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3616 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3617
3618 if (chan->hs_hcon)
3619 rsp_code = L2CAP_CREATE_CHAN_RSP;
3620 else
3621 rsp_code = L2CAP_CONN_RSP;
3622
3623 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3624
3625 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3626
3627 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3628 return;
3629
3630 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3631 l2cap_build_conf_req(chan, buf), buf);
3632 chan->num_conf_req++;
3633 }
3634
3635 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3636 {
3637 int type, olen;
3638 unsigned long val;
3639 /* Use sane default values in case a misbehaving remote device
3640 * did not send an RFC or extended window size option.
3641 */
3642 u16 txwin_ext = chan->ack_win;
3643 struct l2cap_conf_rfc rfc = {
3644 .mode = chan->mode,
3645 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3646 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3647 .max_pdu_size = cpu_to_le16(chan->imtu),
3648 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3649 };
3650
3651 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3652
3653 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3654 return;
3655
3656 while (len >= L2CAP_CONF_OPT_SIZE) {
3657 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3658
3659 switch (type) {
3660 case L2CAP_CONF_RFC:
3661 if (olen == sizeof(rfc))
3662 memcpy(&rfc, (void *)val, olen);
3663 break;
3664 case L2CAP_CONF_EWS:
3665 txwin_ext = val;
3666 break;
3667 }
3668 }
3669
3670 switch (rfc.mode) {
3671 case L2CAP_MODE_ERTM:
3672 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3673 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3674 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3675 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3676 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3677 else
3678 chan->ack_win = min_t(u16, chan->ack_win,
3679 rfc.txwin_size);
3680 break;
3681 case L2CAP_MODE_STREAMING:
3682 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3683 }
3684 }
3685
3686 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3687 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3688 u8 *data)
3689 {
3690 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3691
3692 if (cmd_len < sizeof(*rej))
3693 return -EPROTO;
3694
3695 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3696 return 0;
3697
3698 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3699 cmd->ident == conn->info_ident) {
3700 cancel_delayed_work(&conn->info_timer);
3701
3702 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3703 conn->info_ident = 0;
3704
3705 l2cap_conn_start(conn);
3706 }
3707
3708 return 0;
3709 }
3710
/* Handle an incoming Connection Request (or an AMP Create Channel
 * Request when @rsp_code/@amp_id say so): look up a listening channel
 * for the requested PSM, create a child channel, run the security
 * checks and send the response.
 *
 * Returns the newly created channel, or NULL when the request was
 * refused (a negative response is still sent in that case).
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid
	 * (the peer's scid becomes our dcid)
	 */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the request ident so a deferred accept can answer it */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace must authorize this connection;
				 * answer "pending" for now.
				 */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security elevation in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still outstanding */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off the feature mask exchange that is blocking us */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		/* Connection accepted: start configuration right away */
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3845
/* Handle an L2CAP Connection Request over BR/EDR.  Notifies the
 * management interface that the device is connected (first time only)
 * and defers the real work to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* test_and_set_bit ensures mgmt is told about a connection once */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	/* amp_id 0 (AMP_ID_BREDR): plain BR/EDR channel */
	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3866
/* Handle a Connection Response or Create Channel Response.  Looks up
 * our pending channel by the returned scid (or, while the response is
 * still pending and carries no scid, by the ident of our request) and
 * advances its state machine.
 *
 * Returns 0 on success, -EPROTO for a short command or -EBADSLT when
 * no matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No source CID yet: match on the ident we used */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the configure request only once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal: tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3939
3940 static inline void set_default_fcs(struct l2cap_chan *chan)
3941 {
3942 /* FCS is enabled only in ERTM or streaming mode, if one or both
3943 * sides request it.
3944 */
3945 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3946 chan->fcs = L2CAP_FCS_NONE;
3947 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3948 chan->fcs = L2CAP_FCS_CRC16;
3949 }
3950
3951 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3952 u8 ident, u16 flags)
3953 {
3954 struct l2cap_conn *conn = chan->conn;
3955
3956 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3957 flags);
3958
3959 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3960 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3961
3962 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3963 l2cap_build_conf_rsp(chan, data,
3964 L2CAP_CONF_SUCCESS, flags), data);
3965 }
3966
3967 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3968 u16 scid, u16 dcid)
3969 {
3970 struct l2cap_cmd_rej_cid rej;
3971
3972 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3973 rej.scid = __cpu_to_le16(scid);
3974 rej.dcid = __cpu_to_le16(dcid);
3975
3976 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3977 }
3978
/* Handle a Configure Request for one of our channels.  Option payloads
 * may be split over several requests (continuation flag set); they are
 * accumulated in chan->conf_req and parsed once the final fragment
 * arrives.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions are configured: bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel; for an AMP channel the
		 * response waits for the logical link (see
		 * l2cap_logical_finish_create), so only save the ident.
		 */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4087
/* Handle a Configure Response for one of our channels and advance (or
 * abort) the configuration state machine accordingly.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Apply the agreed RFC/EWS parameters */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR can answer now; AMP must wait for the
			 * logical link and only records the ident.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many negotiation rounds, give up and
		 * disconnect like any other failure result
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4199
/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear the channel down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Extra reference keeps the channel alive across l2cap_chan_del()
	 * until ops->close() has run (close is called unlocked).
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4246
/* Handle a Disconnection Response to a request we sent: finish tearing
 * down the channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Extra reference keeps the channel alive across l2cap_chan_del()
	 * until ops->close() has run (close is called unlocked).
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4285
/* Handle an Information Request: answer with the feature mask, the
 * fixed channel map, or "not supported" for unknown info types.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* ERTM/streaming/FCS are advertised unless disabled via
		 * module parameter; extended flow/window need high-speed
		 * (AMP) support.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE(review): this mutates the shared global
		 * l2cap_fixed_chan[] based on the per-connection
		 * hs_enabled flag; two connections with different
		 * settings can race and report a stale A2MP bit.  A
		 * per-connection copy of the mask would be safer —
		 * confirm before relying on this.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4340
/* Handle an Information Response for our outstanding request.  Once the
 * feature mask (and, when supported, the fixed channel map) has been
 * collected, pending channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Give up on the exchange but let channels proceed */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed channel query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4403
/* Handle an AMP Create Channel Request.  For controller id 0 this is
 * treated like an ordinary BR/EDR connection; otherwise the named AMP
 * controller is validated and the new channel is bound to its
 * high-speed link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The physical AMP link must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is not used over AMP links */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4480
4481 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4482 {
4483 struct l2cap_move_chan_req req;
4484 u8 ident;
4485
4486 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4487
4488 ident = l2cap_get_ident(chan->conn);
4489 chan->ident = ident;
4490
4491 req.icid = cpu_to_le16(chan->scid);
4492 req.dest_amp_id = dest_amp_id;
4493
4494 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4495 &req);
4496
4497 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4498 }
4499
4500 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4501 {
4502 struct l2cap_move_chan_rsp rsp;
4503
4504 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4505
4506 rsp.icid = cpu_to_le16(chan->dcid);
4507 rsp.result = cpu_to_le16(result);
4508
4509 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4510 sizeof(rsp), &rsp);
4511 }
4512
4513 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4514 {
4515 struct l2cap_move_chan_cfm cfm;
4516
4517 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4518
4519 chan->ident = l2cap_get_ident(chan->conn);
4520
4521 cfm.icid = cpu_to_le16(chan->scid);
4522 cfm.result = cpu_to_le16(result);
4523
4524 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4525 sizeof(cfm), &cfm);
4526
4527 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4528 }
4529
4530 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4531 {
4532 struct l2cap_move_chan_cfm cfm;
4533
4534 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4535
4536 cfm.icid = cpu_to_le16(icid);
4537 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4538
4539 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4540 sizeof(cfm), &cfm);
4541 }
4542
4543 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4544 u16 icid)
4545 {
4546 struct l2cap_move_chan_cfm_rsp rsp;
4547
4548 BT_DBG("icid 0x%4.4x", icid);
4549
4550 rsp.icid = cpu_to_le16(icid);
4551 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4552 }
4553
4554 static void __release_logical_link(struct l2cap_chan *chan)
4555 {
4556 chan->hs_hchan = NULL;
4557 chan->hs_hcon = NULL;
4558
4559 /* Placeholder - release the logical link */
4560 }
4561
/* React to a failed logical link setup: abort channel creation, or
 * unwind an in-progress channel move depending on our role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4592
/* Logical link is up for an AMP channel being created: send the
 * deferred configure response (using the ident saved in
 * l2cap_config_req) and finish configuration if the input side is
 * already done.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;	/* scratch buffer for the response */

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4615
/* Logical link is up during a channel move: advance the move state
 * machine according to our role and current wait state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until the local receiver is free again */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4649
/* Call with chan locked */
/* Logical link completion callback: dispatch to failure handling, to
 * channel-creation completion, or to move completion based on the
 * channel's current state.
 *
 * @status: non-zero means the logical link setup failed.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4670
4671 void l2cap_move_start(struct l2cap_chan *chan)
4672 {
4673 BT_DBG("chan %p", chan);
4674
4675 if (chan->local_amp_id == AMP_ID_BREDR) {
4676 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4677 return;
4678 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4679 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4680 /* Placeholder - start physical link setup */
4681 } else {
4682 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4683 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4684 chan->move_id = 0;
4685 l2cap_move_setup(chan);
4686 l2cap_send_move_chan_req(chan, 0);
4687 }
4688 }
4689
/* Finish creating an AMP channel once the physical link attempt has a
 * @result: for an outgoing channel either proceed on AMP or fall back
 * to BR/EDR; for an incoming one send the (possibly negative) Create
 * Channel Response and, on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used over AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: move to CONFIG and send the first
			 * configure request
			 */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4741
/* Initiator side: prepare the channel for moving (must happen before
 * the move bookkeeping below), record the target controller, then
 * send the Move Channel request and wait for the remote's response.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
4751
/* Responder side: answer a Move Channel request once the physical
 * link result is known.  The logical link lookup is still a
 * placeholder, so hchan is currently always NULL and the request is
 * always refused with L2CAP_MR_NOT_ALLOWED; the other branches are
 * the intended behavior once the lookup exists.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4776
4777 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4778 {
4779 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4780 u8 rsp_result;
4781 if (result == -EINVAL)
4782 rsp_result = L2CAP_MR_BAD_ID;
4783 else
4784 rsp_result = L2CAP_MR_NOT_ALLOWED;
4785
4786 l2cap_send_move_chan_rsp(chan, rsp_result);
4787 }
4788
4789 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4790 chan->move_state = L2CAP_MOVE_STABLE;
4791
4792 /* Restart data transmission */
4793 l2cap_ertm_send(chan);
4794 }
4795
/* Completion handler for an AMP physical link request.
 *
 * Invoke with locked chan.  NOTE: if the channel is already
 * disconnecting or closed this function unlocks the channel itself
 * and returns, so the caller must not unlock again on that path.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Not connected yet: this completes channel creation */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		/* Connected channel: continue the move per our role */
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4829
/* Handle an incoming L2CAP Move Channel request.
 *
 * Validates the request (dynamic CID, channel policy, ERTM/streaming
 * mode, target controller), detects move collisions, and either
 * refuses the move or becomes the move responder.  A Move Channel
 * response is always sent; returns 0 on handled requests, -EPROTO on
 * a malformed command, -EINVAL when high-speed is disabled.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	/* On success the channel is returned locked; it is unlocked
	 * at the end of this function.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: refuse by ICID only */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic, AMP-eligible, ERTM/streaming channels may move */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Target AMP controller must exist, be an AMP, and be up */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4927
/* Advance the move state machine after a successful or pending Move
 * Channel response (result is L2CAP_MR_SUCCESS or L2CAP_MR_PEND).
 * Any unexpected move state terminates the move with an UNCONFIRMED
 * confirm.  Unlocks the channel before returning.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* No channel found: confirm by ICID alone */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result restarts the extended response timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5017
/* Handle a failed Move Channel response.  On a collision the local
 * initiator yields and becomes the responder for the remote's move;
 * otherwise the move is cancelled and the channel stays on its
 * current controller.  Always sends an UNCONFIRMED move confirm and
 * unlocks the channel before returning.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5046
5047 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5048 struct l2cap_cmd_hdr *cmd,
5049 u16 cmd_len, void *data)
5050 {
5051 struct l2cap_move_chan_rsp *rsp = data;
5052 u16 icid, result;
5053
5054 if (cmd_len != sizeof(*rsp))
5055 return -EPROTO;
5056
5057 icid = le16_to_cpu(rsp->icid);
5058 result = le16_to_cpu(rsp->result);
5059
5060 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5061
5062 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5063 l2cap_move_continue(conn, icid, result);
5064 else
5065 l2cap_move_fail(conn, cmd->ident, icid, result);
5066
5067 return 0;
5068 }
5069
/* Handle an incoming L2CAP Move Channel confirm.  If the channel was
 * waiting for a confirm, commit (CONFIRMED) or roll back (anything
 * else) the controller switch, then finish the move.  A confirm
 * response is always sent, even for an unknown ICID.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			/* Moved back to BR/EDR: drop the AMP logical link */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5111
/* Handle an incoming L2CAP Move Channel confirm response - the final
 * packet of a move.  Commits the controller switch, releases the AMP
 * logical link when the channel ends up back on BR/EDR, and finishes
 * the move.  Unlocks the channel before returning.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5146
/* Handle an LE Connection Parameter Update request.  Only valid when
 * we are the LE master.  The parameters are validated, a response is
 * sent, and accepted parameters are pushed to the controller and
 * reported to mgmt.  Returns -EINVAL on wrong role, -EPROTO on a
 * malformed command, 0 otherwise.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may apply new connection parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the parameters and let mgmt decide whether to
		 * store them for future connections.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5196
/* Handle an LE Credit Based Connection response.  Looks up the
 * pending channel by command ident; on success the remote's dcid,
 * MTU, MPS and initial credits are recorded and the channel becomes
 * ready, on any failure the channel is deleted.  Lock order:
 * conn->chan_lock, then the channel lock.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* MTU/MPS below 23 are rejected as invalid for LE CoC */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5255
/* Dispatch a single BR/EDR signaling command to its handler.
 * Returns a negative error for commands whose handler fails or for
 * unknown opcodes; the caller turns errors into a Command Reject.
 * Echo requests are answered inline by mirroring the payload.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5335
/* Handle an LE Credit Based Connection request.  Finds a listening
 * channel for the PSM, checks security and CID uniqueness, creates
 * and registers the new channel, then sends the connection response.
 * The L2CAP_CR_PEND result is used internally to suppress the
 * response when setup is deferred to userspace.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* MTU/MPS below 23 are rejected as invalid for LE CoC */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Populate the new channel from the connection and request */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: response is sent later by userspace accept */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5453
5454 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5455 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5456 u8 *data)
5457 {
5458 struct l2cap_le_credits *pkt;
5459 struct l2cap_chan *chan;
5460 u16 cid, credits, max_credits;
5461
5462 if (cmd_len != sizeof(*pkt))
5463 return -EPROTO;
5464
5465 pkt = (struct l2cap_le_credits *) data;
5466 cid = __le16_to_cpu(pkt->cid);
5467 credits = __le16_to_cpu(pkt->credits);
5468
5469 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5470
5471 chan = l2cap_get_chan_by_dcid(conn, cid);
5472 if (!chan)
5473 return -EBADSLT;
5474
5475 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5476 if (credits > max_credits) {
5477 BT_ERR("LE credits overflow");
5478 l2cap_send_disconn_req(chan, ECONNRESET);
5479
5480 /* Return 0 so that we don't trigger an unnecessary
5481 * command reject packet.
5482 */
5483 return 0;
5484 }
5485
5486 chan->tx_credits += credits;
5487
5488 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5489 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5490 chan->tx_credits--;
5491 }
5492
5493 if (chan->tx_credits)
5494 chan->ops->resume(chan);
5495
5496 l2cap_chan_unlock(chan);
5497
5498 return 0;
5499 }
5500
5501 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5502 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5503 u8 *data)
5504 {
5505 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5506 struct l2cap_chan *chan;
5507
5508 if (cmd_len < sizeof(*rej))
5509 return -EPROTO;
5510
5511 mutex_lock(&conn->chan_lock);
5512
5513 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5514 if (!chan)
5515 goto done;
5516
5517 l2cap_chan_lock(chan);
5518 l2cap_chan_del(chan, ECONNREFUSED);
5519 l2cap_chan_unlock(chan);
5520
5521 done:
5522 mutex_unlock(&conn->chan_lock);
5523 return 0;
5524 }
5525
/* Dispatch a single LE signaling command to its handler.  Unknown
 * opcodes return -EINVAL so the caller sends a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do for the response */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5572
/* Process an skb received on the LE signaling channel.  An LE
 * signaling PDU carries exactly one command; malformed or
 * wrong-link-type packets are dropped, failing commands are answered
 * with a Command Reject.  Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Length must match exactly and ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): err here is a command handling failure,
		 * not a link type mismatch - the message text is a
		 * copy/paste artifact.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5613
/* Process an skb received on the BR/EDR signaling channel.  Unlike
 * LE, one ACL signaling packet may carry several commands, so the
 * payload is walked command by command.  Failing commands are
 * answered with a Command Reject; a corrupted header stops the walk.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before processing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Claimed length must fit the remainder; ident 0 is reserved */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5662
/* Verify (and strip) the trailing FCS of a received ERTM/streaming
 * frame.  Returns 0 if the FCS matches or the channel does not use
 * CRC16, -EBADMSG on a mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off skb->len first; the two FCS bytes
		 * are still in the buffer at skb->data + skb->len and
		 * are read from there.  The CRC covers the L2CAP
		 * header (located just before skb->data) plus payload.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5683
/* Answer a poll (P=1) by sending a frame with the F-bit set: RNR if
 * we are locally busy, otherwise pending I-frames (which will carry
 * the F-bit via CONN_SEND_FBIT), or a final RR if nothing else got
 * to carry the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just un-busied: restart retransmission timer if
	 * frames are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5717
/* Append new_frag to skb's fragment list, keeping *last_frag pointing
 * at the list tail and updating the head skb's length accounting.
 * Takes ownership of new_frag.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Fold the fragment into the head skb's accounting */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5736
/* Reassemble a segmented SDU from incoming I-frames according to the
 * frame's SAR bits and deliver complete SDUs via chan->ops->recv().
 * skb ownership: frames consumed into chan->sdu (or handed to recv)
 * have skb set to NULL so the error path only frees what is still
 * owned here.  Returns 0 on success or a negative error
 * (unexpected SAR state -EINVAL, oversized SDU -EMSGSIZE, or the
 * recv callback's error); on error any partial SDU is discarded.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly in progress makes this frame invalid */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First two bytes carry the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start frame must not already contain the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Continuation must leave the SDU still incomplete */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end frame must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5818
/* Placeholder: resegmenting the transmit queue (e.g. after an MPS
 * change during a channel move) is not implemented yet.  Always
 * reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5824
5825 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5826 {
5827 u8 event;
5828
5829 if (chan->mode != L2CAP_MODE_ERTM)
5830 return;
5831
5832 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5833 l2cap_tx(chan, NULL, NULL, event);
5834 }
5835
/* Drain the SREJ queue: deliver consecutively-sequenced buffered
 * I-frames to l2cap_reassemble_sdu() until the next expected txseq
 * is missing, local-busy is raised, or reassembly fails.  When the
 * queue empties, leave SREJ_SENT state and acknowledge.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: stop and keep waiting for retransmission */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5869
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame.  An SREJ for the next (never sent) sequence number or for
 * a frame past its retry limit disconnects the channel.  The
 * CONN_SREJ_ACT / srej_save_reqseq bookkeeping suppresses a duplicate
 * retransmission when the matching final SREJ arrives later.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* SREJ with P=1: retransmit with F-bit and resume TX */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip retransmit if this final SREJ matches
			 * the one already acted on under WAIT_F.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5927
/* Handle a received REJ S-frame: retransmit all unacknowledged
 * I-frames from reqseq onward.  A REJ for the next (never sent)
 * sequence number or for a frame past its retry limit disconnects
 * the channel.  CONN_REJ_ACT suppresses a duplicate full
 * retransmission when the matching final REJ arrives later.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Final REJ already acted on under WAIT_F: skip */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5964
/* Classify the txseq of a received I-frame relative to the RX window.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications used by the RX state
 * machine: EXPECTED, UNEXPECTED (gap -> triggers SREJ), DUPLICATE,
 * INVALID, INVALID_IGNORE, or the SREJ-specific variants when the
 * channel is in SREJ_SENT recovery.  All comparisons use modular
 * sequence arithmetic via __seq_offset() against last_acked_seq.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Extra classifications only apply while SREJ recovery is active. */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of srej_list is the retransmission we need next. */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq: an already-received frame. */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6050
/* RECV-state handler of the ERTM receive state machine.
 *
 * Processes one event (I-frame or RR/RNR/REJ/SREJ S-frame) while no
 * SREJ recovery is in progress.  @skb ownership: it is either queued
 * for later use (skb_in_use set) or freed before returning.  Returns
 * 0 on success or a negative error from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy, expected frames are dropped
			 * without advancing expected_tx_seq; they will be
			 * recovered via SREJ when busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					/* Clear F so retransmitted frames
					 * don't carry a stale final bit.
					 */
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the piggybacked reqseq
			 * is still useful to the transmitter.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just exited busy: restart the retransmit
			 * timer if frames are still outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Any skb we did not queue above is consumed here. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6184
/* SREJ_SENT-state handler of the ERTM receive state machine.
 *
 * Runs while we are recovering missing I-frames via SREJ.  In-order
 * and retransmitted frames are parked on srej_q until the gaps close
 * (l2cap_rx_queued_iframes then drains them).  @skb ownership follows
 * the same rule as l2cap_rx_state_recv: queued or freed on return.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we were waiting for; drop it
			 * from the pending-SREJ list and try to drain the
			 * queued frames now that a gap may have closed.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with an F-bit SREJ for the
			 * newest outstanding gap.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Plain RNR: acknowledge with a bare RR. */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Any skb we did not queue above is consumed here. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6327
6328 static int l2cap_finish_move(struct l2cap_chan *chan)
6329 {
6330 BT_DBG("chan %p", chan);
6331
6332 chan->rx_state = L2CAP_RX_STATE_RECV;
6333
6334 if (chan->hs_hcon)
6335 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6336 else
6337 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6338
6339 return l2cap_resegment(chan);
6340 }
6341
/* WAIT_P-state handler: after a channel move we wait for a frame with
 * the poll (P) bit before resuming.  On P=1, resynchronize the
 * transmitter to the peer's reqseq, finish the move (MTU/resegment),
 * answer with an F-bit frame, and re-dispatch the triggering S-frame
 * through the RECV handler.  Non-poll frames and I-frames are protocol
 * errors here (-EPROTO).
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Re-run the S-frame event in RECV state (skb already handled). */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6379
/* WAIT_F-state handler: after a channel move we wait for a frame with
 * the final (F) bit.  On F=1, return to RECV, resynchronize the
 * transmitter to the peer's reqseq, adopt the new controller's MTU,
 * resegment pending data, and process the frame through the RECV
 * handler.  A frame without F here is a protocol error (-EPROTO).
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Moved channels use the AMP controller's block MTU. */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6417
6418 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6419 {
6420 /* Make sure reqseq is for a packet that has been sent but not acked */
6421 u16 unacked;
6422
6423 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6424 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6425 }
6426
6427 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6428 struct sk_buff *skb, u8 event)
6429 {
6430 int err = 0;
6431
6432 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6433 control, skb, event, chan->rx_state);
6434
6435 if (__valid_reqseq(chan, control->reqseq)) {
6436 switch (chan->rx_state) {
6437 case L2CAP_RX_STATE_RECV:
6438 err = l2cap_rx_state_recv(chan, control, skb, event);
6439 break;
6440 case L2CAP_RX_STATE_SREJ_SENT:
6441 err = l2cap_rx_state_srej_sent(chan, control, skb,
6442 event);
6443 break;
6444 case L2CAP_RX_STATE_WAIT_P:
6445 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6446 break;
6447 case L2CAP_RX_STATE_WAIT_F:
6448 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6449 break;
6450 default:
6451 /* shut it down */
6452 break;
6453 }
6454 } else {
6455 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6456 control->reqseq, chan->next_tx_seq,
6457 chan->expected_ack_seq);
6458 l2cap_send_disconn_req(chan, ECONNRESET);
6459 }
6460
6461 return err;
6462 }
6463
/* Receive one I-frame in streaming mode.
 *
 * Streaming mode has no retransmission: an in-sequence frame is fed to
 * SDU reassembly; anything else discards both the frame and any
 * partially reassembled SDU.  expected_tx_seq/last_acked_seq are
 * unconditionally resynchronized to the received txseq.  Always
 * returns 0 (err is never set).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence: abandon any partial SDU and the frame. */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6501
/* Validate and dispatch one ERTM/streaming-mode PDU.
 *
 * Unpacks the control field, verifies the FCS, checks the payload
 * against the negotiated MPS, validates the F/P bit combinations for
 * the current TX state, and hands the frame to l2cap_rx() (ERTM) or
 * l2cap_stream_rx() (streaming).  Invalid frames are silently dropped;
 * protocol violations disconnect the channel.  Always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU-length header of a SAR start
	 * fragment and the trailing FCS, if present.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-frame "super" field to an RX event. */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6589
/* Replenish the peer's LE flow-control credits for this channel.
 *
 * Sends an LE Flow Control Credit packet topping rx_credits back up to
 * le_max_credits, but only once the remaining credits have dropped
 * below half of the initial amount (avoids a credit packet per PDU).
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6615
/* Receive one LE credit-based flow control PDU and reassemble SDUs.
 *
 * Consumes a credit per PDU (disconnecting if the peer sent without
 * credits) and returns fresh credits when running low.  The first PDU
 * of an SDU starts with a 2-byte SDU length; subsequent fragments are
 * appended until the SDU is complete and delivered via ops->recv().
 *
 * skb ownership: freed internally on all error paths after the credit
 * check, hence those paths return 0 (see comment at the end).  Only
 * the early -ENOBUFS returns leave the skb for the caller to drop.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries the total SDU length. */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver immediately. */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* Fragment absorbed into the SDU; don't free it below. */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			/* SDU ownership transferred to ops->recv(). */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6705
/* Deliver an incoming data frame to the channel identified by @cid.
 *
 * Looks up the channel (creating the A2MP channel on demand for
 * L2CAP_CID_A2MP) and dispatches the skb according to the channel
 * mode.  l2cap_get_chan_by_scid() returns the channel locked; it is
 * unlocked at "done".  The skb is always consumed: passed on, or
 * freed at "drop".
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* LE path frees the skb itself on post-check errors;
		 * a negative return means we must still drop it here.
		 */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb in all cases. */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6772
/* Deliver a connectionless (G-frame) packet to a matching PSM listener.
 *
 * Only valid on ACL links.  The global channel lookup returns a held
 * reference which is released on every path; the skb is consumed here
 * unless ops->recv() accepts it.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	/* recv() takes ownership of the skb on success. */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6809
/* Top-level L2CAP frame demultiplexer for a connection.
 *
 * Frames arriving before the HCI link is fully connected are parked on
 * conn->pending_rx and replayed later by process_pending_rx().  After
 * basic header/length validation and an LE blacklist check, the frame
 * is routed by CID to the signaling, connectionless, LE-signaling or
 * data-channel handlers.  All paths consume the skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_type(hcon, hcon->dst_type))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6864
6865 static void process_pending_rx(struct work_struct *work)
6866 {
6867 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6868 pending_rx_work);
6869 struct sk_buff *skb;
6870
6871 BT_DBG("");
6872
6873 while ((skb = skb_dequeue(&conn->pending_rx)))
6874 l2cap_recv_frame(conn, skb);
6875 }
6876
/* Get or create the l2cap_conn object attached to an HCI connection.
 *
 * Returns the existing conn if one is already attached, otherwise
 * allocates and initializes a new one (refcount, MTU from the link
 * type, locks, channel list, info timer, pending-rx machinery) and a
 * companion hci_chan.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* conn holds a reference on the underlying hci_conn. */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links use le_mtu when the controller reports one;
	 * everything else (and LE with le_mtu == 0) uses acl_mtu.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6935
6936 static bool is_valid_psm(u16 psm, u8 dst_type) {
6937 if (!psm)
6938 return false;
6939
6940 if (bdaddr_type_is_le(dst_type))
6941 return (psm <= 0x00ff);
6942
6943 /* PSM must be odd and lsb of upper byte must be 0 */
6944 return ((psm & 0x0101) == 0x0001);
6945 }
6946
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 *
 * Validates the psm/cid combination against the channel type and mode,
 * creates (or reuses) the underlying HCI connection -- LE or ACL
 * depending on @dst_type -- attaches the channel to the l2cap_conn,
 * and moves the channel to BT_CONNECT (or directly onward if the link
 * is already up).  Returns 0 on success or having already started
 * connecting, or a negative errno.
 *
 * Locking: takes hdev lock then the channel lock; both released at
 * "done".  chan_list_lock is taken briefly to release chan->sport.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* Raw channels may connect without a valid PSM or CID. */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we connect as slave, otherwise master. */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* The requested destination CID must not already be in use. */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7104 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7105
7106 /* ---- L2CAP interface with lower layer (HCI) ---- */
7107
7108 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7109 {
7110 int exact = 0, lm1 = 0, lm2 = 0;
7111 struct l2cap_chan *c;
7112
7113 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7114
7115 /* Find listening sockets and check their link_mode */
7116 read_lock(&chan_list_lock);
7117 list_for_each_entry(c, &chan_list, global_l) {
7118 if (c->state != BT_LISTEN)
7119 continue;
7120
7121 if (!bacmp(&c->src, &hdev->bdaddr)) {
7122 lm1 |= HCI_LM_ACCEPT;
7123 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7124 lm1 |= HCI_LM_MASTER;
7125 exact++;
7126 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7127 lm2 |= HCI_LM_ACCEPT;
7128 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7129 lm2 |= HCI_LM_MASTER;
7130 }
7131 }
7132 read_unlock(&chan_list_lock);
7133
7134 return exact ? lm1 : lm2;
7135 }
7136
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * The returned channel is held (l2cap_chan_hold); the caller must
 * release it.  chan_list_lock is only held during the scan itself, so
 * the caller may sleep between successive calls.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  bdaddr_t *src, u8 link_type)
{
	read_lock(&chan_list_lock);

	/* Resume after the previous hit, or start at the list head. */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		/* Source address type must match the link type. */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;
		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7172
/* Connection-complete callback from the HCI layer. On success, create
 * the L2CAP connection state for @hcon and spawn child channels for all
 * matching listening fixed channels; on failure, tear everything down.
 */
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	/* Non-zero HCI status: the link setup failed, drop any L2CAP
	 * state associated with this hcon and bail out.
	 */
	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* Ask the listener to spawn a child channel for this link
		 * and attach it with the connection's addresses.
		 */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_type(hcon, hcon->src_type);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Look up the successor before dropping the reference the
		 * previous lookup took on pchan.
		 */
		next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
					       hcon->type);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7231
7232 int l2cap_disconn_ind(struct hci_conn *hcon)
7233 {
7234 struct l2cap_conn *conn = hcon->l2cap_data;
7235
7236 BT_DBG("hcon %p", hcon);
7237
7238 if (!conn)
7239 return HCI_ERROR_REMOTE_USER_TERM;
7240 return conn->disc_reason;
7241 }
7242
/* Disconnect-complete callback from the HCI layer: tear down all L2CAP
 * state for this link, translating the HCI reason to an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7249
7250 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7251 {
7252 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7253 return;
7254
7255 if (encrypt == 0x00) {
7256 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7257 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7258 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7259 chan->sec_level == BT_SECURITY_FIPS)
7260 l2cap_chan_close(chan, ECONNREFUSED);
7261 } else {
7262 if (chan->sec_level == BT_SECURITY_MEDIUM)
7263 __clear_chan_timer(chan);
7264 }
7265 }
7266
/* Security (authentication/encryption) change callback from the HCI
 * layer. Walks every channel on the connection and advances its state
 * machine according to @status (0 = security procedure succeeded) and
 * @encrypt (current link encryption). Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels do their own security handling */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Security succeeded on an encrypted link: adopt the
		 * link's security level for this channel.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Channels without a pending connect are not waiting on
		 * this security result.
		 */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Already up: resume data and re-check encryption timers */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security: start
			 * it now, or arm the disconnect timer on failure.
			 */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was deferred pending security:
			 * send the connection response now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted: immediately kick off configuration if
			 * we have not sent our config request yet.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7358
/* ACL data receive path from the HCI layer. Reassembles L2CAP frames
 * from ACL fragments: a start fragment carries the Basic L2CAP header
 * with the total length, continuation fragments fill in the rest.
 * Consumes @skb on every path. Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means the
		 * previous frame was truncated: discard it and flag the
		 * connection as unreliable.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		/* The fragment must not exceed the advertised total length */
		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* rx_len tracks how many bytes are still outstanding */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Overlong continuation: abandon the whole reassembly */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7463
7464 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7465 {
7466 struct l2cap_chan *c;
7467
7468 read_lock(&chan_list_lock);
7469
7470 list_for_each_entry(c, &chan_list, global_l) {
7471 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7472 &c->src, &c->dst,
7473 c->state, __le16_to_cpu(c->psm),
7474 c->scid, c->dcid, c->imtu, c->omtu,
7475 c->sec_level, c->mode);
7476 }
7477
7478 read_unlock(&chan_list_lock);
7479
7480 return 0;
7481 }
7482
/* debugfs open callback: bind the seq_file single-record iterator to
 * l2cap_debugfs_show.
 */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7487
/* File operations for the "l2cap" debugfs entry; reads are served by
 * the standard seq_file helpers.
 */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the "l2cap" debugfs file, removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7496
7497 int __init l2cap_init(void)
7498 {
7499 int err;
7500
7501 err = l2cap_init_sockets();
7502 if (err < 0)
7503 return err;
7504
7505 if (IS_ERR_OR_NULL(bt_debugfs))
7506 return 0;
7507
7508 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7509 NULL, &l2cap_debugfs_fops);
7510
7511 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7512 &le_max_credits);
7513 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7514 &le_default_mps);
7515
7516 return 0;
7517 }
7518
/* Module exit: remove the debugfs entry before unregistering the
 * socket layer, mirroring l2cap_init() in reverse order.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7524
/* Expose the ERTM kill switch as a writable module parameter */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");