Merge tag 'sound-4.0-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai...
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
44 #define LE_FLOWCTL_MAX_CREDITS 65535
45
46 bool disable_ertm;
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52
53 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
55
56 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 u8 code, u8 ident, u16 dlen, void *data);
58 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 void *data);
60 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
61 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62
63 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 struct sk_buff_head *skbs, u8 event);
65
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 {
68 if (link_type == LE_LINK) {
69 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 return BDADDR_LE_PUBLIC;
71 else
72 return BDADDR_LE_RANDOM;
73 }
74
75 return BDADDR_BREDR;
76 }
77
78 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
79 {
80 return bdaddr_type(hcon->type, hcon->src_type);
81 }
82
83 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
84 {
85 return bdaddr_type(hcon->type, hcon->dst_type);
86 }
87
88 /* ---- L2CAP channels ---- */
89
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 u16 cid)
92 {
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
98 }
99 return NULL;
100 }
101
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
110 }
111 return NULL;
112 }
113
/* Find channel with given SCID.
 * Returns locked channel.
 *
 * NOTE(review): the channel is returned locked but no extra reference
 * is taken before conn->chan_lock is dropped — confirm every caller
 * already holds its own reference to the channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
129
/* Find channel with given DCID.
 * Returns locked channel.
 *
 * NOTE(review): as with l2cap_get_chan_by_scid(), no reference is
 * taken before conn->chan_lock is released — verify callers hold one.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
146
147 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 u8 ident)
149 {
150 struct l2cap_chan *c;
151
152 list_for_each_entry(c, &conn->chan_l, list) {
153 if (c->ident == ident)
154 return c;
155 }
156 return NULL;
157 }
158
159 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
160 u8 ident)
161 {
162 struct l2cap_chan *c;
163
164 mutex_lock(&conn->chan_lock);
165 c = __l2cap_get_chan_by_ident(conn, ident);
166 if (c)
167 l2cap_chan_lock(c);
168 mutex_unlock(&conn->chan_lock);
169
170 return c;
171 }
172
173 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
174 {
175 struct l2cap_chan *c;
176
177 list_for_each_entry(c, &chan_list, global_l) {
178 if (c->sport == psm && !bacmp(&c->src, src))
179 return c;
180 }
181 return NULL;
182 }
183
184 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
185 {
186 int err;
187
188 write_lock(&chan_list_lock);
189
190 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
191 err = -EADDRINUSE;
192 goto done;
193 }
194
195 if (psm) {
196 chan->psm = psm;
197 chan->sport = psm;
198 err = 0;
199 } else {
200 u16 p;
201
202 err = -EINVAL;
203 for (p = 0x1001; p < 0x1100; p += 2)
204 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
205 chan->psm = cpu_to_le16(p);
206 chan->sport = cpu_to_le16(p);
207 err = 0;
208 break;
209 }
210 }
211
212 done:
213 write_unlock(&chan_list_lock);
214 return err;
215 }
216 EXPORT_SYMBOL_GPL(l2cap_add_psm);
217
218 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
219 {
220 write_lock(&chan_list_lock);
221
222 /* Override the defaults (which are for conn-oriented) */
223 chan->omtu = L2CAP_DEFAULT_MTU;
224 chan->chan_type = L2CAP_CHAN_FIXED;
225
226 chan->scid = scid;
227
228 write_unlock(&chan_list_lock);
229
230 return 0;
231 }
232
233 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
234 {
235 u16 cid, dyn_end;
236
237 if (conn->hcon->type == LE_LINK)
238 dyn_end = L2CAP_CID_LE_DYN_END;
239 else
240 dyn_end = L2CAP_CID_DYN_END;
241
242 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
243 if (!__l2cap_get_chan_by_scid(conn, cid))
244 return cid;
245 }
246
247 return 0;
248 }
249
250 static void l2cap_state_change(struct l2cap_chan *chan, int state)
251 {
252 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
253 state_to_string(state));
254
255 chan->state = state;
256 chan->ops->state_change(chan, state, 0);
257 }
258
259 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
260 int state, int err)
261 {
262 chan->state = state;
263 chan->ops->state_change(chan, chan->state, err);
264 }
265
266 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
267 {
268 chan->ops->state_change(chan, chan->state, err);
269 }
270
/* Arm the ERTM retransmission timer.
 *
 * Note this deliberately tests the *monitor* timer, not the retrans
 * timer: while the monitor timer is pending the retransmission timer
 * must not be (re)armed underneath it — presumably per the ERTM
 * timer rules; confirm against the spec.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
279
280 static void __set_monitor_timer(struct l2cap_chan *chan)
281 {
282 __clear_retrans_timer(chan);
283 if (chan->monitor_timeout) {
284 l2cap_set_timer(chan, &chan->monitor_timer,
285 msecs_to_jiffies(chan->monitor_timeout));
286 }
287 }
288
289 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
290 u16 seq)
291 {
292 struct sk_buff *skb;
293
294 skb_queue_walk(head, skb) {
295 if (bt_cb(skb)->control.txseq == seq)
296 return skb;
297 }
298
299 return NULL;
300 }
301
302 /* ---- L2CAP sequence number lists ---- */
303
304 /* For ERTM, ordered lists of sequence numbers must be tracked for
305 * SREJ requests that are received and for frames that are to be
306 * retransmitted. These seq_list functions implement a singly-linked
307 * list in an array, where membership in the list can also be checked
308 * in constant time. Items can also be added to the tail of the list
309 * and removed from the head in constant time, without further memory
310 * allocs or frees.
311 */
312
313 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
314 {
315 size_t alloc_size, i;
316
317 /* Allocated size is a power of 2 to map sequence numbers
318 * (which may be up to 14 bits) in to a smaller array that is
319 * sized for the negotiated ERTM transmit windows.
320 */
321 alloc_size = roundup_pow_of_two(size);
322
323 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
324 if (!seq_list->list)
325 return -ENOMEM;
326
327 seq_list->mask = alloc_size - 1;
328 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
329 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
330 for (i = 0; i < alloc_size; i++)
331 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
332
333 return 0;
334 }
335
336 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
337 {
338 kfree(seq_list->list);
339 }
340
341 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
342 u16 seq)
343 {
344 /* Constant-time check for list membership */
345 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
346 }
347
/* Remove and return the sequence number at the head of the list.
 *
 * The head slot's array entry holds the next element (or the TAIL
 * sentinel for the last one).  NOTE(review): there is no guard for an
 * empty list — callers are presumably expected to check first; confirm.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the successor and clear the popped slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last element: reset to the empty state */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
363
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 u16 i;
367
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 return;
370
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379 {
380 u16 mask = seq_list->mask;
381
382 /* All appends happen in constant time */
383
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 return;
386
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
389 else
390 seq_list->list[seq_list->tail & mask] = seq;
391
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394 }
395
/* Work item run when a channel's chan_timer expires.
 *
 * Lock order: conn->chan_lock is taken before the channel lock, the
 * same order used throughout this file.  The channel lock is dropped
 * before ops->close() is invoked.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Map the state at expiry onto the error reported upward */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drops the reference held for the pending timer (presumably
	 * taken by l2cap_set_timer() — confirm).
	 */
	l2cap_chan_put(chan);
}
425
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 struct l2cap_chan *chan;
429
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 if (!chan)
432 return NULL;
433
434 mutex_init(&chan->lock);
435
436 /* Set default lock nesting level */
437 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
438
439 write_lock(&chan_list_lock);
440 list_add(&chan->global_l, &chan_list);
441 write_unlock(&chan_list_lock);
442
443 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
444
445 chan->state = BT_OPEN;
446
447 kref_init(&chan->kref);
448
449 /* This flag is cleared in l2cap_chan_ready() */
450 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
451
452 BT_DBG("chan %p", chan);
453
454 return chan;
455 }
456 EXPORT_SYMBOL_GPL(l2cap_chan_create);
457
458 static void l2cap_chan_destroy(struct kref *kref)
459 {
460 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
461
462 BT_DBG("chan %p", chan);
463
464 write_lock(&chan_list_lock);
465 list_del(&chan->global_l);
466 write_unlock(&chan_list_lock);
467
468 kfree(chan);
469 }
470
471 void l2cap_chan_hold(struct l2cap_chan *c)
472 {
473 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
474
475 kref_get(&c->kref);
476 }
477
478 void l2cap_chan_put(struct l2cap_chan *c)
479 {
480 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
481
482 kref_put(&c->kref, l2cap_chan_destroy);
483 }
484 EXPORT_SYMBOL_GPL(l2cap_chan_put);
485
486 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
487 {
488 chan->fcs = L2CAP_FCS_CRC16;
489 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
490 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
491 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
492 chan->remote_max_tx = chan->max_tx;
493 chan->remote_tx_win = chan->tx_win;
494 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
495 chan->sec_level = BT_SECURITY_LOW;
496 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
497 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
498 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
499 chan->conf_state = 0;
500
501 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
502 }
503 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
504
505 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
506 {
507 chan->sdu = NULL;
508 chan->sdu_last_frag = NULL;
509 chan->sdu_len = 0;
510 chan->tx_credits = 0;
511 chan->rx_credits = le_max_credits;
512 chan->mps = min_t(u16, chan->imtu, le_default_mps);
513
514 skb_queue_head_init(&chan->tx_q);
515 }
516
/* Attach @chan to @conn: assign CIDs/MTU per channel type, set QoS
 * defaults, take the needed references and link the channel onto the
 * connection's channel list.  Caller holds conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific is set */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort QoS defaults for the extended flow spec */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference for the conn->chan_l membership, dropped in
	 * l2cap_chan_del().
	 */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
568
569 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
570 {
571 mutex_lock(&conn->chan_lock);
572 __l2cap_chan_add(conn, chan);
573 mutex_unlock(&conn->chan_lock);
574 }
575
/* Detach @chan from its connection and release per-mode resources.
 *
 * Undoes __l2cap_chan_add(): removes the channel from conn->chan_l,
 * drops the list reference and, where one was taken, the hci_conn
 * reference.  @err is passed to ops->teardown() for the owner.
 * Caller is expected to hold the appropriate locks.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	/* Tear down a logical AMP link if one is attached */
	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Mode-specific state below only exists once config completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
646
647 static void l2cap_conn_update_id_addr(struct work_struct *work)
648 {
649 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
650 id_addr_update_work);
651 struct hci_conn *hcon = conn->hcon;
652 struct l2cap_chan *chan;
653
654 mutex_lock(&conn->chan_lock);
655
656 list_for_each_entry(chan, &conn->chan_l, list) {
657 l2cap_chan_lock(chan);
658 bacpy(&chan->dst, &hcon->dst);
659 chan->dst_type = bdaddr_dst_type(hcon);
660 l2cap_chan_unlock(chan);
661 }
662
663 mutex_unlock(&conn->chan_lock);
664 }
665
666 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
667 {
668 struct l2cap_conn *conn = chan->conn;
669 struct l2cap_le_conn_rsp rsp;
670 u16 result;
671
672 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
673 result = L2CAP_CR_AUTHORIZATION;
674 else
675 result = L2CAP_CR_BAD_PSM;
676
677 l2cap_state_change(chan, BT_DISCONN);
678
679 rsp.dcid = cpu_to_le16(chan->scid);
680 rsp.mtu = cpu_to_le16(chan->imtu);
681 rsp.mps = cpu_to_le16(chan->mps);
682 rsp.credits = cpu_to_le16(chan->rx_credits);
683 rsp.result = cpu_to_le16(result);
684
685 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
686 &rsp);
687 }
688
689 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
690 {
691 struct l2cap_conn *conn = chan->conn;
692 struct l2cap_conn_rsp rsp;
693 u16 result;
694
695 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
696 result = L2CAP_CR_SEC_BLOCK;
697 else
698 result = L2CAP_CR_BAD_PSM;
699
700 l2cap_state_change(chan, BT_DISCONN);
701
702 rsp.scid = cpu_to_le16(chan->dcid);
703 rsp.dcid = cpu_to_le16(chan->scid);
704 rsp.result = cpu_to_le16(result);
705 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
706
707 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
708 }
709
/* Close @chan, taking the action appropriate to its current state:
 * send a disconnect/reject on the wire where a peer is involved,
 * otherwise just tear the channel down locally with @reason.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established conn-oriented channels need a disconnect
		 * handshake; the chan timer bounds the wait for the rsp.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request not yet accepted: reject it first */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
752
/* Map the channel type / PSM / security level onto an HCI
 * authentication requirement.
 *
 * Side effect: for SDP (and the 3DSP connectionless PSM) a LOW
 * security level is bumped to BT_SECURITY_SDP on the channel itself.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		/* Non-SDP conn-oriented channels use general bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
803
804 /* Service level security */
805 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
806 {
807 struct l2cap_conn *conn = chan->conn;
808 __u8 auth_type;
809
810 if (conn->hcon->type == LE_LINK)
811 return smp_conn_security(conn->hcon, chan->sec_level);
812
813 auth_type = l2cap_get_auth_type(chan);
814
815 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
816 initiator);
817 }
818
819 static u8 l2cap_get_ident(struct l2cap_conn *conn)
820 {
821 u8 id;
822
823 /* Get next available identificator.
824 * 1 - 128 are used by kernel.
825 * 129 - 199 are reserved.
826 * 200 - 254 are used by utilities like l2ping, etc.
827 */
828
829 mutex_lock(&conn->ident_lock);
830
831 if (++conn->tx_ident > 128)
832 conn->tx_ident = 1;
833
834 id = conn->tx_ident;
835
836 mutex_unlock(&conn->ident_lock);
837
838 return id;
839 }
840
841 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
842 void *data)
843 {
844 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
845 u8 flags;
846
847 BT_DBG("code 0x%2.2x", code);
848
849 if (!skb)
850 return;
851
852 /* Use NO_FLUSH if supported or we have an LE link (which does
853 * not support auto-flushing packets) */
854 if (lmp_no_flush_capable(conn->hcon->hdev) ||
855 conn->hcon->type == LE_LINK)
856 flags = ACL_START_NO_FLUSH;
857 else
858 flags = ACL_START;
859
860 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
861 skb->priority = HCI_PRIO_MAX;
862
863 hci_send_acl(conn->hchan, skb, flags);
864 }
865
866 static bool __chan_is_moving(struct l2cap_chan *chan)
867 {
868 return chan->move_state != L2CAP_MOVE_STABLE &&
869 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
870 }
871
/* Transmit @skb on @chan, routing it over the AMP logical link when
 * one is attached, otherwise over the BR/EDR or LE ACL link with the
 * appropriate flush flags.  Consumes @skb in all paths.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Channel lives on an AMP controller and is not mid-move */
	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb); /* no logical link: drop */

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
903
904 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
905 {
906 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
907 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
908
909 if (enh & L2CAP_CTRL_FRAME_TYPE) {
910 /* S-Frame */
911 control->sframe = 1;
912 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
913 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
914
915 control->sar = 0;
916 control->txseq = 0;
917 } else {
918 /* I-Frame */
919 control->sframe = 0;
920 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
921 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
922
923 control->poll = 0;
924 control->super = 0;
925 }
926 }
927
928 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
929 {
930 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
931 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
932
933 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
934 /* S-Frame */
935 control->sframe = 1;
936 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
937 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
938
939 control->sar = 0;
940 control->txseq = 0;
941 } else {
942 /* I-Frame */
943 control->sframe = 0;
944 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
945 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
946
947 control->poll = 0;
948 control->super = 0;
949 }
950 }
951
952 static inline void __unpack_control(struct l2cap_chan *chan,
953 struct sk_buff *skb)
954 {
955 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
956 __unpack_extended_control(get_unaligned_le32(skb->data),
957 &bt_cb(skb)->control);
958 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
959 } else {
960 __unpack_enhanced_control(get_unaligned_le16(skb->data),
961 &bt_cb(skb)->control);
962 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
963 }
964 }
965
966 static u32 __pack_extended_control(struct l2cap_ctrl *control)
967 {
968 u32 packed;
969
970 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
971 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
972
973 if (control->sframe) {
974 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
975 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
976 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
977 } else {
978 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
979 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
980 }
981
982 return packed;
983 }
984
985 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
986 {
987 u16 packed;
988
989 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
990 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
991
992 if (control->sframe) {
993 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
994 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
995 packed |= L2CAP_CTRL_FRAME_TYPE;
996 } else {
997 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
998 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
999 }
1000
1001 return packed;
1002 }
1003
1004 static inline void __pack_control(struct l2cap_chan *chan,
1005 struct l2cap_ctrl *control,
1006 struct sk_buff *skb)
1007 {
1008 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1009 put_unaligned_le32(__pack_extended_control(control),
1010 skb->data + L2CAP_HDR_SIZE);
1011 } else {
1012 put_unaligned_le16(__pack_enhanced_control(control),
1013 skb->data + L2CAP_HDR_SIZE);
1014 }
1015 }
1016
1017 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1018 {
1019 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1020 return L2CAP_EXT_HDR_SIZE;
1021 else
1022 return L2CAP_ENH_HDR_SIZE;
1023 }
1024
/* Build an S-frame PDU carrying the already-packed @control field.
 * Returns the skb or ERR_PTR(-ENOMEM).
 *
 * Note the FCS is computed over the bytes built so far (header plus
 * control field), so it must be appended last.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width follows FLAG_EXT_CTRL */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1057
/* Build and send an ERTM S-frame described by @control.
 *
 * May mutate @control: a pending F-bit (CONN_SEND_FBIT) is folded into
 * a non-poll frame.  RR/RNR frames update CONN_RNR_SENT, and any
 * non-SREJ frame acknowledges reqseq and cancels the ack timer.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* No traffic while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1098
1099 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1100 {
1101 struct l2cap_ctrl control;
1102
1103 BT_DBG("chan %p, poll %d", chan, poll);
1104
1105 memset(&control, 0, sizeof(control));
1106 control.sframe = 1;
1107 control.poll = poll;
1108
1109 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1110 control.super = L2CAP_SUPER_RNR;
1111 else
1112 control.super = L2CAP_SUPER_RR;
1113
1114 control.reqseq = chan->buffer_seq;
1115 l2cap_send_sframe(chan, &control);
1116 }
1117
1118 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1119 {
1120 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1121 return true;
1122
1123 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1124 }
1125
1126 static bool __amp_capable(struct l2cap_chan *chan)
1127 {
1128 struct l2cap_conn *conn = chan->conn;
1129 struct hci_dev *hdev;
1130 bool amp_available = false;
1131
1132 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1133 return false;
1134
1135 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1136 return false;
1137
1138 read_lock(&hci_dev_list_lock);
1139 list_for_each_entry(hdev, &hci_dev_list, list) {
1140 if (hdev->amp_type != AMP_TYPE_BREDR &&
1141 test_bit(HCI_UP, &hdev->flags)) {
1142 amp_available = true;
1143 break;
1144 }
1145 }
1146 read_unlock(&hci_dev_list_lock);
1147
1148 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1149 return amp_available;
1150
1151 return false;
1152 }
1153
1154 static bool l2cap_check_efs(struct l2cap_chan *chan)
1155 {
1156 /* Check EFS parameters */
1157 return true;
1158 }
1159
/* Send an L2CAP Connection Request for @chan and mark the connect as
 * pending (CONF_CONNECT_PEND) until the peer's response clears it.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	/* Remember the signalling identifier so the response can be
	 * matched back to this channel.
	 */
	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1174
1175 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1176 {
1177 struct l2cap_create_chan_req req;
1178 req.scid = cpu_to_le16(chan->scid);
1179 req.psm = chan->psm;
1180 req.amp_id = amp_id;
1181
1182 chan->ident = l2cap_get_ident(chan->conn);
1183
1184 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1185 sizeof(req), &req);
1186 }
1187
/* Prepare an ERTM channel for a channel move: stop all ERTM timers,
 * reset retransmission bookkeeping, and park the TX/RX state machines
 * until the move completes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	/* Only ERTM channels carry state that must be reset for a move */
	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames that have been transmitted at least once are reset to a
	 * single retry; the walk stops at the first never-sent frame.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off new transmissions until the move has finished */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1222
/* Finish a channel move: return the move state machine to stable and,
 * for ERTM, kick off the poll/final exchange that resynchronizes both
 * sides' sequence state.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Non-ERTM channels need no resynchronization */
	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		/* Initiator polls the peer and waits for the F-bit */
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Responder waits for the initiator's poll (P-bit) */
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1244
/* Transition @chan to BT_CONNECTED and notify its owner via the ready
 * callback. Clears all configuration state and the channel timer.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* An LE flow-control channel with no TX credits cannot send yet,
	 * so suspend the socket's sndbuf until credits arrive.
	 */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1258
/* Send an LE Credit Based Connection Request for @chan, advertising
 * our MTU, MPS and initial RX credits. FLAG_LE_CONN_REQ_SENT ensures
 * the request is only ever sent once per channel.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	/* Already sent? Then there is nothing to do. */
	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1278
1279 static void l2cap_le_start(struct l2cap_chan *chan)
1280 {
1281 struct l2cap_conn *conn = chan->conn;
1282
1283 if (!smp_conn_security(conn->hcon, chan->sec_level))
1284 return;
1285
1286 if (!chan->psm) {
1287 l2cap_chan_ready(chan);
1288 return;
1289 }
1290
1291 if (chan->state == BT_CONNECT)
1292 l2cap_le_connect(chan);
1293 }
1294
1295 static void l2cap_start_connection(struct l2cap_chan *chan)
1296 {
1297 if (__amp_capable(chan)) {
1298 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1299 a2mp_discover_amp(chan);
1300 } else if (chan->conn->hcon->type == LE_LINK) {
1301 l2cap_le_start(chan);
1302 } else {
1303 l2cap_send_conn_req(chan);
1304 }
1305 }
1306
/* Send an Information Request for the peer's feature mask, at most
 * once per connection, and start the info response timeout.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	/* Request already in flight or answered */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* If the peer never answers, info_timer completes the exchange */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1324
/* Drive channel setup for @chan. LE links go straight to the LE flow;
 * BR/EDR links first require the peer's feature mask (requesting it if
 * needed) and a passed security check before the connection request
 * goes out.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature mask not yet requested: ask for it and wait; setup
	 * resumes from the info response (or its timeout).
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* Request sent but answer still outstanding */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (l2cap_chan_check_security(chan, true) &&
	    __l2cap_no_conn_pending(chan))
		l2cap_start_connection(chan);
}
1346
1347 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1348 {
1349 u32 local_feat_mask = l2cap_feat_mask;
1350 if (!disable_ertm)
1351 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1352
1353 switch (mode) {
1354 case L2CAP_MODE_ERTM:
1355 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1356 case L2CAP_MODE_STREAMING:
1357 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1358 default:
1359 return 0x00;
1360 }
1361 }
1362
/* Send an L2CAP Disconnection Request for @chan and move it to
 * BT_DISCONN with @err as the channel error. A2MP channels are torn
 * down via state change only — no signalling PDU is sent for them.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* A connected ERTM channel has timers running that must not
	 * fire once the disconnect is underway.
	 */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1389
1390 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and push its setup forward: outgoing
 * channels (BT_CONNECT) get their connection request sent once
 * security and mode support allow it, while incoming channels
 * (BT_CONNECT2) get their pending connection response and, on
 * success, the first configuration request. Called once the peer's
 * feature mask is known (info response or timeout).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe iteration: l2cap_chan_close() below may unlink entries */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Connectionless/fixed channels are ready immediately */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device cannot fall back to basic
			 * mode, so close the channel if the required
			 * mode is unsupported by the peer.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for user-space accept */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Authentication still in progress */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful, not-yet-configured channel
			 * proceeds to the configuration phase.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1467
/* LE-specific connection-ready handling: trigger pairing for outgoing
 * connections and, when acting as slave, request a connection
 * parameter update if the current interval is outside the configured
 * bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1500
/* Called when the underlying link comes up: request the peer feature
 * mask on ACL links, advance every existing channel appropriately for
 * its link type/state, run LE-specific setup, and release any RX
 * traffic queued while the connection was being established.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels have their own setup path */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels become ready once features are
			 * known; otherwise the info response handles it.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1541
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that demanded reliable delivery get the error */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1558
/* Info request timed out without a response: treat the feature-mask
 * exchange as done (with no features learned) and resume channel
 * setup so pending connects are not stuck forever.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1569
1570 /*
1571 * l2cap_user
1572 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1573 * callback is called during registration. The ->remove callback is called
1574 * during unregistration.
1575 * An l2cap_user object can either be explicitly unregistered or when the
1576 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1577 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1578 * External modules must own a reference to the l2cap_conn object if they intend
1579 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1580 * any time if they don't.
1581 */
1582
/* Register an external l2cap_user on @conn. Returns 0 on success,
 * -EINVAL if @user is already linked somewhere, or -ENODEV if the
 * connection has already been torn down. The user's ->probe callback
 * runs under the hci_dev lock before the user is linked in.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1620
/* Unregister @user from @conn under the hci_dev lock. A user that was
 * never registered (NULL list pointers) is ignored; otherwise it is
 * unlinked, its pointers cleared as the "unregistered" marker, and its
 * ->remove callback invoked.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	/* Not registered (or already removed) — nothing to undo */
	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1639
/* Drain conn->users, unlinking each user (clearing its list pointers
 * so it reads as unregistered) and invoking its ->remove callback.
 * Caller must hold the locks required by l2cap_conn_del().
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1652
/* Tear down the L2CAP connection attached to @hcon: cancel pending
 * work, drop registered users, kill every channel with error @err,
 * release the HCI channel and finally drop the conn reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels: hold a ref across del/close so the channel
	 * cannot disappear while we still touch it.
	 */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Info timer only runs after a feature-mask request went out */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1708
/* kref release function: drop the hci_conn reference held by the
 * l2cap_conn and free the structure itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1716
/* Take a reference on @conn and return it, for call-chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1723
/* Drop a reference on @conn; frees it via l2cap_conn_free on zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1729
1730 /* ---- Socket interface ---- */
1731
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match: an exact src/dst match wins, otherwise the
 * last candidate whose wildcard (BDADDR_ANY) addresses are compatible.
 * The returned channel has its refcount raised; caller must put it.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Transport must match the channel's address type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* Hold the fallback candidate before dropping the list lock */
	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1783
/* ERTM monitor timer expiry: feed a MONITOR_TO event into the TX state
 * machine. The work item holds a channel reference that is dropped on
 * every exit path; a channel with no connection is simply released.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel was disconnected while the timer was pending */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1804
/* ERTM retransmission timer expiry: feed a RETRANS_TO event into the
 * TX state machine. Mirrors l2cap_monitor_timeout's reference and
 * locking discipline.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel was disconnected while the timer was pending */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1824
/* Transmit queued streaming-mode PDUs from @skbs: stamp each frame
 * with the next TX sequence number, pack the control field, append the
 * FCS when configured, and send. Frames are never retransmitted in
 * streaming mode, so the queue is fully drained.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Suppress transmission while the channel is being moved */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode carries no acknowledgements */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1863
1864 static int l2cap_ertm_send(struct l2cap_chan *chan)
1865 {
1866 struct sk_buff *skb, *tx_skb;
1867 struct l2cap_ctrl *control;
1868 int sent = 0;
1869
1870 BT_DBG("chan %p", chan);
1871
1872 if (chan->state != BT_CONNECTED)
1873 return -ENOTCONN;
1874
1875 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1876 return 0;
1877
1878 if (__chan_is_moving(chan))
1879 return 0;
1880
1881 while (chan->tx_send_head &&
1882 chan->unacked_frames < chan->remote_tx_win &&
1883 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1884
1885 skb = chan->tx_send_head;
1886
1887 bt_cb(skb)->control.retries = 1;
1888 control = &bt_cb(skb)->control;
1889
1890 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1891 control->final = 1;
1892
1893 control->reqseq = chan->buffer_seq;
1894 chan->last_acked_seq = chan->buffer_seq;
1895 control->txseq = chan->next_tx_seq;
1896
1897 __pack_control(chan, control, skb);
1898
1899 if (chan->fcs == L2CAP_FCS_CRC16) {
1900 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1901 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1902 }
1903
1904 /* Clone after data has been modified. Data is assumed to be
1905 read-only (for locking purposes) on cloned sk_buffs.
1906 */
1907 tx_skb = skb_clone(skb, GFP_KERNEL);
1908
1909 if (!tx_skb)
1910 break;
1911
1912 __set_retrans_timer(chan);
1913
1914 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1915 chan->unacked_frames++;
1916 chan->frames_sent++;
1917 sent++;
1918
1919 if (skb_queue_is_last(&chan->tx_q, skb))
1920 chan->tx_send_head = NULL;
1921 else
1922 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1923
1924 l2cap_do_send(chan, tx_skb);
1925 BT_DBG("Sent txseq %u", control->txseq);
1926 }
1927
1928 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1929 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1930
1931 return sent;
1932 }
1933
/* Retransmit every frame whose sequence number is queued on
 * chan->retrans_list. Each frame's retry count is bumped (and the
 * channel disconnected if max_tx is exceeded), the control field and
 * FCS are rewritten in a writable copy, and the copy is sent.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Peer asked us to hold off (RNR received) */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	/* No transmission while a channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for the retransmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2014
/* Retransmit the single frame identified by control->reqseq by
 * queueing it on the retransmission list and flushing the list.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2023
/* Retransmit all unacknowledged frames starting from control->reqseq:
 * rebuild the retransmission list from the TX queue and flush it. A
 * poll (P-bit) in @control schedules an F-bit on the next frame out.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	/* Peer asked us to hold off (RNR received) */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames that were already acknowledged, i.e.
		 * everything before reqseq (or tx_send_head, whichever
		 * comes first).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue every sent-but-unacked frame for resend */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2057
/* Acknowledge received I-frames. Sends RNR immediately when locally
 * busy; otherwise tries to piggyback the ack on outgoing I-frames and
 * only sends an explicit RR once roughly 3/4 of the ack window is
 * consumed, deferring smaller acks to the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Tell the peer to stop sending while we are busy */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: let the ack timer batch the ack */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2107
/* Copy @len bytes of user data from @msg into @skb, placing the first
 * @count bytes in the skb head and the remainder in HCI-MTU-sized
 * fragments chained on frag_list. Returns the number of bytes copied
 * or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		/* Each fragment is bounded by the HCI MTU */
		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (copy_from_iter(skb_put(*frag, count), count,
				   &msg->msg_iter) != count)
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account fragment data in the parent skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2151
/* Build a connectionless (G-frame) PDU from user data: L2CAP header
 * followed by the 2-byte PSM, then the payload. Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Head holds at most one HCI MTU; the rest goes to fragments */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2183
/* Build a basic-mode (B-frame) PDU from user data: a plain L2CAP
 * header followed by the payload. Returns the skb or an ERR_PTR on
 * allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Head holds at most one HCI MTU; the rest goes to fragments */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2213
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at transmit time), optional SDU length (@sdulen,
 * non-zero only for the first segment of a segmented SDU), payload,
 * and headroom reserved for the FCS when enabled. Returns the skb or
 * an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Enhanced vs extended control field size depends on the chan */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2267
/* Segment an outgoing SDU of @len bytes into ERTM I-frame PDUs queued
 * on @seg_queue, applying SAR markers (UNSEGMENTED, or START/
 * CONTINUE/END). The PDU size is bounded by the HCI MTU, the BR/EDR
 * payload limit, L2CAP overhead and the remote MPS. Returns 0 or a
 * negative errno (purging @seg_queue on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in a single PDU */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first segment carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2334
/* Build an LE flow-control (credit based) PDU: L2CAP header, optional
 * SDU length (@sdulen, non-zero only on the first segment), then the
 * payload. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Head holds at most one HCI MTU; the rest goes to fragments */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2377
2378 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2379 struct sk_buff_head *seg_queue,
2380 struct msghdr *msg, size_t len)
2381 {
2382 struct sk_buff *skb;
2383 size_t pdu_len;
2384 u16 sdu_len;
2385
2386 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2387
2388 sdu_len = len;
2389 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2390
2391 while (len > 0) {
2392 if (len <= pdu_len)
2393 pdu_len = len;
2394
2395 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2396 if (IS_ERR(skb)) {
2397 __skb_queue_purge(seg_queue);
2398 return PTR_ERR(skb);
2399 }
2400
2401 __skb_queue_tail(seg_queue, skb);
2402
2403 len -= pdu_len;
2404
2405 if (sdu_len) {
2406 sdu_len = 0;
2407 pdu_len += L2CAP_SDULEN_SIZE;
2408 }
2409 }
2410
2411 return 0;
2412 }
2413
/* Send an SDU on @chan, segmenting according to the channel mode.
 *
 * Returns @len on success or a negative errno.  The caller holds the
 * channel lock; chan->ops->alloc_skb() may drop and re-acquire it,
 * which is why the channel state is re-checked after allocation and
 * after segmentation.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* LE flow control is credit based; no credits means
		 * the SDU cannot be sent right now.
		 */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting (the lock
		 * can be dropped during allocation); disconnect takes
		 * precedence over any segmentation error.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Transmit queued PDUs, spending one credit each */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		/* Out of credits: stop the upper layer from feeding
		 * more data until the peer returns credits.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		/* NOTE(review): message says "state" but prints the mode */
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2546
2547 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2548 {
2549 struct l2cap_ctrl control;
2550 u16 seq;
2551
2552 BT_DBG("chan %p, txseq %u", chan, txseq);
2553
2554 memset(&control, 0, sizeof(control));
2555 control.sframe = 1;
2556 control.super = L2CAP_SUPER_SREJ;
2557
2558 for (seq = chan->expected_tx_seq; seq != txseq;
2559 seq = __next_seq(chan, seq)) {
2560 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2561 control.reqseq = seq;
2562 l2cap_send_sframe(chan, &control);
2563 l2cap_seq_list_append(&chan->srej_list, seq);
2564 }
2565 }
2566
2567 chan->expected_tx_seq = __next_seq(chan, txseq);
2568 }
2569
2570 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2571 {
2572 struct l2cap_ctrl control;
2573
2574 BT_DBG("chan %p", chan);
2575
2576 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2577 return;
2578
2579 memset(&control, 0, sizeof(control));
2580 control.sframe = 1;
2581 control.super = L2CAP_SUPER_SREJ;
2582 control.reqseq = chan->srej_list.tail;
2583 l2cap_send_sframe(chan, &control);
2584 }
2585
/* Re-send SREJs for every sequence number still on the SREJ list,
 * stopping early if @txseq is reached.
 *
 * Each entry is popped and, because it is still missing, re-appended
 * after its SREJ is re-sent.  Capturing the initial head bounds the
 * loop to a single pass even though entries are re-queued.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2611
2612 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2613 {
2614 struct sk_buff *acked_skb;
2615 u16 ackseq;
2616
2617 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2618
2619 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2620 return;
2621
2622 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2623 chan->expected_ack_seq, chan->unacked_frames);
2624
2625 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2626 ackseq = __next_seq(chan, ackseq)) {
2627
2628 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2629 if (acked_skb) {
2630 skb_unlink(acked_skb, &chan->tx_q);
2631 kfree_skb(acked_skb);
2632 chan->unacked_frames--;
2633 }
2634 }
2635
2636 chan->expected_ack_seq = reqseq;
2637
2638 if (chan->unacked_frames == 0)
2639 __clear_retrans_timer(chan);
2640
2641 BT_DBG("unacked_frames %u", chan->unacked_frames);
2642 }
2643
2644 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2645 {
2646 BT_DBG("chan %p", chan);
2647
2648 chan->expected_tx_seq = chan->buffer_seq;
2649 l2cap_seq_list_clear(&chan->srej_list);
2650 skb_queue_purge(&chan->srej_q);
2651 chan->rx_state = L2CAP_RX_STATE_RECV;
2652 }
2653
/* ERTM transmit state machine: XMIT state handler.
 *
 * In this state new I-frames may be transmitted immediately.  Poll
 * events (explicit poll, retransmission timeout) send a P-bit S-frame
 * and move the channel to WAIT_F until the peer answers with the
 * F-bit set.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Remember where unsent data starts before queueing */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR was sent while busy: poll the peer with
			 * RR(P=1) to resynchronize and await the F-bit.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Peer acknowledged frames up to control->reqseq */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the peer instead of
		 * blindly retransmitting.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2725
/* ERTM transmit state machine: WAIT_F state handler.
 *
 * A P-bit S-frame has been sent and the channel is waiting for the
 * peer's F-bit reply.  New data is queued but not transmitted; the
 * monitor timer re-polls the peer until max_tx retries are exhausted,
 * at which point the channel is disconnected.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* Re-poll with RR(P=1) after having sent RNR */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* F-bit received: the poll exchange is complete.
			 * Return to XMIT and restart the retransmission
			 * timer if frames remain unacknowledged.
			 */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* max_tx == 0 means retry forever */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2803
2804 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2805 struct sk_buff_head *skbs, u8 event)
2806 {
2807 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2808 chan, control, skbs, event, chan->tx_state);
2809
2810 switch (chan->tx_state) {
2811 case L2CAP_TX_STATE_XMIT:
2812 l2cap_tx_state_xmit(chan, control, skbs, event);
2813 break;
2814 case L2CAP_TX_STATE_WAIT_F:
2815 l2cap_tx_state_wait_f(chan, control, skbs, event);
2816 break;
2817 default:
2818 /* Ignore event */
2819 break;
2820 }
2821 }
2822
/* Feed a received reqseq (with F-bit) into the transmit state machine
 * so acknowledged frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2829
/* Feed only the F-bit of a received frame into the transmit state
 * machine (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2836
2837 /* Copy frame to all raw sockets on that connection */
2838 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2839 {
2840 struct sk_buff *nskb;
2841 struct l2cap_chan *chan;
2842
2843 BT_DBG("conn %p", conn);
2844
2845 mutex_lock(&conn->chan_lock);
2846
2847 list_for_each_entry(chan, &conn->chan_l, list) {
2848 if (chan->chan_type != L2CAP_CHAN_RAW)
2849 continue;
2850
2851 /* Don't send frame to the channel it came from */
2852 if (bt_cb(skb)->chan == chan)
2853 continue;
2854
2855 nskb = skb_clone(skb, GFP_KERNEL);
2856 if (!nskb)
2857 continue;
2858 if (chan->ops->recv(chan, nskb))
2859 kfree_skb(nskb);
2860 }
2861
2862 mutex_unlock(&conn->chan_lock);
2863 }
2864
/* ---- L2CAP signalling commands ---- */

/* Build a signalling command skb: L2CAP header, command header, then
 * @dlen bytes of @data.
 *
 * The head skb carries as much as fits in the HCI MTU; the remainder
 * of the payload is chained as header-less continuation fragments on
 * frag_list.  Returns NULL if allocation fails or the MTU cannot hold
 * even the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling uses a fixed CID that differs per transport */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the head skb with whatever payload fits */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb frees the whole frag_list chain as well */
	kfree_skb(skb);
	return NULL;
}
2931
/* Read one configuration option from the buffer at *ptr.
 *
 * Advances *ptr past the option and returns the option's total size
 * (header plus value).  Values of size 1/2/4 are returned by value in
 * *val; any other size returns a POINTER to the raw option data cast
 * into *val, which the caller is expected to memcpy() out after
 * checking *olen.
 *
 * NOTE(review): opt->len comes from the remote peer and is not
 * validated against the remaining buffer length here — callers must
 * ensure the advertised length fits within the buffer they are
 * parsing; confirm bounds are enforced upstream of this helper.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-size option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2965
2966 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2967 {
2968 struct l2cap_conf_opt *opt = *ptr;
2969
2970 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2971
2972 opt->type = type;
2973 opt->len = len;
2974
2975 switch (len) {
2976 case 1:
2977 *((u8 *) opt->val) = val;
2978 break;
2979
2980 case 2:
2981 put_unaligned_le16(val, opt->val);
2982 break;
2983
2984 case 4:
2985 put_unaligned_le32(val, opt->val);
2986 break;
2987
2988 default:
2989 memcpy(opt->val, (void *) val, len);
2990 break;
2991 }
2992
2993 *ptr += L2CAP_CONF_OPT_SIZE + len;
2994 }
2995
2996 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2997 {
2998 struct l2cap_conf_efs efs;
2999
3000 switch (chan->mode) {
3001 case L2CAP_MODE_ERTM:
3002 efs.id = chan->local_id;
3003 efs.stype = chan->local_stype;
3004 efs.msdu = cpu_to_le16(chan->local_msdu);
3005 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3006 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3007 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3008 break;
3009
3010 case L2CAP_MODE_STREAMING:
3011 efs.id = 1;
3012 efs.stype = L2CAP_SERV_BESTEFFORT;
3013 efs.msdu = cpu_to_le16(chan->local_msdu);
3014 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3015 efs.acc_lat = 0;
3016 efs.flush_to = 0;
3017 break;
3018
3019 default:
3020 return;
3021 }
3022
3023 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3024 (unsigned long) &efs);
3025 }
3026
/* Delayed-work handler for the ERTM acknowledgment timer.
 *
 * If frames have been received since the last acknowledgment, send an
 * RR (or RNR when locally busy) so the peer does not retransmit
 * needlessly.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received but not yet acknowledged */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drop the channel reference held for the pending timer
	 * (presumably taken when the ack timer was armed — confirm).
	 */
	l2cap_chan_put(chan);
}
3046
/* Reset per-channel ERTM/streaming state after configuration.
 *
 * Sequence counters, SDU reassembly state, the tx queue and AMP move
 * state are reset for all modes; ERTM additionally gets its timers,
 * SREJ queue and sequence lists.  Returns 0 on success or a negative
 * errno if sequence-list allocation fails.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	/* Reset SDU reassembly state */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on BR/EDR */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3091
3092 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3093 {
3094 switch (mode) {
3095 case L2CAP_MODE_STREAMING:
3096 case L2CAP_MODE_ERTM:
3097 if (l2cap_mode_supported(mode, remote_feat_mask))
3098 return mode;
3099 /* fall through */
3100 default:
3101 return L2CAP_MODE_BASIC;
3102 }
3103 }
3104
3105 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3106 {
3107 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3108 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3109 }
3110
3111 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3112 {
3113 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3114 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3115 }
3116
/* Fill in the RFC option's retransmission and monitor timeouts.
 *
 * For AMP (high-speed) links the timeouts are derived from the
 * controller's best-effort flush timeout; otherwise the spec default
 * values are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3154
3155 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3156 {
3157 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3158 __l2cap_ews_supported(chan->conn)) {
3159 /* use extended control field */
3160 set_bit(FLAG_EXT_CTRL, &chan->flags);
3161 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3162 } else {
3163 chan->tx_win = min_t(u16, chan->tx_win,
3164 L2CAP_DEFAULT_TX_WINDOW);
3165 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3166 }
3167 chan->ack_win = chan->tx_win;
3168 }
3169
/* Build a Configure Request for @chan into @data.
 *
 * On the very first request the channel mode may be downgraded based
 * on the remote feature mask; later requests keep the already chosen
 * mode.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise MTU when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Send an explicit basic-mode RFC only if the peer
		 * understands RFC options at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU must fit in the HCI MTU with worst-case overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Extended windows carry the full value in an EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3290
/* Parse the peer's Configure Request (buffered in chan->conf_req) and
 * build our Configure Response into @data.
 *
 * Returns the response length, or -ECONNREFUSED when the request is
 * unacceptable.
 *
 * Every option's advertised length is checked against the option's
 * defined size before the value is consumed.  This matters because
 * l2cap_get_conf_opt() returns values of size other than 1/2/4 as a
 * POINTER in val: without the size checks a wrong-sized option from
 * the remote could be consumed as an integer (or leave efs
 * uninitialized while remote_efs is set).  Malformed options are
 * simply ignored.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			/* Collect unknown non-hint options for the
			 * CONF_UNKNOWN response.
			 */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode negotiation only happens on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 devices refuse any mode other than their own */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give the peer only one chance to retry with our mode */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp remote MPS to what fits the HCI MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3504
3505 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3506 void *data, u16 *result)
3507 {
3508 struct l2cap_conf_req *req = data;
3509 void *ptr = req->data;
3510 int type, olen;
3511 unsigned long val;
3512 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3513 struct l2cap_conf_efs efs;
3514
3515 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3516
3517 while (len >= L2CAP_CONF_OPT_SIZE) {
3518 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3519
3520 switch (type) {
3521 case L2CAP_CONF_MTU:
3522 if (val < L2CAP_DEFAULT_MIN_MTU) {
3523 *result = L2CAP_CONF_UNACCEPT;
3524 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3525 } else
3526 chan->imtu = val;
3527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3528 break;
3529
3530 case L2CAP_CONF_FLUSH_TO:
3531 chan->flush_to = val;
3532 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3533 2, chan->flush_to);
3534 break;
3535
3536 case L2CAP_CONF_RFC:
3537 if (olen == sizeof(rfc))
3538 memcpy(&rfc, (void *)val, olen);
3539
3540 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3541 rfc.mode != chan->mode)
3542 return -ECONNREFUSED;
3543
3544 chan->fcs = 0;
3545
3546 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3547 sizeof(rfc), (unsigned long) &rfc);
3548 break;
3549
3550 case L2CAP_CONF_EWS:
3551 chan->ack_win = min_t(u16, val, chan->ack_win);
3552 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3553 chan->tx_win);
3554 break;
3555
3556 case L2CAP_CONF_EFS:
3557 if (olen == sizeof(efs))
3558 memcpy(&efs, (void *)val, olen);
3559
3560 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3561 efs.stype != L2CAP_SERV_NOTRAFIC &&
3562 efs.stype != chan->local_stype)
3563 return -ECONNREFUSED;
3564
3565 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3566 (unsigned long) &efs);
3567 break;
3568
3569 case L2CAP_CONF_FCS:
3570 if (*result == L2CAP_CONF_PENDING)
3571 if (val == L2CAP_FCS_NONE)
3572 set_bit(CONF_RECV_NO_FCS,
3573 &chan->conf_state);
3574 break;
3575 }
3576 }
3577
3578 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3579 return -ECONNREFUSED;
3580
3581 chan->mode = rfc.mode;
3582
3583 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3584 switch (rfc.mode) {
3585 case L2CAP_MODE_ERTM:
3586 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3587 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3588 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3589 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3590 chan->ack_win = min_t(u16, chan->ack_win,
3591 rfc.txwin_size);
3592
3593 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3594 chan->local_msdu = le16_to_cpu(efs.msdu);
3595 chan->local_sdu_itime =
3596 le32_to_cpu(efs.sdu_itime);
3597 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3598 chan->local_flush_to =
3599 le32_to_cpu(efs.flush_to);
3600 }
3601 break;
3602
3603 case L2CAP_MODE_STREAMING:
3604 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3605 }
3606 }
3607
3608 req->dcid = cpu_to_le16(chan->dcid);
3609 req->flags = cpu_to_le16(0);
3610
3611 return ptr - data;
3612 }
3613
3614 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3615 u16 result, u16 flags)
3616 {
3617 struct l2cap_conf_rsp *rsp = data;
3618 void *ptr = rsp->data;
3619
3620 BT_DBG("chan %p", chan);
3621
3622 rsp->scid = cpu_to_le16(chan->dcid);
3623 rsp->result = cpu_to_le16(result);
3624 rsp->flags = cpu_to_le16(flags);
3625
3626 return ptr - data;
3627 }
3628
3629 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3630 {
3631 struct l2cap_le_conn_rsp rsp;
3632 struct l2cap_conn *conn = chan->conn;
3633
3634 BT_DBG("chan %p", chan);
3635
3636 rsp.dcid = cpu_to_le16(chan->scid);
3637 rsp.mtu = cpu_to_le16(chan->imtu);
3638 rsp.mps = cpu_to_le16(chan->mps);
3639 rsp.credits = cpu_to_le16(chan->rx_credits);
3640 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3641
3642 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3643 &rsp);
3644 }
3645
/* Send the deferred BR/EDR connect response (or AMP Create Channel
 * Response when the channel rides a high-speed link) for a channel whose
 * acceptance was postponed, then start configuration if we have not yet
 * sent a config request.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];	/* scratch space for the outgoing config request */
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP-created channels answer with a Create Channel Response
	 * instead of a plain Connection Response.
	 */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only the first caller gets to send the initial config request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3674
/* Extract the negotiated RFC (and extended window size) parameters from a
 * successful Configure Response option list @rsp of @len bytes and apply
 * them to @chan.  Only meaningful for ERTM and streaming mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Basic and LE modes carry no RFC parameters */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			/* Wrong-sized RFC options are ignored, keeping the
			 * defaults initialized above.
			 */
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the tx window comes from the EWS
		 * option, otherwise from the RFC txwin field.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3725
3726 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3727 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3728 u8 *data)
3729 {
3730 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3731
3732 if (cmd_len < sizeof(*rej))
3733 return -EPROTO;
3734
3735 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3736 return 0;
3737
3738 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3739 cmd->ident == conn->info_ident) {
3740 cancel_delayed_work(&conn->info_timer);
3741
3742 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3743 conn->info_ident = 0;
3744
3745 l2cap_conn_start(conn);
3746 }
3747
3748 return 0;
3749 }
3750
/* Handle an incoming Connection Request (or, via l2cap_create_channel_req,
 * an AMP Create Channel Request — the two differ only in @rsp_code and
 * @amp_id).  Finds a listening channel for the requested PSM, spawns a
 * child channel, sends the response with @rsp_code, and kicks off the
 * information/configuration exchange as appropriate.
 *
 * Returns the new channel (owned by the connection) or NULL if the
 * request was rejected.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* pchan is returned with a reference held; dropped at 'response' */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our freshly allocated source CID becomes the remote's dcid */
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet; answer pending and ask below */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start feature discovery if we have not queried the remote yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, open configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3885
/* Handle an incoming L2CAP Connection Request over BR/EDR: notify the
 * management interface once per ACL, then delegate channel creation to
 * l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* test_and_set ensures mgmt sees the connection exactly once */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
	hci_dev_unlock(hdev);

	/* amp_id 0 (AMP_ID_BREDR): create the channel on BR/EDR */
	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3904
/* Handle a Connection Response or Create Channel Response to a request
 * we sent.  The channel is looked up by our scid, or — while the remote
 * only answered "pending" without a dcid/scid pair — by the ident of the
 * original request.  Success moves the channel to BT_CONFIG and sends
 * the first configure request; any other final result tears it down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];	/* scratch for the first config request */
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No scid yet: match on the ident of the request we sent */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only the first success response triggers a config req */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal; drop the channel */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3977
3978 static inline void set_default_fcs(struct l2cap_chan *chan)
3979 {
3980 /* FCS is enabled only in ERTM or streaming mode, if one or both
3981 * sides request it.
3982 */
3983 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3984 chan->fcs = L2CAP_FCS_NONE;
3985 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3986 chan->fcs = L2CAP_FCS_CRC16;
3987 }
3988
3989 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3990 u8 ident, u16 flags)
3991 {
3992 struct l2cap_conn *conn = chan->conn;
3993
3994 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3995 flags);
3996
3997 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3998 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3999
4000 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4001 l2cap_build_conf_rsp(chan, data,
4002 L2CAP_CONF_SUCCESS, flags), data);
4003 }
4004
4005 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4006 u16 scid, u16 dcid)
4007 {
4008 struct l2cap_cmd_rej_cid rej;
4009
4010 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4011 rej.scid = __cpu_to_le16(scid);
4012 rej.dcid = __cpu_to_le16(dcid);
4013
4014 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4015 }
4016
/* Handle an incoming Configure Request.  Option data is accumulated in
 * chan->conf_req across continuation fragments; once complete it is
 * parsed, a response is sent, and — if both directions are configured —
 * the channel is brought up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Config is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish bringing the channel up */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We answered the remote; send our own config request if pending */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* AMP: response deferred until logical link is up */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4125
/* Handle an incoming Configure Response to a request we sent.  On
 * success the accepted parameters are applied; UNACCEPT triggers a
 * renegotiation attempt; PENDING defers completion (EFS/AMP); anything
 * else disconnects the channel.
 *
 * NOTE(review): the renegotiation paths parse the remote's option list
 * into fixed 64-byte stack buffers; the guard before the UNACCEPT parse
 * bounds @len, but confirm l2cap_parse_conf_rsp cannot emit more than it
 * consumed for the PENDING path's buffer as well.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);	/* length of the option list */
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP: finish after the logical link is up */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many renegotiation rounds: fall through and give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* If our outgoing config is also done, bring the channel up */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4237
/* Handle an incoming Disconnection Request: acknowledge it, then tear
 * down the matching channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a reference so the channel outlives l2cap_chan_del() and
	 * the unlock below while ops->close() still runs on it.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4284
/* Handle a Disconnection Response to a request we sent: the remote has
 * confirmed, so finish tearing down the channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so ops->close() can run after the unlock even
	 * though l2cap_chan_del() dropped the connection's reference.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4323
/* Handle an Information Request: report our feature mask or fixed
 * channel map, or NOT_SUPPORTED for anything else.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4374
/* Handle an Information Response for our feature discovery.  After the
 * feature mask arrives we optionally chain a fixed-channel query; once
 * discovery finishes (or fails) pending channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote cannot answer; proceed without feature info */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed-channel query before starting */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4437
/* Handle an AMP Create Channel Request.  Controller id 0 degenerates to
 * a plain BR/EDR connect; otherwise the AMP controller id is validated
 * and the new channel is bound to the high-speed link.  Invalid
 * controllers are answered with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP link to the same peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4514
4515 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4516 {
4517 struct l2cap_move_chan_req req;
4518 u8 ident;
4519
4520 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4521
4522 ident = l2cap_get_ident(chan->conn);
4523 chan->ident = ident;
4524
4525 req.icid = cpu_to_le16(chan->scid);
4526 req.dest_amp_id = dest_amp_id;
4527
4528 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4529 &req);
4530
4531 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4532 }
4533
4534 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4535 {
4536 struct l2cap_move_chan_rsp rsp;
4537
4538 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4539
4540 rsp.icid = cpu_to_le16(chan->dcid);
4541 rsp.result = cpu_to_le16(result);
4542
4543 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4544 sizeof(rsp), &rsp);
4545 }
4546
4547 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4548 {
4549 struct l2cap_move_chan_cfm cfm;
4550
4551 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4552
4553 chan->ident = l2cap_get_ident(chan->conn);
4554
4555 cfm.icid = cpu_to_le16(chan->scid);
4556 cfm.result = cpu_to_le16(result);
4557
4558 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4559 sizeof(cfm), &cfm);
4560
4561 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4562 }
4563
4564 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4565 {
4566 struct l2cap_move_chan_cfm cfm;
4567
4568 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4569
4570 cfm.icid = cpu_to_le16(icid);
4571 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4572
4573 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4574 sizeof(cfm), &cfm);
4575 }
4576
4577 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4578 u16 icid)
4579 {
4580 struct l2cap_move_chan_cfm_rsp rsp;
4581
4582 BT_DBG("icid 0x%4.4x", icid);
4583
4584 rsp.icid = cpu_to_le16(icid);
4585 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4586 }
4587
4588 static void __release_logical_link(struct l2cap_chan *chan)
4589 {
4590 chan->hs_hchan = NULL;
4591 chan->hs_hcon = NULL;
4592
4593 /* Placeholder - release the logical link */
4594 }
4595
/* React to a failed logical link setup: abort channel creation if the
 * channel never came up, otherwise unwind the in-progress move according
 * to our role in it.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot host the move */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4626
/* Complete channel creation once the AMP logical link is up: attach the
 * hci_chan, answer the deferred config request, and bring the channel
 * up if the remote's configuration has already finished.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;	/* scratch space for the response */

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Send the config response that was deferred until link-up */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4649
/* Advance an in-progress channel move after the new logical link came
 * up, driving the move state machine according to our role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Hold off until local receive buffers drain */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4683
/* Call with chan locked.
 *
 * Logical link confirmation callback: dispatch link-up/failure either to
 * the channel-creation path (channel not yet connected) or to the
 * channel-move path (already connected).
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4704
/* Start moving the channel to the other controller type: BR/EDR -> AMP
 * requires a physical link first; AMP -> BR/EDR (move_id 0) sends the
 * move request immediately.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		/* Only move off BR/EDR if policy prefers AMP */
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;	/* 0 = back to BR/EDR */
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4723
/* Complete an AMP channel creation after the physical link outcome is
 * known.  For an outgoing channel (BT_CONNECT) either the Create
 * Channel request is sent on success or the connection falls back to a
 * plain BR/EDR Connect request.  For an incoming channel, the pending
 * Create Channel response is sent and, on success, configuration is
 * started.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No FCS on AMP channels at this stage. */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		/* Scratch buffer for the configuration request built by
		 * l2cap_build_conf_req() below.
		 */
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Channel accepted: move to BT_CONFIG and kick
			 * off option negotiation.
			 */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4775
4776 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4777 u8 remote_amp_id)
4778 {
4779 l2cap_move_setup(chan);
4780 chan->move_id = local_amp_id;
4781 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4782
4783 l2cap_send_move_chan_req(chan, remote_amp_id);
4784 }
4785
/* Responder side of a channel move after a physical link outcome.
 *
 * NOTE(review): hchan is never assigned (the lookup is still a
 * placeholder), so as written only the "logical link not available"
 * branch can execute; the `result` parameter is currently unused.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4810
4811 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4812 {
4813 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4814 u8 rsp_result;
4815 if (result == -EINVAL)
4816 rsp_result = L2CAP_MR_BAD_ID;
4817 else
4818 rsp_result = L2CAP_MR_NOT_ALLOWED;
4819
4820 l2cap_send_move_chan_rsp(chan, rsp_result);
4821 }
4822
4823 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4824 chan->move_state = L2CAP_MOVE_STABLE;
4825
4826 /* Restart data transmission */
4827 l2cap_ertm_send(chan);
4828 }
4829
/* Physical link (AMP) confirmation: dispatch to channel creation or
 * channel move handling depending on channel state and move role.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED early-exit path unlocks the
 * channel while every other path leaves it locked — confirm against
 * the callers' locking expectations.
 *
 * Invoke with locked chan
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel is going away; nothing left to do. */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Not yet connected: this confirms a channel creation. */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		/* Connected but physical link failed: cancel the move. */
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			/* No move role: treat as a cancelled move. */
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4863
/* Handle an incoming L2CAP Move Channel request.
 *
 * Validates the request (A2MP support, channel mode/policy, destination
 * controller, move collision) and either rejects it or enters the
 * responder side of the move state machine.  Returns 0 on handled
 * requests, -EPROTO on malformed length, -EINVAL if A2MP is not
 * supported locally.
 *
 * NOTE(review): the channel returned by l2cap_get_chan_by_dcid() is
 * unlocked before returning, implying the lookup returns it locked —
 * confirm against that helper's definition.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: reject directly, nothing to unlock. */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the request ident so the response can match it. */
	chan->ident = cmd->ident;

	/* Moves are only allowed for dynamic CIDs in ERTM or streaming
	 * mode, and only when the policy permits leaving BR/EDR.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		/* Destination must be a powered-up local AMP controller. */
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4961
/* Continue the initiator side of a move after a SUCCESS or PEND
 * Move Channel response identified by icid.  If the channel cannot be
 * found, the mandatory confirm is still sent using the icid alone.
 *
 * NOTE(review): the channel looked up via l2cap_get_chan_by_scid() is
 * unlocked at the end, implying the lookup returns it locked — confirm
 * against that helper's definition.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result extends the response timeout (ERTX). */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		/* NOTE(review): hchan is never assigned here, so the
		 * UNCONFIRMED branch below always executes until the
		 * placeholder is filled in.
		 */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5051
/* Handle a failed Move Channel response.  A collision result demotes
 * the initiator to responder (the peer "won" the move); any other
 * failure cancels the move.  An unconfirmed Move Channel Confirm is
 * always sent, falling back to the icid when the channel cannot be
 * located by the command ident.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision: let the peer drive the move. */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5080
5081 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5082 struct l2cap_cmd_hdr *cmd,
5083 u16 cmd_len, void *data)
5084 {
5085 struct l2cap_move_chan_rsp *rsp = data;
5086 u16 icid, result;
5087
5088 if (cmd_len != sizeof(*rsp))
5089 return -EPROTO;
5090
5091 icid = le16_to_cpu(rsp->icid);
5092 result = le16_to_cpu(rsp->result);
5093
5094 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5095
5096 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5097 l2cap_move_continue(conn, icid, result);
5098 else
5099 l2cap_move_fail(conn, cmd->ident, icid, result);
5100
5101 return 0;
5102 }
5103
/* Handle an incoming Move Channel Confirm.  A CONFIRMED result commits
 * the channel to its new controller (releasing the logical link when
 * moving back to BR/EDR); UNCONFIRMED reverts to the old controller.
 * A confirm response is sent even for an unknown icid, as required by
 * the spec.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	/* Only act if we were actually waiting for this confirm. */
	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit the move to the new controller. */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Move failed: stay on the current controller. */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5145
/* Handle the final Move Channel Confirm Response: the peer has
 * acknowledged our confirm, so commit the move (releasing the logical
 * link if the channel ended up back on BR/EDR) and mark it done.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		/* Unknown channel: nothing to do for a response. */
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Moved back to BR/EDR: the AMP logical link is no
		 * longer needed.
		 */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5180
/* Handle an LE Connection Parameter Update request (slave-initiated).
 * Only valid when the local side is master.  The parameters are
 * validated, a response is sent, and on acceptance the controller is
 * asked to update the connection and the new parameters are reported
 * to the management interface.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may apply connection parameter updates. */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the update and let mgmt decide whether to
		 * store the parameters (store_hint).
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5230
/* Handle an LE credit-based connection response.  On success the
 * channel parameters (dcid, MTU, MPS, TX credits) are recorded and the
 * channel becomes ready.  Security failures trigger an SMP security
 * elevation and a later re-connect; any other result deletes the
 * channel.  The 23-byte lower bound on mtu/mps is the LE CoC minimum
 * — presumably per the Core spec; verify against the spec version in
 * use.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* Match the response to the pending request by command ident. */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_AUTHENTICATION:
	case L2CAP_CR_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise the channel's required security one step above
		 * the link's current level and retry via SMP.
		 */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5310
/* Dispatch a single BR/EDR signaling command to its handler.  Returns
 * a handler's negative error (which makes the caller send a Command
 * Reject) or -EINVAL for unknown command codes; response-type commands
 * deliberately ignore their handlers' return values.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		/* Connect and Create Channel responses share a handler. */
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo: bounce the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5390
/* Handle an incoming LE credit-based connection request.
 *
 * Looks up a listening channel for the PSM, checks security and
 * duplicate CIDs, creates and initializes a new channel, and either
 * defers the decision to the socket layer (FLAG_DEFER_SETUP) or
 * responds immediately.  The 23-byte minimum on mtu/mps is the LE CoC
 * floor — presumably per the Core spec; verify against the spec
 * version in use.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Link security must satisfy the listener's requirement (an
	 * STK is acceptable).
	 */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Populate the new channel from the connection and request. */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent later by the owner. */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5509
/* Handle an LE Flow Control Credit packet: add the peer's granted
 * credits to the channel and flush as many queued outgoing frames as
 * the new credit count allows.  A grant that would overflow the
 * credit counter past LE_FLOWCTL_MAX_CREDITS is a protocol violation
 * and disconnects the channel.
 *
 * NOTE(review): the channel from l2cap_get_chan_by_dcid() is unlocked
 * on every exit path, implying the lookup returns it locked — confirm
 * against that helper's definition.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Send queued frames, one credit each, until we run out of
	 * credits or data.
	 */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	/* Credits left over: let the owner resume sending. */
	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5557
5558 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5559 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5560 u8 *data)
5561 {
5562 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5563 struct l2cap_chan *chan;
5564
5565 if (cmd_len < sizeof(*rej))
5566 return -EPROTO;
5567
5568 mutex_lock(&conn->chan_lock);
5569
5570 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5571 if (!chan)
5572 goto done;
5573
5574 l2cap_chan_lock(chan);
5575 l2cap_chan_del(chan, ECONNREFUSED);
5576 l2cap_chan_unlock(chan);
5577
5578 done:
5579 mutex_unlock(&conn->chan_lock);
5580 return 0;
5581 }
5582
/* Dispatch a single LE signaling command to its handler.  Unknown
 * codes return -EINVAL, which makes the caller send a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do for a parameter update response. */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5629
/* Process an skb received on the LE signaling channel.  LE carries at
 * most one command per frame; malformed frames are dropped, and a
 * handler error triggers a Command Reject.  Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The declared length must match exactly and ident 0 is
	 * reserved/invalid.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5670
/* Process an skb received on the BR/EDR signaling channel.  Unlike LE,
 * a single frame may carry multiple commands; each is dispatched in
 * turn and a handler error produces a Command Reject for that command.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before parsing. */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Truncated payload or reserved ident 0: stop parsing
		 * the rest of the frame.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the frame. */
		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5719
5720 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5721 {
5722 u16 our_fcs, rcv_fcs;
5723 int hdr_size;
5724
5725 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5726 hdr_size = L2CAP_EXT_HDR_SIZE;
5727 else
5728 hdr_size = L2CAP_ENH_HDR_SIZE;
5729
5730 if (chan->fcs == L2CAP_FCS_CRC16) {
5731 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5732 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5733 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5734
5735 if (our_fcs != rcv_fcs)
5736 return -EBADMSG;
5737 }
5738 return 0;
5739 }
5740
/* Send an F-bit acknowledgment after receiving a poll: either an RNR
 * (if locally busy), pending I-frames carrying the F-bit, or a plain
 * RR if nothing else carried it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	/* Mark that the F-bit still needs to go out; sending an
	 * s-frame or i-frame clears it.
	 */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just became un-busy with frames outstanding: restart
	 * the retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5774
/* Append new_frag to skb's frag_list during SDU reassembly, keeping
 * *last_frag pointing at the tail for O(1) appends and updating skb's
 * aggregate length accounting.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5793
/* Reassemble a segmented SDU from an incoming ERTM/streaming I-frame
 * according to the frame's SAR bits.  Ownership: skb is consumed on
 * success (delivered to ops->recv or retained as chan->sdu); on any
 * error both skb and any partial SDU are freed.  Returns 0 on
 * success/progress, -EINVAL on SAR sequence violations, -EMSGSIZE when
 * the SDU exceeds the channel MTU, or the ops->recv error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while a partial SDU is pending
		 * is a sequence error.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First two bytes of a START frame carry the SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START frame claiming to hold the whole SDU (or
		 * more) is a sequence error (err stays -EINVAL).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		/* Hold the skb as the SDU head; more fragments follow. */
		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching the declared length on a CONTINUE frame is
		 * a sequence error — an END frame must finish the SDU.
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* END must land exactly on the declared SDU length. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the offending frame and any partial SDU. */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5875
/* Re-segment outgoing data after a channel move changes the MPS.
 * Not yet implemented; always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5881
5882 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5883 {
5884 u8 event;
5885
5886 if (chan->mode != L2CAP_MODE_ERTM)
5887 return;
5888
5889 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5890 l2cap_tx(chan, NULL, NULL, event);
5891 }
5892
/* Drain in-sequence frames from the SREJ queue into SDU reassembly
 * until a sequence gap, a reassembly error, or a local busy condition
 * stops us.  When the queue empties, normal RECV state resumes and an
 * acknowledgment is sent.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: the next expected frame hasn't arrived. */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5926
/* Handle a received SREJ (Selective Reject) S-frame: the peer requests
 * retransmission of the single I-frame numbered control->reqseq.  A
 * request for a frame that was never sent, or one that has exhausted
 * its retransmission budget, disconnects the channel.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* next_tx_seq has not been sent yet, so it cannot be SREJ'd */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P-bit set: retransmit and answer with the F-bit */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final frame answers
			 * an SREJ already acted upon for the same reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5984
/* Handle a received REJ S-frame: the peer rejects all I-frames from
 * control->reqseq onward, so every unacked frame must be retransmitted.
 * A reject of a frame never sent, or of one that has hit the retry
 * limit, disconnects the channel.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* next_tx_seq has not been sent yet, so it cannot be rejected */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F-bit does not answer a REJ
		 * that was already acted upon.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6021
/* Classify the txseq of a received I-frame relative to the receive
 * window, returning an L2CAP_TXSEQ_* class the caller dispatches on:
 * expected, duplicate, unexpected (sequence gap), invalid (possibly
 * ignorable), or one of the SREJ-specific classes used while SREJs
 * are outstanding (rx_state == SREJ_SENT).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6107
/* ERTM receive handler for the normal RECV state.  Dispatches on the
 * received event (I-frame, RR, RNR, REJ, SREJ) and on the txseq class
 * for I-frames.  A sequence gap moves the channel to SREJ_SENT.  Any
 * skb not queued or consumed (skb_in_use stays false) is freed here.
 *
 * Returns 0 on success or a negative error from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Frames not queued for later processing are freed here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6241
/* ERTM receive handler while SREJs are outstanding (SREJ_SENT state).
 * Every acceptable I-frame is buffered in srej_q; when the head of the
 * SREJ list arrives, buffered in-sequence frames are flushed via
 * l2cap_rx_queued_iframes() (which returns the channel to RECV once
 * the queue drains).  Unqueued skbs are freed before returning.
 *
 * Returns 0 on success or a negative error from reassembly.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived; it is now
			 * possible to deliver buffered frames in order.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frames not queued for later processing are freed here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6384
6385 static int l2cap_finish_move(struct l2cap_chan *chan)
6386 {
6387 BT_DBG("chan %p", chan);
6388
6389 chan->rx_state = L2CAP_RX_STATE_RECV;
6390
6391 if (chan->hs_hcon)
6392 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6393 else
6394 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6395
6396 return l2cap_resegment(chan);
6397 }
6398
/* RX handler for the WAIT_P state (channel move in progress): only an
 * S-frame with the P-bit set is acceptable.  The transmit queue is
 * rewound to the peer's reqseq, the move is finished and an answer
 * carrying the F-bit is sent; remaining event processing is delegated
 * to the RECV-state handler.  I-frames are a protocol error here.
 *
 * Returns 0 on success, -EPROTO on protocol violation, or an error
 * from l2cap_finish_move()/l2cap_rx_state_recv().
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6436
/* RX handler for the WAIT_F state (channel move in progress): only a
 * frame with the F-bit set is acceptable.  The transmit queue is
 * rewound to the peer's reqseq, the connection MTU is switched to the
 * link now carrying the channel, queued data is re-segmented and the
 * frame is then processed by the RECV-state handler.
 *
 * Returns 0 on success, -EPROTO on protocol violation, or an error
 * from resegmentation / the RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6474
6475 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6476 {
6477 /* Make sure reqseq is for a packet that has been sent but not acked */
6478 u16 unacked;
6479
6480 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6481 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6482 }
6483
6484 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6485 struct sk_buff *skb, u8 event)
6486 {
6487 int err = 0;
6488
6489 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6490 control, skb, event, chan->rx_state);
6491
6492 if (__valid_reqseq(chan, control->reqseq)) {
6493 switch (chan->rx_state) {
6494 case L2CAP_RX_STATE_RECV:
6495 err = l2cap_rx_state_recv(chan, control, skb, event);
6496 break;
6497 case L2CAP_RX_STATE_SREJ_SENT:
6498 err = l2cap_rx_state_srej_sent(chan, control, skb,
6499 event);
6500 break;
6501 case L2CAP_RX_STATE_WAIT_P:
6502 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6503 break;
6504 case L2CAP_RX_STATE_WAIT_F:
6505 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6506 break;
6507 default:
6508 /* shut it down */
6509 break;
6510 }
6511 } else {
6512 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6513 control->reqseq, chan->next_tx_seq,
6514 chan->expected_ack_seq);
6515 l2cap_send_disconn_req(chan, ECONNRESET);
6516 }
6517
6518 return err;
6519 }
6520
/* Receive path for streaming mode.  In-sequence frames are passed to
 * SDU reassembly; any other classification aborts the partially
 * assembled SDU and drops the frame — streaming mode never requests
 * retransmission.  The receive window simply slides past the received
 * txseq.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: discard any partial SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6558
/* Entry point for ERTM/streaming data frames on a channel.  Unpacks
 * the control field, verifies the FCS, validates the payload length
 * against the MPS and the F/P bit combination against the TX state,
 * then routes I-frames into l2cap_rx()/l2cap_stream_rx() and S-frames
 * into l2cap_rx() via the event table.  Invalid frames are dropped;
 * protocol violations also disconnect the channel.  Always returns 0;
 * skb ownership is consumed in every path.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU length field and the FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps control->super (RR/REJ/RNR/SREJ) to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6646
6647 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6648 {
6649 struct l2cap_conn *conn = chan->conn;
6650 struct l2cap_le_credits pkt;
6651 u16 return_credits;
6652
6653 /* We return more credits to the sender only after the amount of
6654 * credits falls below half of the initial amount.
6655 */
6656 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6657 return;
6658
6659 return_credits = le_max_credits - chan->rx_credits;
6660
6661 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6662
6663 chan->rx_credits += return_credits;
6664
6665 pkt.cid = cpu_to_le16(chan->scid);
6666 pkt.credits = cpu_to_le16(return_credits);
6667
6668 chan->ident = l2cap_get_ident(conn);
6669
6670 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6671 }
6672
/* Receive path for LE credit-based flow control channels.  Charges one
 * credit per PDU (replenishing the sender when needed), then either
 * starts a new SDU (first PDU carries the SDU length) or appends a
 * fragment to the in-progress SDU, delivering it upstream once
 * complete.  skb ownership is fully consumed here; errors free the
 * skb and any partial SDU and the function still returns 0 (a negative
 * return is reserved for the no-credits / oversized-PDU cases, where
 * the caller frees the skb).
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unfragmented SDU: deliver it directly */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6762
/* Route a data frame received on connection-oriented CID @cid to the
 * matching channel, dispatching on the channel mode (LE flow control,
 * basic, ERTM/streaming).  Frames for unknown CIDs are dropped, except
 * for the A2MP CID where a channel is created on demand.  skb is
 * consumed in every path; the channel lock is released before return.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb in all cases */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6829
/* Deliver a connectionless (UCD) frame for @psm to a matching global
 * channel on a BR/EDR link.  The remote address and PSM are stashed in
 * the skb control block for recvmsg()'s msg_name.  The channel ref
 * taken by l2cap_global_chan_by_psm() is dropped on every path, and
 * the skb is freed unless the channel's recv op consumed it.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6866
/* Top-level demultiplexer for a complete L2CAP frame.  Frames arriving
 * before the HCI link is fully up are parked on pending_rx (drained by
 * process_pending_rx()).  The basic header is validated, blacklisted
 * LE peers are ignored, and the payload is routed by CID to the
 * signalling, connectionless or data channel handlers.  skb ownership
 * is consumed in every path.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6921
6922 static void process_pending_rx(struct work_struct *work)
6923 {
6924 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6925 pending_rx_work);
6926 struct sk_buff *skb;
6927
6928 BT_DBG("");
6929
6930 while ((skb = skb_dequeue(&conn->pending_rx)))
6931 l2cap_recv_frame(conn, skb);
6932 }
6933
/* Create (or return the existing) L2CAP connection object for @hcon.
 * Allocates an HCI channel and the conn structure, initializes the
 * MTU from the link type, the fixed-channel feature bits, locks,
 * channel list and the deferred-work items.  Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up for this link */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* A2MP fixed channel only when high speed is enabled */
	if (hcon->type == ACL_LINK &&
	    test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* SMP over BR/EDR requires Secure Connections support */
	if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     test_bit(HCI_FORCE_BREDR_SMP, &hcon->hdev->dbg_flags)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7000
7001 static bool is_valid_psm(u16 psm, u8 dst_type) {
7002 if (!psm)
7003 return false;
7004
7005 if (bdaddr_type_is_le(dst_type))
7006 return (psm <= 0x00ff);
7007
7008 /* PSM must be odd and lsb of upper byte must be 0 */
7009 return ((psm & 0x0101) == 0x0001);
7010 }
7011
/* Initiate an outgoing connection on @chan to @dst (@dst_type) using
 * PSM @psm or fixed CID @cid.  Validates the PSM/CID combination, the
 * channel mode and state, creates or reuses the underlying ACL/LE HCI
 * connection, attaches the channel to the L2CAP connection and starts
 * the connect procedure (or completes it immediately if the link is
 * already up).
 *
 * Returns 0 on success (including "already connecting") or a negative
 * errno: -EHOSTUNREACH, -EINVAL, -EOPNOTSUPP, -EISCONN, -EBADFD,
 * -ENOMEM, -EBUSY, or an error from the HCI connect call.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels require a PSM */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* Fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we can only accept connections */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* A fixed destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7173
7174 /* ---- L2CAP interface with lower layer (HCI) ---- */
7175
7176 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7177 {
7178 int exact = 0, lm1 = 0, lm2 = 0;
7179 struct l2cap_chan *c;
7180
7181 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7182
7183 /* Find listening sockets and check their link_mode */
7184 read_lock(&chan_list_lock);
7185 list_for_each_entry(c, &chan_list, global_l) {
7186 if (c->state != BT_LISTEN)
7187 continue;
7188
7189 if (!bacmp(&c->src, &hdev->bdaddr)) {
7190 lm1 |= HCI_LM_ACCEPT;
7191 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7192 lm1 |= HCI_LM_MASTER;
7193 exact++;
7194 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7195 lm2 |= HCI_LM_ACCEPT;
7196 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7197 lm2 |= HCI_LM_MASTER;
7198 }
7199 }
7200 read_unlock(&chan_list_lock);
7201
7202 return exact ? lm1 : lm2;
7203 }
7204
7205 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7206 * from an existing channel in the list or from the beginning of the
7207 * global list (by passing NULL as first parameter).
7208 */
7209 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7210 struct hci_conn *hcon)
7211 {
7212 u8 src_type = bdaddr_src_type(hcon);
7213
7214 read_lock(&chan_list_lock);
7215
7216 if (c)
7217 c = list_next_entry(c, global_l);
7218 else
7219 c = list_entry(chan_list.next, typeof(*c), global_l);
7220
7221 list_for_each_entry_from(c, &chan_list, global_l) {
7222 if (c->chan_type != L2CAP_CHAN_FIXED)
7223 continue;
7224 if (c->state != BT_LISTEN)
7225 continue;
7226 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7227 continue;
7228 if (src_type != c->src_type)
7229 continue;
7230
7231 l2cap_chan_hold(c);
7232 read_unlock(&chan_list_lock);
7233 return c;
7234 }
7235
7236 read_unlock(&chan_list_lock);
7237
7238 return NULL;
7239 }
7240
/* HCI callback: ACL link setup completed.  On failure the L2CAP state
 * for the link is torn down; on success an l2cap_conn is created
 * (unless the peer is blacklisted) and every listening fixed channel
 * is offered the chance to instantiate a channel on the new link.
 */
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		/* Link setup failed: destroy any L2CAP connection state */
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* Ask the listener to spawn a per-connection channel */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Fetch the next listener before dropping our reference
		 * to the current one (the hold keeps it on the list).
		 */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7298
7299 int l2cap_disconn_ind(struct hci_conn *hcon)
7300 {
7301 struct l2cap_conn *conn = hcon->l2cap_data;
7302
7303 BT_DBG("hcon %p", hcon);
7304
7305 if (!conn)
7306 return HCI_ERROR_REMOTE_USER_TERM;
7307 return conn->disc_reason;
7308 }
7309
/* HCI callback: the ACL link has been disconnected - tear down the
 * whole L2CAP connection, converting the HCI reason to an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7316
7317 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7318 {
7319 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7320 return;
7321
7322 if (encrypt == 0x00) {
7323 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7324 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7325 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7326 chan->sec_level == BT_SECURITY_FIPS)
7327 l2cap_chan_close(chan, ECONNREFUSED);
7328 } else {
7329 if (chan->sec_level == BT_SECURITY_MEDIUM)
7330 __clear_chan_timer(chan);
7331 }
7332 }
7333
/* HCI callback: authentication/encryption status changed on the link.
 * Walk every channel of the connection and advance or tear down its
 * state machine according to the new security status.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels are not affected by link-level security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Security procedure succeeded: channel now operates at
		 * the link's security level.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels that still have a connect req pending */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established (or configuring) channels simply resume;
		 * l2cap_check_encryption may still close/time them out
		 * if encryption was lost.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connection was waiting for security */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			/* Incoming BR/EDR connection was waiting for
			 * security before we answer the connect request.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse the connection
				 * and schedule the channel for disconnect.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, immediately follow up with our own
			 * configuration request (unless already sent).
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7426
/* HCI callback: ACL data received on this link.  Reassembles
 * fragmented L2CAP PDUs (an ACL_START fragment followed by ACL_CONT
 * fragments) into conn->rx_skb and hands complete frames to
 * l2cap_recv_frame(), which takes ownership of the skb.  The incoming
 * fragment @skb is always consumed (freed here or handed off).
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start fragment while a reassembly is in progress
		 * means the previous PDU was truncated - discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		/* Total PDU length announced by the L2CAP header */
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Bytes still missing before the PDU is complete */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a preceding start fragment */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment exceeds the length announced in the header -
		 * abandon the current reassembly entirely.
		 */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* NOTE: fragments with unrecognized flags fall through here and
	 * are silently dropped along with handled fragments.
	 */
drop:
	kfree_skb(skb);
	return 0;
}
7531
7532 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7533 {
7534 struct l2cap_chan *c;
7535
7536 read_lock(&chan_list_lock);
7537
7538 list_for_each_entry(c, &chan_list, global_l) {
7539 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7540 &c->src, c->src_type, &c->dst, c->dst_type,
7541 c->state, __le16_to_cpu(c->psm),
7542 c->scid, c->dcid, c->imtu, c->omtu,
7543 c->sec_level, c->mode);
7544 }
7545
7546 read_unlock(&chan_list_lock);
7547
7548 return 0;
7549 }
7550
/* Bind the single-show seq_file helper to the debugfs file */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7555
/* File operations for the read-only "l2cap" debugfs channel dump */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the "l2cap" debugfs file, removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7564
7565 int __init l2cap_init(void)
7566 {
7567 int err;
7568
7569 err = l2cap_init_sockets();
7570 if (err < 0)
7571 return err;
7572
7573 if (IS_ERR_OR_NULL(bt_debugfs))
7574 return 0;
7575
7576 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7577 NULL, &l2cap_debugfs_fops);
7578
7579 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7580 &le_max_credits);
7581 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7582 &le_default_mps);
7583
7584 return 0;
7585 }
7586
/* Module teardown: remove the debugfs entry and unregister sockets */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7592
/* Allow disabling ERTM at module load time (also writable via sysfs) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
/* (removed: git web viewer page-generation footer accidentally captured
 * with the source; not part of the file)
 */