Bluetooth: l2cap: Set more channel defaults
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 #include "6lowpan.h"
44
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46
47 bool disable_ertm;
48
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
51
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54
55 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
56 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57
58 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
59 u8 code, u8 ident, u16 dlen, void *data);
60 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 void *data);
62 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
63 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64
65 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
66 struct sk_buff_head *skbs, u8 event);
67
68 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
69 {
70 if (hcon->type == LE_LINK) {
71 if (type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
73 else
74 return BDADDR_LE_RANDOM;
75 }
76
77 return BDADDR_BREDR;
78 }
79
80 /* ---- L2CAP channels ---- */
81
82 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
83 u16 cid)
84 {
85 struct l2cap_chan *c;
86
87 list_for_each_entry(c, &conn->chan_l, list) {
88 if (c->dcid == cid)
89 return c;
90 }
91 return NULL;
92 }
93
94 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
95 u16 cid)
96 {
97 struct l2cap_chan *c;
98
99 list_for_each_entry(c, &conn->chan_l, list) {
100 if (c->scid == cid)
101 return c;
102 }
103 return NULL;
104 }
105
106 /* Find channel with given SCID.
107 * Returns locked channel. */
108 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
109 u16 cid)
110 {
111 struct l2cap_chan *c;
112
113 mutex_lock(&conn->chan_lock);
114 c = __l2cap_get_chan_by_scid(conn, cid);
115 if (c)
116 l2cap_chan_lock(c);
117 mutex_unlock(&conn->chan_lock);
118
119 return c;
120 }
121
122 /* Find channel with given DCID.
123 * Returns locked channel.
124 */
125 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
126 u16 cid)
127 {
128 struct l2cap_chan *c;
129
130 mutex_lock(&conn->chan_lock);
131 c = __l2cap_get_chan_by_dcid(conn, cid);
132 if (c)
133 l2cap_chan_lock(c);
134 mutex_unlock(&conn->chan_lock);
135
136 return c;
137 }
138
139 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
140 u8 ident)
141 {
142 struct l2cap_chan *c;
143
144 list_for_each_entry(c, &conn->chan_l, list) {
145 if (c->ident == ident)
146 return c;
147 }
148 return NULL;
149 }
150
151 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
152 u8 ident)
153 {
154 struct l2cap_chan *c;
155
156 mutex_lock(&conn->chan_lock);
157 c = __l2cap_get_chan_by_ident(conn, ident);
158 if (c)
159 l2cap_chan_lock(c);
160 mutex_unlock(&conn->chan_lock);
161
162 return c;
163 }
164
165 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
166 {
167 struct l2cap_chan *c;
168
169 list_for_each_entry(c, &chan_list, global_l) {
170 if (c->sport == psm && !bacmp(&c->src, src))
171 return c;
172 }
173 return NULL;
174 }
175
/* Bind @chan to a PSM on the local address @src.
 *
 * A non-zero @psm is used as-is, after checking that no other channel
 * already claims it for @src (-EADDRINUSE if one does).  A zero @psm
 * requests dynamic allocation: the first free PSM in 0x1001..0x10ff
 * (stepping by 2, keeping the least significant bit set) is taken, or
 * -EINVAL is returned when the range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
208
/* Assign a fixed source CID to @chan.  Always succeeds (returns 0);
 * the write lock only serializes against global channel-list users.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
219
/* Allocate the first unused dynamic source CID on @conn.
 *
 * The end of the dynamic range depends on the link type (LE has a
 * smaller range than BR/EDR).  Returns 0 when the range is exhausted;
 * 0 is not a valid dynamic CID, so callers can use it as failure.
 */
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
236
/* Move @chan into @state and notify the channel owner via the
 * state_change callback (with no error).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
245
/* Move @chan into @state and report @err through the state_change
 * callback in the same notification.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
252
/* Report @err to the channel owner without changing the channel state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
257
/* Arm the ERTM retransmission timer, but only when the monitor timer
 * is not already pending and a retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
266
/* Arm the ERTM monitor timer (if a timeout is configured), cancelling
 * any pending retransmission timer first — only one of the two runs at
 * a time.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
275
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
277 u16 seq)
278 {
279 struct sk_buff *skb;
280
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
283 return skb;
284 }
285
286 return NULL;
287 }
288
289 /* ---- L2CAP sequence number lists ---- */
290
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
297 * allocs or frees.
298 */
299
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
301 {
302 size_t alloc_size, i;
303
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
307 */
308 alloc_size = roundup_pow_of_two(size);
309
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
311 if (!seq_list->list)
312 return -ENOMEM;
313
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
319
320 return 0;
321 }
322
/* Release the backing array of a sequence-number list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
327
/* Return true when @seq is currently a member of the list.  A slot
 * holding anything other than L2CAP_SEQ_LIST_CLEAR means membership.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
334
/* Remove and return the sequence number at the head of the list.
 *
 * The slot of the popped entry chains to the next member; when it
 * chains to L2CAP_SEQ_LIST_TAIL the popped entry was the last one and
 * the list is reset to empty.  Callers ensure the list is non-empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next chained entry and clear the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
350
/* Empty the list, clearing every slot.  A no-op when the list is
 * already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
364
/* Append @seq to the tail of the list.  Duplicate appends are ignored
 * since membership is tracked per slot.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Empty list: the new entry becomes the head, otherwise chain it
	 * after the current tail.
	 */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
382
/* Work function run when a channel timer (armed via __set_chan_timer)
 * expires.  Closes the channel with an error code derived from its
 * current state, then drops a channel reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock is taken before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
412
413 struct l2cap_chan *l2cap_chan_create(void)
414 {
415 struct l2cap_chan *chan;
416
417 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
418 if (!chan)
419 return NULL;
420
421 mutex_init(&chan->lock);
422
423 write_lock(&chan_list_lock);
424 list_add(&chan->global_l, &chan_list);
425 write_unlock(&chan_list_lock);
426
427 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
428
429 chan->state = BT_OPEN;
430
431 kref_init(&chan->kref);
432
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
435
436 BT_DBG("chan %p", chan);
437
438 return chan;
439 }
440
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
453
/* Take a reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
460
/* Drop a reference on the channel; the last put frees it via
 * l2cap_chan_destroy().
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
467
/* Reset a channel's tunable parameters to the L2CAP defaults and clear
 * any leftover configuration state.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	/* Start with the remote mirroring our own limits until the
	 * configuration exchange says otherwise.
	 */
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
485
/* Initialise LE credit-based flow control state: no partial SDU in
 * progress, no transmit credits until the peer grants some, and a
 * receive credit/MPS budget from the module-wide defaults.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	/* MPS may not exceed our incoming MTU */
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
497
/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type, set the default extended-flow-spec parameters, take references
 * on both the channel and the underlying hci_conn, and link the
 * channel into the connection's list.  Callers hold conn->chan_lock
 * (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow specification */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
546
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
553
554 void l2cap_chan_del(struct l2cap_chan *chan, int err)
555 {
556 struct l2cap_conn *conn = chan->conn;
557
558 __clear_chan_timer(chan);
559
560 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
561
562 if (conn) {
563 struct amp_mgr *mgr = conn->hcon->amp_mgr;
564 /* Delete from channel list */
565 list_del(&chan->list);
566
567 l2cap_chan_put(chan);
568
569 chan->conn = NULL;
570
571 if (chan->scid != L2CAP_CID_A2MP)
572 hci_conn_drop(conn->hcon);
573
574 if (mgr && mgr->bredr_chan == chan)
575 mgr->bredr_chan = NULL;
576 }
577
578 if (chan->hs_hchan) {
579 struct hci_chan *hs_hchan = chan->hs_hchan;
580
581 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
582 amp_disconnect_logical_link(hs_hchan);
583 }
584
585 chan->ops->teardown(chan, err);
586
587 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
588 return;
589
590 switch(chan->mode) {
591 case L2CAP_MODE_BASIC:
592 break;
593
594 case L2CAP_MODE_LE_FLOWCTL:
595 skb_queue_purge(&chan->tx_q);
596 break;
597
598 case L2CAP_MODE_ERTM:
599 __clear_retrans_timer(chan);
600 __clear_monitor_timer(chan);
601 __clear_ack_timer(chan);
602
603 skb_queue_purge(&chan->srej_q);
604
605 l2cap_seq_list_free(&chan->srej_list);
606 l2cap_seq_list_free(&chan->retrans_list);
607
608 /* fall through */
609
610 case L2CAP_MODE_STREAMING:
611 skb_queue_purge(&chan->tx_q);
612 break;
613 }
614
615 return;
616 }
617
/* Propagate an updated remote identity address from @hcon to every
 * channel on its L2CAP connection.
 */
void l2cap_conn_update_id_addr(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
634
/* Reject a pending LE credit-based connection request on @chan and
 * move the channel to BT_DISCONN.  Deferred-setup channels are
 * rejected with "authorization" so the peer knows the decision was
 * pending, all others with "bad PSM".
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
657
/* Reject a pending BR/EDR connection request on @chan and move the
 * channel to BT_DISCONN.  Deferred-setup channels are rejected with
 * "security block", all others with "bad PSM".
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
678
/* Close @chan with @reason, choosing the shutdown path based on the
 * current channel state: established connection-oriented channels send
 * a disconnect request first, half-open incoming ones send a reject,
 * everything else is torn down directly.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Ask the peer to disconnect and wait for the
			 * response under the channel timer.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request not yet accepted: reject it on the
		 * appropriate transport before deleting the channel.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
720
/* Translate a channel's type, PSM and security level into the HCI
 * authentication requirement used for pairing.  SDP connections may
 * have their security level downgraded to BT_SECURITY_SDP as a side
 * effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw (signalling-only) channels use dedicated bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP never requires bonding */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
771
/* Service level security */
/* Enforce the channel's security level on its link: SMP for LE links,
 * HCI authentication/encryption for BR/EDR.  Returns the underlying
 * helper's result (non-zero when security is already satisfied).
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
785
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-owned range 1-128.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
807
/* Build and transmit an L2CAP signalling command on @conn.  Silently
 * does nothing if the command skb cannot be built.  Signalling frames
 * are sent at maximum priority and force the link active.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use non-flushable frames when the controller supports them */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
829
830 static bool __chan_is_moving(struct l2cap_chan *chan)
831 {
832 return chan->move_state != L2CAP_MOVE_STABLE &&
833 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
834 }
835
/* Transmit one skb for @chan, routing it over the high-speed (AMP)
 * logical link when one is established and no move is in progress,
 * otherwise over the regular ACL link.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		/* hs_hcon without hs_hchan: nowhere to send — drop */
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Non-flushable only when the channel isn't flagged flushable
	 * and the controller supports it.
	 */
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
862
/* Decode a 16-bit ERTM enhanced control field into @control.  The
 * frame-type bit selects S-frame (supervisory) or I-frame fields; the
 * fields belonging to the other frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
886
/* Decode a 32-bit ERTM extended control field into @control; same
 * S-frame/I-frame split as __unpack_enhanced_control(), with the wider
 * extended-window field layout.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
910
/* Strip the control field from the front of @skb into its control
 * block, using the extended (32-bit) or enhanced (16-bit) layout
 * depending on the channel's FLAG_EXT_CTRL setting.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
924
/* Encode @control into a 32-bit extended control field (inverse of
 * __unpack_extended_control()).
 */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
943
/* Encode @control into a 16-bit enhanced control field (inverse of
 * __unpack_enhanced_control()).
 */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
962
/* Write @control into @skb directly after the L2CAP basic header,
 * using the control-field width selected by FLAG_EXT_CTRL.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
975
976 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
977 {
978 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
979 return L2CAP_EXT_HDR_SIZE;
980 else
981 return L2CAP_ENH_HDR_SIZE;
982 }
983
/* Build a supervisory (S-frame) PDU for @chan carrying the already
 * packed @control field, appending an FCS when CRC16 is in use.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: payload length and destination CID */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1016
/* Transmit an S-frame described by @control, updating the related
 * ERTM bookkeeping (pending F-bit, RNR-sent flag, last acked
 * sequence).  Silently ignored for non-S-frames or while an AMP move
 * is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Every frame except SREJ acknowledges up to reqseq, so the
	 * standalone ack timer can be stopped.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1057
/* Send a Receiver Ready, or Receiver Not Ready when the local side is
 * busy, acknowledging up to the current buffer_seq.  @poll sets the
 * P-bit on the frame.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1076
/* True when no connect request is outstanding on @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1081
/* Decide whether @chan may be created on (or moved to) an AMP
 * controller: high speed must be enabled, the remote must support the
 * A2MP fixed channel, at least one non-BR/EDR AMP controller must be
 * up, and the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1109
/* Validate the negotiated extended flow specification for @chan.
 * Currently a stub that accepts all parameters.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1115
/* Send an L2CAP Connection Request for @chan's PSM/SCID and mark the
 * connect as pending until the response arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1130
/* Send a Create Channel Request for @chan on the AMP controller
 * identified by @amp_id.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1143
/* Prepare an ERTM channel for an AMP move: stop all timers, rewind
 * retry state, clear SREJ bookkeeping and park the tx/rx state
 * machines.  No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of every already-transmitted frame; the
	 * first untransmitted frame (retries == 0) ends the scan.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move has completed */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1178
/* Finish an AMP channel move: return the move state machine to
 * stable, then (for ERTM) put the receiver into the wait-for-F-bit or
 * wait-for-P-bit state depending on which side initiated the move.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1200
/* Transition @chan to BT_CONNECTED and notify its owner via ->ready().
 * Clears all configuration state and the channel timer first; an LE
 * flow-control channel with no TX credits is suspended immediately so
 * the owner does not try to send before credits arrive.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	/* State must be updated before the ready callback runs */
	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1214
1215 static void l2cap_le_connect(struct l2cap_chan *chan)
1216 {
1217 struct l2cap_conn *conn = chan->conn;
1218 struct l2cap_le_conn_req req;
1219
1220 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1221 return;
1222
1223 req.psm = chan->psm;
1224 req.scid = cpu_to_le16(chan->scid);
1225 req.mtu = cpu_to_le16(chan->imtu);
1226 req.mps = cpu_to_le16(chan->mps);
1227 req.credits = cpu_to_le16(chan->rx_credits);
1228
1229 chan->ident = l2cap_get_ident(conn);
1230
1231 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1232 sizeof(req), &req);
1233 }
1234
1235 static void l2cap_le_start(struct l2cap_chan *chan)
1236 {
1237 struct l2cap_conn *conn = chan->conn;
1238
1239 if (!smp_conn_security(conn->hcon, chan->sec_level))
1240 return;
1241
1242 if (!chan->psm) {
1243 l2cap_chan_ready(chan);
1244 return;
1245 }
1246
1247 if (chan->state == BT_CONNECT)
1248 l2cap_le_connect(chan);
1249 }
1250
1251 static void l2cap_start_connection(struct l2cap_chan *chan)
1252 {
1253 if (__amp_capable(chan)) {
1254 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1255 a2mp_discover_amp(chan);
1256 } else if (chan->conn->hcon->type == LE_LINK) {
1257 l2cap_le_start(chan);
1258 } else {
1259 l2cap_send_conn_req(chan);
1260 }
1261 }
1262
/* Start connection establishment for @chan.
 *
 * On BR/EDR the remote feature mask must be known first: if the
 * information request has not been sent yet, send it and arm the info
 * timer; the actual connect request is then issued from
 * l2cap_conn_start() once the response (or timeout) arrives.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait until the feature-mask exchange has completed */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* l2cap_info_timeout() fires if no response arrives */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1293
1294 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1295 {
1296 u32 local_feat_mask = l2cap_feat_mask;
1297 if (!disable_ertm)
1298 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1299
1300 switch (mode) {
1301 case L2CAP_MODE_ERTM:
1302 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1303 case L2CAP_MODE_STREAMING:
1304 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1305 default:
1306 return 0x00;
1307 }
1308 }
1309
/* Send a Disconnection Request for @chan and move it to BT_DISCONN,
 * recording @err as the channel error.  ERTM timers are stopped first
 * so they cannot fire during teardown.  A2MP channels have no classic
 * disconnect handshake, so they only get the state change.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1336
1337 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward once the
 * link is usable (also called when the feature-mask exchange finishes).
 *
 * BT_CONNECT channels get their connect request sent, or are closed if
 * their required mode is unsupported by the remote.  BT_CONNECT2
 * channels get the pending connect response sent, followed by the first
 * configuration request on success.
 *
 * Takes conn->chan_lock and each channel's lock in turn.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* NOTE(review): CONF_STATE2_DEVICE appears to mark
			 * channels that must not fall back to another mode;
			 * such channels are closed when the remote lacks
			 * support for the requested mode.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Owner decides; answer with
					 * authorization pending for now.
					 */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only send the first config request once, and only
			 * after a successful connect response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1413
1414 /* Find socket with cid and source/destination bdaddr.
1415 * Returns closest match, locked.
1416 */
1417 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1418 bdaddr_t *src,
1419 bdaddr_t *dst)
1420 {
1421 struct l2cap_chan *c, *c1 = NULL;
1422
1423 read_lock(&chan_list_lock);
1424
1425 list_for_each_entry(c, &chan_list, global_l) {
1426 if (state && c->state != state)
1427 continue;
1428
1429 if (c->scid == cid) {
1430 int src_match, dst_match;
1431 int src_any, dst_any;
1432
1433 /* Exact match. */
1434 src_match = !bacmp(&c->src, src);
1435 dst_match = !bacmp(&c->dst, dst);
1436 if (src_match && dst_match) {
1437 read_unlock(&chan_list_lock);
1438 return c;
1439 }
1440
1441 /* Closest match */
1442 src_any = !bacmp(&c->src, BDADDR_ANY);
1443 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1444 if ((src_match && dst_any) || (src_any && dst_match) ||
1445 (src_any && dst_any))
1446 c1 = c;
1447 }
1448 }
1449
1450 read_unlock(&chan_list_lock);
1451
1452 return c1;
1453 }
1454
/* Handle a newly-usable LE link: register it with 6lowpan and, when a
 * server socket is listening on the ATT fixed channel, spawn a child
 * channel for the remote device.  Bails out when a client-initiated ATT
 * channel already exists or when the remote address is blacklisted.
 *
 * Called with conn->chan_lock held by l2cap_conn_ready().
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	bt_6lowpan_add_conn(conn);

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
		return;

	l2cap_chan_lock(pchan);

	/* Ask the listening socket for a child channel */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
1497
/* Called when the underlying HCI link becomes ready: starts security on
 * outgoing LE pairings, sets up the LE fixed channels, and advances every
 * existing channel appropriate to its type and state.  Finally kicks the
 * pending-RX worker to process frames that arrived before readiness.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed separately */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless channels need no handshake */
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1541
1542 /* Notify sockets that we cannot guaranty reliability anymore */
1543 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1544 {
1545 struct l2cap_chan *chan;
1546
1547 BT_DBG("conn %p", conn);
1548
1549 mutex_lock(&conn->chan_lock);
1550
1551 list_for_each_entry(chan, &conn->chan_l, list) {
1552 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1553 l2cap_chan_set_err(chan, err);
1554 }
1555
1556 mutex_unlock(&conn->chan_lock);
1557 }
1558
/* Info-request timer: the remote never answered our feature-mask
 * request, so mark the exchange as done and start the channels anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1569
1570 /*
1571 * l2cap_user
1572 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1573 * callback is called during registration. The ->remove callback is called
1574 * during unregistration.
 1575  * An l2cap_user object is unregistered either explicitly or when the
 1576  * underlying l2cap_conn object is deleted. This guarantees that conn->hcon,
 1577  * conn->hchan, etc. remain valid until the remove callback has been called.
1578 * External modules must own a reference to the l2cap_conn object if they intend
1579 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1580 * any time if they don't.
1581 */
1582
/* Register @user on @conn and invoke its ->probe callback.
 *
 * Returns 0 on success, -EINVAL if @user is already linked, -ENODEV if
 * the connection has already been torn down, or the ->probe error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1620
/* Unregister @user from @conn and invoke its ->remove callback.
 * Safe to call for a user that was never (or already un-) registered;
 * the NULLed list pointers are the "not registered" marker checked by
 * l2cap_register_user().
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1639
/* Remove every registered l2cap_user from @conn, invoking each one's
 * ->remove callback.  The list head is re-read each iteration, which
 * stays correct even if a callback modifies the list.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		/* Mark as unregistered for l2cap_register_user() */
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1652
/* Tear down the L2CAP connection attached to @hcon, delivering @err to
 * every channel.  Drains pending RX, unregisters external users, kills
 * all channels, releases the HCI channel, cancels outstanding timers and
 * finally drops the connection reference taken at creation.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Purge queued frames before waiting out the worker that
	 * would otherwise process them.
	 */
	skb_queue_purge(&conn->pending_rx);
	flush_work(&conn->pending_rx_work);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel across ->close() so l2cap_chan_del()
		 * dropping its reference cannot free it under us.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1701
1702 static void security_timeout(struct work_struct *work)
1703 {
1704 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1705 security_timer.work);
1706
1707 BT_DBG("conn %p", conn);
1708
1709 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1710 smp_chan_destroy(conn);
1711 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1712 }
1713 }
1714
/* kref release callback: drop the hci_conn reference and free the
 * l2cap_conn once the last l2cap_conn_put() has run.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1722
/* Take a reference on @conn; pair with l2cap_conn_put(). */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1728
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1734
1735 /* ---- Socket interface ---- */
1736
1737 /* Find socket with psm and source / destination bdaddr.
1738 * Returns closest match.
1739 */
/* Find the channel registered for @psm whose addresses best match
 * @src/@dst, restricted to channels usable on @link_type and optionally
 * to channels in @state (0 = any).  An exact address match is returned
 * immediately; otherwise the last wildcard (BDADDR_ANY) match wins.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* BR/EDR links only match BR/EDR sources and vice versa */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1784
/* ERTM monitor timer: feed a monitor-timeout event into the TX state
 * machine.  The timer holds a channel reference, which is dropped here;
 * a channel already detached from its connection is ignored.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1805
1806 static void l2cap_retrans_timeout(struct work_struct *work)
1807 {
1808 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1809 retrans_timer.work);
1810
1811 BT_DBG("chan %p", chan);
1812
1813 l2cap_chan_lock(chan);
1814
1815 if (!chan->conn) {
1816 l2cap_chan_unlock(chan);
1817 l2cap_chan_put(chan);
1818 return;
1819 }
1820
1821 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1822 l2cap_chan_unlock(chan);
1823 l2cap_chan_put(chan);
1824 }
1825
/* Transmit @skbs on a streaming-mode channel.
 *
 * Streaming mode has no retransmission: each frame is stamped with the
 * next TX sequence number, gets its control field (and FCS, when
 * configured) appended, and is sent immediately.  Nothing is sent while
 * the channel is being moved between controllers.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames never acknowledge anything */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1864
/* Transmit as many queued I-frames as the ERTM TX window allows.
 *
 * Each frame keeps its control info in bt_cb() and stays on tx_q for
 * possible retransmission; a clone is what actually goes to the radio.
 * Returns the number of frames sent, 0 when blocked (remote busy,
 * channel moving, window full), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggy-back an ack for everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the "next to send" cursor along tx_q */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1934
/* Retransmit every I-frame whose sequence number is on retrans_list.
 *
 * Each frame's stored control field is refreshed (reqseq, F-bit) and
 * repacked into a writable copy before sending.  Exceeding max_tx on
 * any frame disconnects the channel.  Nothing is sent while the remote
 * is busy or the channel is moving.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2013
/* Retransmit the single frame requested by @control->reqseq. */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2022
/* Queue every unacked frame from @control->reqseq onward for
 * retransmission and resend them, honoring a poll request by arming the
 * F-bit.  Skipped entirely while the remote reports busy.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Locate the first frame at or after reqseq that is still
		 * outstanding, then queue from there up to tx_send_head.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2056
/* Acknowledge received I-frames.
 *
 * When locally busy, an RNR S-frame is sent immediately.  Otherwise
 * pending I-frames are given a chance to carry the ack; if unacked
 * frames still cover at least 3/4 of the ack window an RR S-frame is
 * sent now, else the ack timer is (re)armed to ack later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2106
/* Copy @len bytes of user data from @msg into @skb, the first @count
 * bytes directly and the rest as HCI-MTU-sized fragments chained on
 * skb's frag_list.  Returns bytes copied or a negative error; fragments
 * already attached are freed by the caller's kfree_skb(@skb).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2151
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the PSM, then the user payload from @msg.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	/* First fragment is capped at the HCI MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2186
2187 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2188 struct msghdr *msg, size_t len,
2189 u32 priority)
2190 {
2191 struct l2cap_conn *conn = chan->conn;
2192 struct sk_buff *skb;
2193 int err, count;
2194 struct l2cap_hdr *lh;
2195
2196 BT_DBG("chan %p len %zu", chan, len);
2197
2198 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2199
2200 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2201 msg->msg_flags & MSG_DONTWAIT);
2202 if (IS_ERR(skb))
2203 return skb;
2204
2205 skb->priority = priority;
2206
2207 /* Create L2CAP header */
2208 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2209 lh->cid = cpu_to_le16(chan->dcid);
2210 lh->len = cpu_to_le16(len);
2211
2212 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2213 if (unlikely(err < 0)) {
2214 kfree_skb(skb);
2215 return ERR_PTR(err);
2216 }
2217 return skb;
2218 }
2219
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU length (@sdulen
 * nonzero for the first segment of a segmented SDU), then the payload.
 * Room for the FCS is reserved in the header budget but appended later.
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2273
/* Segment the SDU in @msg into ERTM/streaming I-frame PDUs and append
 * them to @seg_queue with their SAR field set (UNSEGMENTED, or
 * START/CONTINUE/END).  Returns 0 on success or a negative error, in
 * which case @seg_queue is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU; no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later segments omit the SDU length field */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2343
/* Build an LE credit-based flow-control PDU: L2CAP header, an optional
 * SDU length (@sdulen nonzero for the first segment), then the payload
 * from @msg.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2386
/* Segment the SDU in @msg into LE flow-control PDUs and append them to
 * @seg_queue.  The first PDU carries the total SDU length and therefore
 * has L2CAP_SDULEN_SIZE less payload room than the rest.  Returns 0 or
 * a negative error, in which case @seg_queue is purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* Payload per PDU is bounded by the HCI MTU and the remote MPS */
	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	sdu_len = len;
	pdu_len -= L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Later segments omit the SDU length field */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2426
/* Send user data on @chan according to its mode.
 *
 * @chan:     channel to transmit on (caller holds the channel lock)
 * @msg:      user data to send
 * @len:      total SDU length in bytes
 * @priority: skb priority for connectionless and basic-mode PDUs
 *
 * Returns @len on success or a negative errno.  The channel lock may
 * be dropped while skbs are allocated, so the connected state is
 * re-checked after every potentially blocking allocation.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Credit-based flow control: cannot send without credits */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Segmentation may have blocked; a disconnect in the
		 * meantime overrides any segmentation result.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many queued PDUs as current credits allow */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		/* Out of credits: stop the caller until more arrive */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2559
/* Request retransmission of every frame missing between the next
 * expected sequence number and @txseq (exclusive).  Frames already
 * held out-of-order in srej_q are skipped; each requested sequence
 * number is tracked in srej_list.  On return expected_tx_seq points
 * one past @txseq, since @txseq itself has just been received.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only SREJ frames we do not already hold buffered */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2582
2583 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2584 {
2585 struct l2cap_ctrl control;
2586
2587 BT_DBG("chan %p", chan);
2588
2589 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2590 return;
2591
2592 memset(&control, 0, sizeof(control));
2593 control.sframe = 1;
2594 control.super = L2CAP_SUPER_SREJ;
2595 control.reqseq = chan->srej_list.tail;
2596 l2cap_send_sframe(chan, &control);
2597 }
2598
/* Re-send SREJ S-frames for all still-missing sequence numbers,
 * stopping early once @txseq (or an empty list) is reached.  Each
 * entry is popped and then re-appended, so the initial head is
 * captured up front to guarantee exactly one pass over the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Still missing: keep tracking it for future SREJs */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2624
/* Process the ReqSeq acknowledgment carried by a received frame:
 * release every transmitted frame up to (but not including) @reqseq
 * from the tx queue, and stop the retransmission timer once no
 * unacked frames remain.  A no-op when nothing is unacked or the
 * acknowledgment brings no news.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* Frame may already have been released by an earlier ack */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2656
2657 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2658 {
2659 BT_DBG("chan %p", chan);
2660
2661 chan->expected_tx_seq = chan->buffer_seq;
2662 l2cap_seq_list_clear(&chan->srej_list);
2663 skb_queue_purge(&chan->srej_q);
2664 chan->rx_state = L2CAP_RX_STATE_RECV;
2665 }
2666
/* ERTM transmit state machine, XMIT state: frames may be transmitted
 * immediately.  @control carries the received frame's fields for the
 * RECV_* events; @skbs carries segmented PDUs for DATA_REQUEST; both
 * may be NULL for events that do not use them.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy: poll the peer with
			 * RR(P=1) and wait for its final response.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2738
2739 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2740 struct l2cap_ctrl *control,
2741 struct sk_buff_head *skbs, u8 event)
2742 {
2743 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2744 event);
2745
2746 switch (event) {
2747 case L2CAP_EV_DATA_REQUEST:
2748 if (chan->tx_send_head == NULL)
2749 chan->tx_send_head = skb_peek(skbs);
2750 /* Queue data, but don't send. */
2751 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2752 break;
2753 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2754 BT_DBG("Enter LOCAL_BUSY");
2755 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2756
2757 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2758 /* The SREJ_SENT state must be aborted if we are to
2759 * enter the LOCAL_BUSY state.
2760 */
2761 l2cap_abort_rx_srej_sent(chan);
2762 }
2763
2764 l2cap_send_ack(chan);
2765
2766 break;
2767 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2768 BT_DBG("Exit LOCAL_BUSY");
2769 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2770
2771 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2772 struct l2cap_ctrl local_control;
2773 memset(&local_control, 0, sizeof(local_control));
2774 local_control.sframe = 1;
2775 local_control.super = L2CAP_SUPER_RR;
2776 local_control.poll = 1;
2777 local_control.reqseq = chan->buffer_seq;
2778 l2cap_send_sframe(chan, &local_control);
2779
2780 chan->retry_count = 1;
2781 __set_monitor_timer(chan);
2782 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2783 }
2784 break;
2785 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2786 l2cap_process_reqseq(chan, control->reqseq);
2787
2788 /* Fall through */
2789
2790 case L2CAP_EV_RECV_FBIT:
2791 if (control && control->final) {
2792 __clear_monitor_timer(chan);
2793 if (chan->unacked_frames > 0)
2794 __set_retrans_timer(chan);
2795 chan->retry_count = 0;
2796 chan->tx_state = L2CAP_TX_STATE_XMIT;
2797 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2798 }
2799 break;
2800 case L2CAP_EV_EXPLICIT_POLL:
2801 /* Ignore */
2802 break;
2803 case L2CAP_EV_MONITOR_TO:
2804 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2805 l2cap_send_rr_or_rnr(chan, 1);
2806 __set_monitor_timer(chan);
2807 chan->retry_count++;
2808 } else {
2809 l2cap_send_disconn_req(chan, ECONNABORTED);
2810 }
2811 break;
2812 default:
2813 break;
2814 }
2815 }
2816
2817 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2818 struct sk_buff_head *skbs, u8 event)
2819 {
2820 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2821 chan, control, skbs, event, chan->tx_state);
2822
2823 switch (chan->tx_state) {
2824 case L2CAP_TX_STATE_XMIT:
2825 l2cap_tx_state_xmit(chan, control, skbs, event);
2826 break;
2827 case L2CAP_TX_STATE_WAIT_F:
2828 l2cap_tx_state_wait_f(chan, control, skbs, event);
2829 break;
2830 default:
2831 /* Ignore event */
2832 break;
2833 }
2834 }
2835
/* Feed the reqseq (and F bit) of a received frame into the transmit
 * state machine so acknowledged frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2842
/* Feed only the F bit of a received frame into the transmit state
 * machine (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2849
2850 /* Copy frame to all raw sockets on that connection */
2851 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2852 {
2853 struct sk_buff *nskb;
2854 struct l2cap_chan *chan;
2855
2856 BT_DBG("conn %p", conn);
2857
2858 mutex_lock(&conn->chan_lock);
2859
2860 list_for_each_entry(chan, &conn->chan_l, list) {
2861 if (chan->chan_type != L2CAP_CHAN_RAW)
2862 continue;
2863
2864 /* Don't send frame to the channel it came from */
2865 if (bt_cb(skb)->chan == chan)
2866 continue;
2867
2868 nskb = skb_clone(skb, GFP_KERNEL);
2869 if (!nskb)
2870 continue;
2871 if (chan->ops->recv(chan, nskb))
2872 kfree_skb(nskb);
2873 }
2874
2875 mutex_unlock(&conn->chan_lock);
2876 }
2877
2878 /* ---- L2CAP signalling commands ---- */
/* Allocate a signalling PDU carrying one command.
 *
 * @conn:  connection whose MTU bounds each fragment
 * @code:  command code
 * @ident: identifier echoed by the peer's response
 * @dlen:  command payload length
 * @data:  command payload
 *
 * The L2CAP and command headers plus as much payload as fits go into
 * the head skb; remaining payload is chained as headerless frag_list
 * fragments.  Returns NULL on allocation failure or when the MTU
 * cannot even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use their own signalling channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the head skb with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far */
	kfree_skb(skb);
	return NULL;
}
2944
/* Parse one configuration option at *ptr and advance *ptr past it.
 *
 * @ptr:  in/out cursor into the option list
 * @type: option type (hint bit included)
 * @olen: option payload length as claimed by the packet
 * @val:  1/2/4-byte options are returned by value; any other length
 *        is returned as a pointer to the raw payload bytes
 *
 * Returns the total number of bytes consumed.
 *
 * NOTE(review): opt->len comes straight from the peer and is not
 * checked against the remaining packet length here -- callers only
 * bound their loops with "len >= L2CAP_CONF_OPT_SIZE", so a crafted
 * length can make *val point past the request.  Verify against
 * upstream hardening of this helper; TODO confirm.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back the raw pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2978
/* Append one configuration option at *ptr and advance *ptr past it.
 * 1/2/4-byte values are stored directly; any other length treats
 * @val as a pointer and copies @len bytes from it.
 *
 * NOTE(review): no output bound is taken -- the caller must guarantee
 * at least L2CAP_CONF_OPT_SIZE + @len bytes of space remain at *ptr.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* @val is really a pointer to the option payload */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3008
/* Append an Extended Flow Specification option matching the channel
 * mode: ERTM uses the locally configured service parameters with
 * default access latency and flush timeout; streaming mode advertises
 * best-effort with zeroed latency/flush.  Other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		/* No EFS option for other modes */
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3039
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * have not been acknowledged yet, send an RR/RNR S-frame.  Drops a
 * channel reference at the end (presumably the one taken when the
 * timer was scheduled -- confirm against the timer-arming macros).
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last acknowledgment we sent */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3059
/* Reset all ERTM/streaming sequence state for @chan and, for ERTM
 * proper, initialize its timers and allocate the SREJ and
 * retransmission sequence lists.
 *
 * Returns 0 on success or a negative errno from sequence-list
 * allocation; if the second list fails, the first is freed again.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on BR/EDR */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3104
3105 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3106 {
3107 switch (mode) {
3108 case L2CAP_MODE_STREAMING:
3109 case L2CAP_MODE_ERTM:
3110 if (l2cap_mode_supported(mode, remote_feat_mask))
3111 return mode;
3112 /* fall through */
3113 default:
3114 return L2CAP_MODE_BASIC;
3115 }
3116 }
3117
3118 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3119 {
3120 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3121 }
3122
3123 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3124 {
3125 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3126 }
3127
/* Fill in the RFC option's retransmission and monitor timeouts.  On
 * an AMP (high-speed) link both are derived from the controller's
 * best-effort flush timeout; otherwise the L2CAP spec defaults are
 * used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range the RFC option can carry */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3165
/* Finalize the transmit window.  A window larger than the standard
 * maximum requires extended-window support, which in turn switches
 * the channel to extended control fields; otherwise the window is
 * clamped to the default.  The ack window mirrors the chosen window.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
3180
/* Build the option payload of a Configure Request for @chan into
 * @data and return its length.  On the very first request the channel
 * mode is settled against the remote's feature mask; later requests
 * go straight to option encoding.
 *
 * NOTE(review): options are appended without an explicit output
 * bound; callers pass fixed-size buffers -- TODO confirm worst-case
 * sizing at every call site.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep the mode they were given */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default incoming MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC option is only meaningful
		 * when the remote supports ERTM or streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Offer an MPS that fits the HCI MTU with worst-case
		 * extended header, SDU length and FCS overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* The full (possibly extended) window goes in EWS */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3298
/* Parse the peer's Configure Request (buffered in chan->conf_req) and
 * build our Configure Response into @data.
 *
 * Returns the response length, or -ECONNREFUSED when no acceptable
 * configuration can be negotiated.
 *
 * NOTE(review): response options are appended through @ptr with no
 * explicit bound on the caller's buffer -- TODO confirm all callers
 * size @data for the worst-case response.  Also note that if the
 * peer sends an EFS option whose length is not sizeof(efs), the
 * local 'efs' stays uninitialized yet may still be read below when
 * FLAG_EFS_ENABLE is set -- verify against upstream fixes.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect all options from the request */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended windows require high-speed support */
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints are ignored; unknown options
			 * are echoed back with CONF_UNKNOWN.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	/* Settle our mode against what the remote proposed */
	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Only one round of mode renegotiation is allowed */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS, when present, supersedes the RFC window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's MPS to fit our HCI MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3512
/* Process the peer's Configure Response (@rsp, @len) and build the
 * next Configure Request into @data, echoing/adjusting the options
 * the peer sent back.  *result may be updated (e.g. when the MTU is
 * unacceptably small).  Returns the new request's length or
 * -ECONNREFUSED.
 *
 * NOTE(review): options are written into @data through @ptr with no
 * output bound, and callers pass fixed-size stack buffers -- verify
 * against upstream hardening of this path (this is the function class
 * affected by the 2017 L2CAP config stack-overflow fixes).  Likewise,
 * 'efs' may remain uninitialized if the peer's EFS option length does
 * not match sizeof(efs) but is still echoed below -- TODO confirm.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Refuse MTUs below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State-2 devices may not change mode here */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel cannot be talked into another mode */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3621
3622 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3623 u16 result, u16 flags)
3624 {
3625 struct l2cap_conf_rsp *rsp = data;
3626 void *ptr = rsp->data;
3627
3628 BT_DBG("chan %p", chan);
3629
3630 rsp->scid = cpu_to_le16(chan->dcid);
3631 rsp->result = cpu_to_le16(result);
3632 rsp->flags = cpu_to_le16(flags);
3633
3634 return ptr - data;
3635 }
3636
3637 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3638 {
3639 struct l2cap_le_conn_rsp rsp;
3640 struct l2cap_conn *conn = chan->conn;
3641
3642 BT_DBG("chan %p", chan);
3643
3644 rsp.dcid = cpu_to_le16(chan->scid);
3645 rsp.mtu = cpu_to_le16(chan->imtu);
3646 rsp.mps = cpu_to_le16(chan->mps);
3647 rsp.credits = cpu_to_le16(chan->rx_credits);
3648 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3649
3650 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3651 &rsp);
3652 }
3653
/* Send the deferred Connect Response (and, once only, the initial Config
 * Request) for a BR/EDR or AMP channel whose acceptance was deferred.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels answer with Create Channel Response instead. */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* The initial Config Request must be sent exactly once. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3682
3683 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3684 {
3685 int type, olen;
3686 unsigned long val;
3687 /* Use sane default values in case a misbehaving remote device
3688 * did not send an RFC or extended window size option.
3689 */
3690 u16 txwin_ext = chan->ack_win;
3691 struct l2cap_conf_rfc rfc = {
3692 .mode = chan->mode,
3693 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3694 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3695 .max_pdu_size = cpu_to_le16(chan->imtu),
3696 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3697 };
3698
3699 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3700
3701 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3702 return;
3703
3704 while (len >= L2CAP_CONF_OPT_SIZE) {
3705 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3706
3707 switch (type) {
3708 case L2CAP_CONF_RFC:
3709 if (olen == sizeof(rfc))
3710 memcpy(&rfc, (void *)val, olen);
3711 break;
3712 case L2CAP_CONF_EWS:
3713 txwin_ext = val;
3714 break;
3715 }
3716 }
3717
3718 switch (rfc.mode) {
3719 case L2CAP_MODE_ERTM:
3720 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3721 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3722 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3723 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3724 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3725 else
3726 chan->ack_win = min_t(u16, chan->ack_win,
3727 rfc.txwin_size);
3728 break;
3729 case L2CAP_MODE_STREAMING:
3730 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3731 }
3732 }
3733
3734 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3735 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3736 u8 *data)
3737 {
3738 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3739
3740 if (cmd_len < sizeof(*rej))
3741 return -EPROTO;
3742
3743 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3744 return 0;
3745
3746 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3747 cmd->ident == conn->info_ident) {
3748 cancel_delayed_work(&conn->info_timer);
3749
3750 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3751 conn->info_ident = 0;
3752
3753 l2cap_conn_start(conn);
3754 }
3755
3756 return 0;
3757 }
3758
/* Handle an incoming Connect Request (or AMP Create Channel Request).
 *
 * Looks up a listening channel for the requested PSM, performs security
 * checks, creates the new channel and sends rsp_code back.  Returns the
 * new channel (or NULL) so AMP callers can complete further setup.
 *
 * @rsp_code: L2CAP_CONN_RSP or L2CAP_CREATE_CHAN_RSP
 * @amp_id:   destination controller id (AMP_ID_BREDR for BR/EDR)
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	/* The peer's source CID is our destination CID. */
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Defer acceptance to the channel owner. */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security not yet satisfied: authentication pending. */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still in flight; answer pending. */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off the feature-mask exchange if it has not started yet. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, send our first Config Request right away. */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3892
/* Signaling handler for a Connect Request PDU: notify mgmt of the new
 * device connection (once per ACL) and delegate to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* Report the connection to mgmt only the first time. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3913
/* Handle a Connect Response or Create Channel Response from the peer.
 *
 * On success, move the channel to BT_CONFIG and send the first Config
 * Request; on pending, just record that; anything else tears the
 * channel down with ECONNREFUSED.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A zero scid is valid for a pending response; then match the
	 * channel by the signaling ident instead.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the initial Config Request only once. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3986
3987 static inline void set_default_fcs(struct l2cap_chan *chan)
3988 {
3989 /* FCS is enabled only in ERTM or streaming mode, if one or both
3990 * sides request it.
3991 */
3992 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3993 chan->fcs = L2CAP_FCS_NONE;
3994 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3995 chan->fcs = L2CAP_FCS_CRC16;
3996 }
3997
3998 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3999 u8 ident, u16 flags)
4000 {
4001 struct l2cap_conn *conn = chan->conn;
4002
4003 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4004 flags);
4005
4006 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4007 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4008
4009 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4010 l2cap_build_conf_rsp(chan, data,
4011 L2CAP_CONF_SUCCESS, flags), data);
4012 }
4013
4014 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4015 u16 scid, u16 dcid)
4016 {
4017 struct l2cap_cmd_rej_cid rej;
4018
4019 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4020 rej.scid = __cpu_to_le16(scid);
4021 rej.dcid = __cpu_to_le16(dcid);
4022
4023 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4024 }
4025
/* Handle a Configure Request from the peer.
 *
 * Accumulates (possibly fragmented) option data in chan->conf_req,
 * parses it once complete, replies, and moves the channel towards ready
 * when both directions of configuration have finished.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4134
/* Handle a Configure Response from the peer.
 *
 * Success stores the agreed RFC parameters; pending triggers the EFS
 * flow; unaccept re-negotiates up to L2CAP_CONF_MAX_CONF_RSP times;
 * anything else disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* AMP channels wait for the logical link instead. */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many negotiation rounds: fall through to the
		 * default case and give up on the channel.
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* If our outgoing config is also done, the channel is ready. */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4246
/* Handle a Disconnect Request: acknowledge it, then tear the channel
 * down and notify its owner.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a reference across chan_del so close() below still has a
	 * valid channel after it has been removed from the connection.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4293
/* Handle a Disconnect Response: the peer confirmed our disconnect, so
 * remove the channel and notify its owner.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference across chan_del so close() still has a valid
	 * channel after removal from the connection.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4332
/* Handle an Information Request: answer with our feature mask or fixed
 * channel map, or NOTSUPP for unknown types.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE(review): this mutates the module-wide
		 * l2cap_fixed_chan[] based on a per-connection flag, with
		 * no visible locking here — looks racy across concurrent
		 * connections; confirm against the rest of the file.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4387
/* Handle an Information Response: record the peer's feature mask (and
 * then query its fixed channels), or its fixed channel map, and start
 * any channels that were waiting on the exchange.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer cannot answer: treat the exchange as finished. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Follow up with a fixed-channel query if supported. */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4450
/* Handle an AMP Create Channel Request.
 *
 * amp_id 0 (AMP_ID_BREDR) is treated as an ordinary BR/EDR connect;
 * otherwise the AMP controller id is validated and the channel is
 * associated with the high-speed link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is not used on AMP links. */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4527
4528 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4529 {
4530 struct l2cap_move_chan_req req;
4531 u8 ident;
4532
4533 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4534
4535 ident = l2cap_get_ident(chan->conn);
4536 chan->ident = ident;
4537
4538 req.icid = cpu_to_le16(chan->scid);
4539 req.dest_amp_id = dest_amp_id;
4540
4541 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4542 &req);
4543
4544 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4545 }
4546
4547 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4548 {
4549 struct l2cap_move_chan_rsp rsp;
4550
4551 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4552
4553 rsp.icid = cpu_to_le16(chan->dcid);
4554 rsp.result = cpu_to_le16(result);
4555
4556 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4557 sizeof(rsp), &rsp);
4558 }
4559
4560 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4561 {
4562 struct l2cap_move_chan_cfm cfm;
4563
4564 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4565
4566 chan->ident = l2cap_get_ident(chan->conn);
4567
4568 cfm.icid = cpu_to_le16(chan->scid);
4569 cfm.result = cpu_to_le16(result);
4570
4571 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4572 sizeof(cfm), &cfm);
4573
4574 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4575 }
4576
4577 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4578 {
4579 struct l2cap_move_chan_cfm cfm;
4580
4581 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4582
4583 cfm.icid = cpu_to_le16(icid);
4584 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4585
4586 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4587 sizeof(cfm), &cfm);
4588 }
4589
4590 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4591 u16 icid)
4592 {
4593 struct l2cap_move_chan_cfm_rsp rsp;
4594
4595 BT_DBG("icid 0x%4.4x", icid);
4596
4597 rsp.icid = cpu_to_le16(icid);
4598 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4599 }
4600
/* Drop the channel's references to its high-speed logical link. */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4608
/* React to a failed logical link setup: abort channel creation, or
 * unwind an in-progress channel move depending on our move role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4639
/* Complete AMP channel creation once the logical link is up: attach the
 * link, send the pending EFS Configure Response, and finish channel
 * setup if the peer's configuration has already arrived.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the pending response was deferred. */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4662
/* Advance the channel move state machine once the logical link for a
 * move has come up, sending confirm/response PDUs as appropriate.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Wait for local traffic to drain first if busy. */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4696
/* Call with chan locked
 *
 * Logical link confirmation callback: dispatch to the failure, create
 * or move completion path depending on status and channel state.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4717
/* Begin moving a channel between BR/EDR and an AMP controller,
 * depending on where it currently lives and the channel policy.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		/* Only move off BR/EDR when policy prefers AMP. */
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		/* Moving back to BR/EDR (controller id 0). */
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4736
/* Continue channel creation after AMP physical link setup finished,
 * for both the outgoing (BT_CONNECT) and incoming directions.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links. */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		/* On success, enter config and send our Config Request. */
		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4788
4789 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4790 u8 remote_amp_id)
4791 {
4792 l2cap_move_setup(chan);
4793 chan->move_id = local_amp_id;
4794 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4795
4796 l2cap_send_move_chan_req(chan, remote_amp_id);
4797 }
4798
/* Responder side of a channel move: answer the pending Move Channel
 * request once the state of the logical link is known.
 *
 * Called with @chan locked.  @result is currently unused.  The hci_chan
 * lookup is still a placeholder, so @hchan is always NULL here and the
 * move request is always refused with L2CAP_MR_NOT_ALLOWED.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4823
/* Abort an in-progress channel move and resume normal operation.
 *
 * Called with @chan locked.  If this side was the move responder a
 * negative Move Channel response is sent first: -EINVAL maps to
 * "bad controller ID", everything else to "not allowed".
 */
static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
{
	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
		u8 rsp_result;
		if (result == -EINVAL)
			rsp_result = L2CAP_MR_BAD_ID;
		else
			rsp_result = L2CAP_MR_NOT_ALLOWED;

		l2cap_send_move_chan_rsp(chan, rsp_result);
	}

	/* Back to the stable (not moving) state */
	chan->move_role = L2CAP_MOVE_ROLE_NONE;
	chan->move_state = L2CAP_MOVE_STABLE;

	/* Restart data transmission */
	l2cap_ertm_send(chan);
}
4842
/* Invoke with locked chan
 *
 * Physical link confirmation for an AMP create or move.  Dispatches on
 * channel state and move role: a channel that is not yet connected
 * continues creation, a failed result cancels the move, and otherwise
 * the initiator or responder path of the move state machine is taken.
 *
 * NOTE(review): the chan lock is dropped only on the early
 * BT_DISCONN/BT_CLOSED return; on all other paths the caller appears
 * to be responsible for unlocking - confirm against the callers.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel is going away; nothing left to do for the move */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4876
/* Handle an incoming Move Channel request.
 *
 * Validates that the channel exists, is movable (dynamic CID, ERTM or
 * streaming mode, policy permits AMP) and that the destination
 * controller is usable, then either accepts the move (possibly pending
 * on local busy or physical link setup) or rejects it.  Always replies
 * with a Move Channel response.  Returns 0, -EPROTO on a malformed
 * request, or -EINVAL when high speed is disabled.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	/* l2cap_get_chan_by_dcid() returns the channel locked */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: reject using the requested icid */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels without a BR/EDR-only
	 * policy may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A non-BR/EDR destination must be a powered-up AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4974
/* Advance the initiator's move state machine after a successful or
 * pending Move Channel response for @icid.
 *
 * Depending on the current move state this either waits for the
 * logical link, sends the Move Channel confirm, or treats any other
 * state as a failed move and reverts the channel.  The hci_chan lookup
 * is still a placeholder, so @hchan stays NULL and the WAIT_RSP path
 * currently always sends an UNCONFIRMED confirm.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns the channel locked */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5064
/* Handle a negative Move Channel response.
 *
 * On a collision this side becomes the move responder and waits for
 * the peer's request; any other failure cancels the move and keeps the
 * channel on its current controller.  An UNCONFIRMED confirm is always
 * sent to complete the signaling exchange.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Look up by the request's ident; returns the channel locked */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision: switch to responder role */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5093
5094 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5095 struct l2cap_cmd_hdr *cmd,
5096 u16 cmd_len, void *data)
5097 {
5098 struct l2cap_move_chan_rsp *rsp = data;
5099 u16 icid, result;
5100
5101 if (cmd_len != sizeof(*rsp))
5102 return -EPROTO;
5103
5104 icid = le16_to_cpu(rsp->icid);
5105 result = le16_to_cpu(rsp->result);
5106
5107 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5108
5109 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5110 l2cap_move_continue(conn, icid, result);
5111 else
5112 l2cap_move_fail(conn, cmd->ident, icid, result);
5113
5114 return 0;
5115 }
5116
/* Handle a Move Channel confirm from the move initiator.
 *
 * If this channel was waiting for the confirm, commit (or revert) the
 * controller change and finish the move.  A confirm response is sent
 * in every case, even when the icid is unknown, as the spec requires.
 * Returns 0 or -EPROTO on a malformed packet.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns the channel locked */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move committed: adopt the new controller */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Move aborted: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5158
/* Handle the final Move Channel confirm response.
 *
 * Completes the move on the initiator side: adopt the new controller
 * and, if the channel ended up back on BR/EDR, release the now unused
 * logical link.  Returns 0 or -EPROTO on a malformed packet.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns the channel locked; unknown icid needs no reply here */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR: drop the AMP logical link if one exists */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5193
5194 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5195 u16 to_multiplier)
5196 {
5197 u16 max_latency;
5198
5199 if (min > max || min < 6 || max > 3200)
5200 return -EINVAL;
5201
5202 if (to_multiplier < 10 || to_multiplier > 3200)
5203 return -EINVAL;
5204
5205 if (max >= to_multiplier * 8)
5206 return -EINVAL;
5207
5208 max_latency = (to_multiplier * 8 / max) - 1;
5209 if (latency > 499 || latency > max_latency)
5210 return -EINVAL;
5211
5212 return 0;
5213 }
5214
/* Handle an LE Connection Parameter Update request.
 *
 * Only a master may process the request.  The parameters are validated
 * with l2cap_check_conn_param(); a response is always sent, and on
 * acceptance the controller is asked to update the connection.
 * Returns 0, -EPROTO on a malformed packet, or -EINVAL when not master
 * (which makes the caller send a command reject).
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Only apply the new parameters after the response is queued */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5256
/* Handle an LE credit-based Connection response.
 *
 * Looks up the pending channel by the request ident; on success the
 * remote parameters (dcid, MTU, MPS, initial credits) are stored and
 * the channel becomes ready, any other result tears the channel down.
 * Returns 0, -EPROTO on a malformed or out-of-range packet, -EBADSLT
 * when no matching channel is pending.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is the spec minimum for both LE MTU and MPS */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	/* conn->chan_lock must be held around the ident lookup */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		/* Any failure result closes the channel */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5315
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Returns the handler's error code; a non-zero return makes the caller
 * send a command reject.  Echo requests are answered inline, echo
 * responses are intentionally ignored.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5395
/* Handle an incoming LE credit-based Connection request.
 *
 * Finds a listening channel for the PSM, checks security and duplicate
 * source CIDs, then creates and registers a new channel.  The response
 * is sent immediately unless the socket uses deferred setup, in which
 * case it is sent later with L2CAP_CR_PEND suppressed here.
 * Returns 0 or -EPROTO on a malformed or out-of-range request.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the spec minimum for both LE MTU and MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Response is sent later once userspace accepts */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5507
5508 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5509 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5510 u8 *data)
5511 {
5512 struct l2cap_le_credits *pkt;
5513 struct l2cap_chan *chan;
5514 u16 cid, credits, max_credits;
5515
5516 if (cmd_len != sizeof(*pkt))
5517 return -EPROTO;
5518
5519 pkt = (struct l2cap_le_credits *) data;
5520 cid = __le16_to_cpu(pkt->cid);
5521 credits = __le16_to_cpu(pkt->credits);
5522
5523 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5524
5525 chan = l2cap_get_chan_by_dcid(conn, cid);
5526 if (!chan)
5527 return -EBADSLT;
5528
5529 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5530 if (credits > max_credits) {
5531 BT_ERR("LE credits overflow");
5532 l2cap_send_disconn_req(chan, ECONNRESET);
5533
5534 /* Return 0 so that we don't trigger an unnecessary
5535 * command reject packet.
5536 */
5537 return 0;
5538 }
5539
5540 chan->tx_credits += credits;
5541
5542 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5543 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5544 chan->tx_credits--;
5545 }
5546
5547 if (chan->tx_credits)
5548 chan->ops->resume(chan);
5549
5550 l2cap_chan_unlock(chan);
5551
5552 return 0;
5553 }
5554
/* Handle a Command Reject on the LE signaling channel.
 *
 * A reject for a pending request ident means the peer refused that
 * operation (e.g. an LE connect), so the matching channel is torn
 * down.  Returns 0 or -EPROTO on a malformed packet.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}
5579
/* Dispatch one LE signaling command to its handler.
 *
 * Returns the handler's error code; a non-zero return makes the caller
 * send a command reject.  Parameter update responses are intentionally
 * ignored.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5626
/* Process an skb received on the LE signaling channel.
 *
 * LE signaling carries exactly one command per PDU; validate the
 * header, dispatch it, and send a command reject when the handler
 * fails.  Consumes @skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* ident 0 is reserved and the length must match the payload */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading - err is the
		 * handler's error, not a link type mismatch.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5667
/* Process an skb received on the BR/EDR signaling channel.
 *
 * A BR/EDR signaling PDU may contain several commands back to back;
 * iterate over them, dispatching each and sending a command reject
 * when a handler fails.  Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before processing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* ident 0 is reserved; a command may not overrun the PDU */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): message text is misleading - err is
			 * the handler's error, not a link type mismatch.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the PDU */
		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5716
/* Verify and strip the trailing CRC16 FCS of an ERTM/streaming frame.
 *
 * Returns 0 when the FCS matches or the channel does not use one,
 * -EBADMSG on a checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off first; the bytes are still present in
		 * the buffer, so reading at skb->data + skb->len after the
		 * trim picks up the received FCS value.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The CRC covers the header (before skb->data) plus payload */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5737
/* Send a frame carrying the F-bit in response to a poll.
 *
 * Sends RNR when locally busy, otherwise flushes pending I-frames
 * (which may carry the F-bit) and falls back to an RR S-frame if the
 * F-bit was not consumed by any of them.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just cleared its busy condition: resume retransmission
	 * timing for anything still unacked.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5771
5772 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5773 struct sk_buff **last_frag)
5774 {
5775 /* skb->len reflects data in skb as well as all fragments
5776 * skb->data_len reflects only data in fragments
5777 */
5778 if (!skb_has_frag_list(skb))
5779 skb_shinfo(skb)->frag_list = new_frag;
5780
5781 new_frag->next = NULL;
5782
5783 (*last_frag)->next = new_frag;
5784 *last_frag = new_frag;
5785
5786 skb->len += new_frag->len;
5787 skb->data_len += new_frag->len;
5788 skb->truesize += new_frag->truesize;
5789 }
5790
/* Reassemble an SDU from segmented ERTM/streaming frames.
 *
 * @control->sar selects the segmentation role of @skb.  When this
 * function keeps @skb as part of a partial SDU it sets the local skb
 * pointer to NULL so the error path below does not free it; chan->sdu
 * holds the head of the partial SDU between calls.  On any error both
 * the current skb and the partial SDU are discarded.  Returns 0 on
 * success or a negative errno.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A complete SDU while reassembly is pending is an error */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new SDU may not start while one is being reassembled */
		if (chan->sdu)
			break;

		/* First segment carries the total SDU length up front */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* Start segment alone may not already satisfy the length */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership moved to chan->sdu; don't free below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a started SDU is an error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reassembly must still be short of the declared length */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End without a started SDU is an error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final length must match exactly what was announced */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending skb and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5872
/* Re-segment outstanding data after a channel move.
 * Not implemented yet; always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5878
5879 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5880 {
5881 u8 event;
5882
5883 if (chan->mode != L2CAP_MODE_ERTM)
5884 return;
5885
5886 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5887 l2cap_tx(chan, NULL, NULL, event);
5888 }
5889
/* Deliver buffered out-of-order I-frames that have become sequential.
 *
 * Repeatedly pulls the frame matching buffer_seq out of the SREJ queue
 * and reassembles it, stopping at the first gap, on local busy, or on
 * a reassembly error.  When the queue drains completely the channel
 * returns to the normal RECV state and acknowledges.  Returns 0 or the
 * reassembly error.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5923
/* Handle an incoming SREJ S-frame (selective retransmission request).
 *
 * Validates reqseq and the retry limit, then retransmits the requested
 * frame.  With the P-bit set the retransmitted frame answers the poll
 * and further pending frames are flushed; with the F-bit set a
 * duplicate SREJ for the same reqseq (SREJ_ACT already set) is not
 * retransmitted again.  Disconnects on protocol violations.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* SREJ for a sequence number we never sent is a violation */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* The retransmission answers the poll with the F-bit */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only for a duplicate SREJ of
			 * the frame we already retransmitted under WAIT_F.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5981
/* Handle a received REJ (Reject) S-frame in ERTM mode.
 *
 * The peer rejects all frames starting at control->reqseq; everything
 * from that point is retransmitted.  An out-of-range reqseq or an
 * exceeded retry limit causes a disconnect.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq means the peer rejected a frame that
	 * was never transmitted - protocol error.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means an unlimited number of retransmissions; skb
	 * may be NULL if the frame was already acked and removed.
	 */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if the REJ was not already handled
		 * while waiting for the F-bit.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6018
6019 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6020 {
6021 BT_DBG("chan %p, txseq %d", chan, txseq);
6022
6023 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6024 chan->expected_tx_seq);
6025
6026 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6027 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6028 chan->tx_win) {
6029 /* See notes below regarding "double poll" and
6030 * invalid packets.
6031 */
6032 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6033 BT_DBG("Invalid/Ignore - after SREJ");
6034 return L2CAP_TXSEQ_INVALID_IGNORE;
6035 } else {
6036 BT_DBG("Invalid - in window after SREJ sent");
6037 return L2CAP_TXSEQ_INVALID;
6038 }
6039 }
6040
6041 if (chan->srej_list.head == txseq) {
6042 BT_DBG("Expected SREJ");
6043 return L2CAP_TXSEQ_EXPECTED_SREJ;
6044 }
6045
6046 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6047 BT_DBG("Duplicate SREJ - txseq already stored");
6048 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6049 }
6050
6051 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6052 BT_DBG("Unexpected SREJ - not requested");
6053 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6054 }
6055 }
6056
6057 if (chan->expected_tx_seq == txseq) {
6058 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6059 chan->tx_win) {
6060 BT_DBG("Invalid - txseq outside tx window");
6061 return L2CAP_TXSEQ_INVALID;
6062 } else {
6063 BT_DBG("Expected");
6064 return L2CAP_TXSEQ_EXPECTED;
6065 }
6066 }
6067
6068 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6069 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6070 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6071 return L2CAP_TXSEQ_DUPLICATE;
6072 }
6073
6074 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6075 /* A source of invalid packets is a "double poll" condition,
6076 * where delays cause us to send multiple poll packets. If
6077 * the remote stack receives and processes both polls,
6078 * sequence numbers can wrap around in such a way that a
6079 * resent frame has a sequence number that looks like new data
6080 * with a sequence gap. This would trigger an erroneous SREJ
6081 * request.
6082 *
6083 * Fortunately, this is impossible with a tx window that's
6084 * less than half of the maximum sequence number, which allows
6085 * invalid frames to be safely ignored.
6086 *
6087 * With tx window sizes greater than half of the tx window
6088 * maximum, the frame is invalid and cannot be ignored. This
6089 * causes a disconnect.
6090 */
6091
6092 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6093 BT_DBG("Invalid/Ignore - txseq outside tx window");
6094 return L2CAP_TXSEQ_INVALID_IGNORE;
6095 } else {
6096 BT_DBG("Invalid - txseq outside tx window");
6097 return L2CAP_TXSEQ_INVALID;
6098 }
6099 } else {
6100 BT_DBG("Unexpected - txseq indicates missing frames");
6101 return L2CAP_TXSEQ_UNEXPECTED;
6102 }
6103 }
6104
/* ERTM receive handler for the normal RECV state.
 *
 * Processes one received event (I-frame or S-frame event) and drives
 * the tx/rx state machines.  Returns 0 on success or a negative error
 * from SDU reassembly; the caller disconnects on error.
 *
 * skb ownership: if the frame is not consumed (reassembled or queued on
 * srej_q), it is freed here before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;	/* set once skb has been consumed */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy the frame is dropped; the
			 * peer retransmits once busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			/* F-bit answers our earlier poll; retransmit
			 * unless a REJ already triggered it.
			 */
			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Don't retransmit during a channel move */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just left busy: restart the retransmit
			 * timer if frames are still unacked.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer is busy: stop retransmitting until it recovers */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Frame was neither reassembled nor queued - release it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6238
/* ERTM receive handler for the SREJ_SENT state (one or more SREJs are
 * outstanding and out-of-order frames are parked on srej_q until the
 * gaps are filled).
 *
 * skb ownership: same contract as l2cap_rx_state_recv - frames not
 * queued are freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;	/* set once skb has been queued */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived - queue it and
			 * try to flush in-order frames up the stack.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the tail of
			 * the SREJ list with the F-bit set.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Plain RNR: acknowledge with an RR carrying the
			 * current buffer_seq.
			 */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frame was not queued on srej_q - release it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6381
6382 static int l2cap_finish_move(struct l2cap_chan *chan)
6383 {
6384 BT_DBG("chan %p", chan);
6385
6386 chan->rx_state = L2CAP_RX_STATE_RECV;
6387
6388 if (chan->hs_hcon)
6389 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6390 else
6391 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6392
6393 return l2cap_resegment(chan);
6394 }
6395
/* Receive handler for the WAIT_P state during a channel move: only an
 * S-frame with the P-bit set is acceptable.  It finalizes the move
 * (MTU switch + resegmentation) and rewinds the transmit sequence to
 * what the receiver expects.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Anything without the P-bit is a protocol error here */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	/* Answer the poll with the F-bit set */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not valid while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Let the normal RECV handler process the remaining effects of
	 * this S-frame (skb is NULL, the frame itself is consumed).
	 */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6433
/* Receive handler for the WAIT_F state during a channel move: only a
 * frame with the F-bit set is acceptable.  Mirrors wait_p: adopt the
 * new transport MTU, resegment, rewind the tx sequence, then process
 * the frame through the normal RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the AMP block MTU when a high-speed link carries the
	 * channel, otherwise the ACL MTU.
	 */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6471
6472 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6473 {
6474 /* Make sure reqseq is for a packet that has been sent but not acked */
6475 u16 unacked;
6476
6477 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6478 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6479 }
6480
6481 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6482 struct sk_buff *skb, u8 event)
6483 {
6484 int err = 0;
6485
6486 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6487 control, skb, event, chan->rx_state);
6488
6489 if (__valid_reqseq(chan, control->reqseq)) {
6490 switch (chan->rx_state) {
6491 case L2CAP_RX_STATE_RECV:
6492 err = l2cap_rx_state_recv(chan, control, skb, event);
6493 break;
6494 case L2CAP_RX_STATE_SREJ_SENT:
6495 err = l2cap_rx_state_srej_sent(chan, control, skb,
6496 event);
6497 break;
6498 case L2CAP_RX_STATE_WAIT_P:
6499 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6500 break;
6501 case L2CAP_RX_STATE_WAIT_F:
6502 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6503 break;
6504 default:
6505 /* shut it down */
6506 break;
6507 }
6508 } else {
6509 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6510 control->reqseq, chan->next_tx_seq,
6511 chan->expected_ack_seq);
6512 l2cap_send_disconn_req(chan, ECONNRESET);
6513 }
6514
6515 return err;
6516 }
6517
6518 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6519 struct sk_buff *skb)
6520 {
6521 int err = 0;
6522
6523 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6524 chan->rx_state);
6525
6526 if (l2cap_classify_txseq(chan, control->txseq) ==
6527 L2CAP_TXSEQ_EXPECTED) {
6528 l2cap_pass_to_tx(chan, control);
6529
6530 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6531 __next_seq(chan, chan->buffer_seq));
6532
6533 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6534
6535 l2cap_reassemble_sdu(chan, skb, control);
6536 } else {
6537 if (chan->sdu) {
6538 kfree_skb(chan->sdu);
6539 chan->sdu = NULL;
6540 }
6541 chan->sdu_last_frag = NULL;
6542 chan->sdu_len = 0;
6543
6544 if (skb) {
6545 BT_DBG("Freeing %p", skb);
6546 kfree_skb(skb);
6547 }
6548 }
6549
6550 chan->last_acked_seq = control->txseq;
6551 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6552
6553 return err;
6554 }
6555
/* Entry point for ERTM/streaming data frames: validate FCS, payload
 * length and F/P bit combinations, then hand the frame to the rx state
 * machine (ERTM) or the streaming receiver.  Always consumes skb.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU length field (start fragments) and trailing
	 * FCS from the payload length before comparing against MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function field to an rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6643
6644 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6645 {
6646 struct l2cap_conn *conn = chan->conn;
6647 struct l2cap_le_credits pkt;
6648 u16 return_credits;
6649
6650 /* We return more credits to the sender only after the amount of
6651 * credits falls below half of the initial amount.
6652 */
6653 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6654 return;
6655
6656 return_credits = le_max_credits - chan->rx_credits;
6657
6658 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6659
6660 chan->rx_credits += return_credits;
6661
6662 pkt.cid = cpu_to_le16(chan->scid);
6663 pkt.credits = cpu_to_le16(return_credits);
6664
6665 chan->ident = l2cap_get_ident(conn);
6666
6667 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6668 }
6669
/* Receive one PDU on an LE flow-control (credit based) channel and
 * reassemble SDUs.
 *
 * skb ownership: a negative return means the caller still owns skb and
 * must free it; once reassembly starts (the trailing "return 0" paths)
 * the skb has been consumed or freed here.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* The peer must not send without credits */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Top up the peer's credits if we are running low */
	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		/* First PDU of an SDU: starts with a 16-bit SDU length */
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Complete SDU in a single PDU: deliver immediately */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		/* Partial SDU: keep the skb and wait for more fragments */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb is absorbed into the SDU under reassembly */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			/* recv took ownership of the SDU */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6759
/* Dispatch an incoming data frame to the channel identified by cid,
 * according to the channel's operating mode.  Always consumes skb.
 *
 * NOTE(review): the unlock at "done" implies l2cap_get_chan_by_scid()
 * returns the channel locked (the A2MP path locks explicitly) - verify
 * against its definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* A2MP channels are created on demand for the fixed CID */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Negative return means the skb was not consumed */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6826
/* Deliver a connectionless (CID 0x0002) frame to a matching PSM
 * listener.  Only valid on BR/EDR ACL links.  Always consumes skb.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	/* recv returning 0 means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6859
/* Deliver an ATT (CID 0x0004) frame on an LE link to the connected ATT
 * channel, dropping traffic from blacklisted peers.  Always consumes
 * skb.
 */
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != LE_LINK)
		goto drop;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 &hcon->src, &hcon->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* Silently discard data from blacklisted devices */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* recv returning 0 means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6888
6889 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6890 {
6891 struct l2cap_hdr *lh = (void *) skb->data;
6892 struct hci_conn *hcon = conn->hcon;
6893 u16 cid, len;
6894 __le16 psm;
6895
6896 if (hcon->state != BT_CONNECTED) {
6897 BT_DBG("queueing pending rx skb");
6898 skb_queue_tail(&conn->pending_rx, skb);
6899 return;
6900 }
6901
6902 skb_pull(skb, L2CAP_HDR_SIZE);
6903 cid = __le16_to_cpu(lh->cid);
6904 len = __le16_to_cpu(lh->len);
6905
6906 if (len != skb->len) {
6907 kfree_skb(skb);
6908 return;
6909 }
6910
6911 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6912
6913 switch (cid) {
6914 case L2CAP_CID_SIGNALING:
6915 l2cap_sig_channel(conn, skb);
6916 break;
6917
6918 case L2CAP_CID_CONN_LESS:
6919 psm = get_unaligned((__le16 *) skb->data);
6920 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6921 l2cap_conless_channel(conn, psm, skb);
6922 break;
6923
6924 case L2CAP_CID_ATT:
6925 l2cap_att_channel(conn, skb);
6926 break;
6927
6928 case L2CAP_CID_LE_SIGNALING:
6929 l2cap_le_sig_channel(conn, skb);
6930 break;
6931
6932 case L2CAP_CID_SMP:
6933 if (smp_sig_channel(conn, skb))
6934 l2cap_conn_del(conn->hcon, EACCES);
6935 break;
6936
6937 case L2CAP_FC_6LOWPAN:
6938 bt_6lowpan_recv(conn, skb);
6939 break;
6940
6941 default:
6942 l2cap_data_channel(conn, cid, skb);
6943 break;
6944 }
6945 }
6946
6947 static void process_pending_rx(struct work_struct *work)
6948 {
6949 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6950 pending_rx_work);
6951 struct sk_buff *skb;
6952
6953 BT_DBG("");
6954
6955 while ((skb = skb_dequeue(&conn->pending_rx)))
6956 l2cap_recv_frame(conn, skb);
6957 }
6958
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection.  Allocates an hci_chan, initializes locks, lists, timers
 * and the pending-rx queue, and picks the link MTU.  Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up for this hcon */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* conn holds a reference on the hcon for its lifetime */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links use the controller's LE MTU when one is advertised,
	 * otherwise (and for all other link types) the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links use the SMP security timer; BR/EDR links use the
	 * information-request timer.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7021
7022 static bool is_valid_psm(u16 psm, u8 dst_type) {
7023 if (!psm)
7024 return false;
7025
7026 if (bdaddr_type_is_le(dst_type))
7027 return (psm <= 0x00ff);
7028
7029 /* PSM must be odd and lsb of upper byte must be 0 */
7030 return ((psm & 0x0101) == 0x0001);
7031 }
7032
/* Initiate an outgoing connection on a channel to (dst, dst_type),
 * using psm (connection-oriented) and/or cid (fixed channels).
 *
 * Validates the psm/cid combination and channel mode, creates or reuses
 * the ACL/LE link, attaches the channel to the connection, and starts
 * the L2CAP connect procedure.  Returns 0 on success (or if already
 * connecting) and a negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	/* Pick the local adapter that routes to dst */
	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* A RAW channel needs neither a valid PSM nor a CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming only if not disabled by module param */
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      auth_type);
	} else {
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* The requested destination CID must not be in use already */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* chan_add must be called without the channel lock held */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, start the L2CAP-level connect now */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7187
7188 /* ---- L2CAP interface with lower layer (HCI) ---- */
7189
7190 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7191 {
7192 int exact = 0, lm1 = 0, lm2 = 0;
7193 struct l2cap_chan *c;
7194
7195 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7196
7197 /* Find listening sockets and check their link_mode */
7198 read_lock(&chan_list_lock);
7199 list_for_each_entry(c, &chan_list, global_l) {
7200 if (c->state != BT_LISTEN)
7201 continue;
7202
7203 if (!bacmp(&c->src, &hdev->bdaddr)) {
7204 lm1 |= HCI_LM_ACCEPT;
7205 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7206 lm1 |= HCI_LM_MASTER;
7207 exact++;
7208 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7209 lm2 |= HCI_LM_ACCEPT;
7210 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7211 lm2 |= HCI_LM_MASTER;
7212 }
7213 }
7214 read_unlock(&chan_list_lock);
7215
7216 return exact ? lm1 : lm2;
7217 }
7218
7219 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7220 {
7221 struct l2cap_conn *conn;
7222
7223 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7224
7225 if (!status) {
7226 conn = l2cap_conn_add(hcon);
7227 if (conn)
7228 l2cap_conn_ready(conn);
7229 } else {
7230 l2cap_conn_del(hcon, bt_to_errno(status));
7231 }
7232 }
7233
7234 int l2cap_disconn_ind(struct hci_conn *hcon)
7235 {
7236 struct l2cap_conn *conn = hcon->l2cap_data;
7237
7238 BT_DBG("hcon %p", hcon);
7239
7240 if (!conn)
7241 return HCI_ERROR_REMOTE_USER_TERM;
7242 return conn->disc_reason;
7243 }
7244
/* HCI callback: the ACL link to @hcon went down with HCI @reason.
 * Detach any 6LoWPAN state first, then delete the L2CAP connection
 * (which frees hcon->l2cap_data, so the order of the two calls
 * matters), translating the HCI reason into an errno for the channels.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	bt_6lowpan_del_conn(hcon->l2cap_data);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7253
7254 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7255 {
7256 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7257 return;
7258
7259 if (encrypt == 0x00) {
7260 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7261 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7262 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7263 chan->sec_level == BT_SECURITY_FIPS)
7264 l2cap_chan_close(chan, ECONNREFUSED);
7265 } else {
7266 if (chan->sec_level == BT_SECURITY_MEDIUM)
7267 __clear_chan_timer(chan);
7268 }
7269 }
7270
/* HCI callback: an authentication/encryption procedure finished on @hcon.
 * @status:  HCI status of the procedure (0 = success).
 * @encrypt: resulting encryption state (0 = off, non-zero = on).
 *
 * Walks every channel on the connection and advances its state machine
 * according to the security outcome.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* On LE, successful encryption triggers SMP key
		 * distribution; either way the pending security timer
		 * is no longer needed.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels are not affected by link security here */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* The ATT fixed channel has no connect/configure
		 * handshake: it becomes ready as soon as the link is
		 * successfully encrypted.
		 */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Leave channels with an outstanding connect request alone */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just resume and re-check encryption */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security was the gate for our outgoing connect:
			 * proceed on success, schedule disconnect on failure.
			 */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was waiting on security: now
			 * answer the remote's Connection Request.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject the request and
				 * schedule local disconnect.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately kick off configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7375
/* HCI callback: one ACL data fragment arrived on @hcon.
 * @flags: ACL packet boundary flags (start vs. continuation fragment).
 *
 * Reassembles fragmented L2CAP frames into conn->rx_skb and passes each
 * complete frame to l2cap_recv_frame(), which takes ownership of that
 * skb.  On every other path the incoming @skb is consumed at the drop:
 * label.  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received.  l2cap_recv_frame owns
			 * the skb now, so return instead of reaching the
			 * drop: label which would free it.
			 */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		/* Fragment copied; break falls through to drop:, which
		 * frees the original skb.  That fall-through is intended.
		 */
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Continuation would overflow the expected frame length */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7480
7481 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7482 {
7483 struct l2cap_chan *c;
7484
7485 read_lock(&chan_list_lock);
7486
7487 list_for_each_entry(c, &chan_list, global_l) {
7488 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7489 &c->src, &c->dst,
7490 c->state, __le16_to_cpu(c->psm),
7491 c->scid, c->dcid, c->imtu, c->omtu,
7492 c->sec_level, c->mode);
7493 }
7494
7495 read_unlock(&chan_list_lock);
7496
7497 return 0;
7498 }
7499
/* debugfs ->open: bind l2cap_debugfs_show as a single-shot seq_file */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7504
/* Standard seq_file plumbing for the "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
7511
7512 static struct dentry *l2cap_debugfs;
7513
7514 int __init l2cap_init(void)
7515 {
7516 int err;
7517
7518 err = l2cap_init_sockets();
7519 if (err < 0)
7520 return err;
7521
7522 if (IS_ERR_OR_NULL(bt_debugfs))
7523 return 0;
7524
7525 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7526 NULL, &l2cap_debugfs_fops);
7527
7528 debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs,
7529 &le_max_credits);
7530 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
7531 &le_default_mps);
7532
7533 bt_6lowpan_init();
7534
7535 return 0;
7536 }
7537
/* Module teardown: undo l2cap_init() in reverse order */
void l2cap_exit(void)
{
	bt_6lowpan_cleanup();
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7544
/* Expose the disable_ertm flag (declared near the top of this file) as
 * a writable module parameter.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.319635 seconds and 5 git commands to generate.