Bluetooth: Fix confusion between parent and child channel for 6lowpan
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
44 #define LE_FLOWCTL_MAX_CREDITS 65535
45
46 bool disable_ertm;
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
50
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 void *data);
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
66
67 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68 {
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
72 else
73 return BDADDR_LE_RANDOM;
74 }
75
76 return BDADDR_BREDR;
77 }
78
79 /* ---- L2CAP channels ---- */
80
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
82 u16 cid)
83 {
84 struct l2cap_chan *c;
85
86 list_for_each_entry(c, &conn->chan_l, list) {
87 if (c->dcid == cid)
88 return c;
89 }
90 return NULL;
91 }
92
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 u16 cid)
95 {
96 struct l2cap_chan *c;
97
98 list_for_each_entry(c, &conn->chan_l, list) {
99 if (c->scid == cid)
100 return c;
101 }
102 return NULL;
103 }
104
105 /* Find channel with given SCID.
106 * Returns locked channel. */
107 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
108 u16 cid)
109 {
110 struct l2cap_chan *c;
111
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_scid(conn, cid);
114 if (c)
115 l2cap_chan_lock(c);
116 mutex_unlock(&conn->chan_lock);
117
118 return c;
119 }
120
121 /* Find channel with given DCID.
122 * Returns locked channel.
123 */
124 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
125 u16 cid)
126 {
127 struct l2cap_chan *c;
128
129 mutex_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_dcid(conn, cid);
131 if (c)
132 l2cap_chan_lock(c);
133 mutex_unlock(&conn->chan_lock);
134
135 return c;
136 }
137
138 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
139 u8 ident)
140 {
141 struct l2cap_chan *c;
142
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
145 return c;
146 }
147 return NULL;
148 }
149
150 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 u8 ident)
152 {
153 struct l2cap_chan *c;
154
155 mutex_lock(&conn->chan_lock);
156 c = __l2cap_get_chan_by_ident(conn, ident);
157 if (c)
158 l2cap_chan_lock(c);
159 mutex_unlock(&conn->chan_lock);
160
161 return c;
162 }
163
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165 {
166 struct l2cap_chan *c;
167
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&c->src, src))
170 return c;
171 }
172 return NULL;
173 }
174
175 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
176 {
177 int err;
178
179 write_lock(&chan_list_lock);
180
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
182 err = -EADDRINUSE;
183 goto done;
184 }
185
186 if (psm) {
187 chan->psm = psm;
188 chan->sport = psm;
189 err = 0;
190 } else {
191 u16 p;
192
193 err = -EINVAL;
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
198 err = 0;
199 break;
200 }
201 }
202
203 done:
204 write_unlock(&chan_list_lock);
205 return err;
206 }
207 EXPORT_SYMBOL_GPL(l2cap_add_psm);
208
209 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
210 {
211 write_lock(&chan_list_lock);
212
213 chan->scid = scid;
214
215 write_unlock(&chan_list_lock);
216
217 return 0;
218 }
219
220 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
221 {
222 u16 cid, dyn_end;
223
224 if (conn->hcon->type == LE_LINK)
225 dyn_end = L2CAP_CID_LE_DYN_END;
226 else
227 dyn_end = L2CAP_CID_DYN_END;
228
229 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 if (!__l2cap_get_chan_by_scid(conn, cid))
231 return cid;
232 }
233
234 return 0;
235 }
236
/* Transition @chan to @state and notify the channel owner through the
 * state_change callback with err == 0 (a normal transition).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
245
/* Like l2cap_state_change() but also propagates an error code to the
 * channel owner in the same state_change callback invocation.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
252
/* Report @err to the channel owner without changing the channel state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
257
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (only one of the two runs at a time) or no
 * retransmission timeout has been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
266
/* Arm the ERTM monitor timer. The retransmission timer is cancelled
 * first since only one of the two may run at a time.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
275
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
277 u16 seq)
278 {
279 struct sk_buff *skb;
280
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
283 return skb;
284 }
285
286 return NULL;
287 }
288
289 /* ---- L2CAP sequence number lists ---- */
290
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
297 * allocs or frees.
298 */
299
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
301 {
302 size_t alloc_size, i;
303
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
307 */
308 alloc_size = roundup_pow_of_two(size);
309
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
311 if (!seq_list->list)
312 return -ENOMEM;
313
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
319
320 return 0;
321 }
322
/* Release the array allocated by l2cap_seq_list_init(). Safe to call
 * even when the allocation failed (kfree(NULL) is a no-op).
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
327
/* Constant-time membership test: a slot holds L2CAP_SEQ_LIST_CLEAR
 * exactly when its sequence number is not on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
334
/* Remove and return the sequence number at the head of the list.
 *
 * The vacated slot is reset to CLEAR; if the old head carried the TAIL
 * sentinel the list is now empty, so head and tail are both reset.
 * Caller must not pop from an empty list.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
350
351 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
352 {
353 u16 i;
354
355 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
356 return;
357
358 for (i = 0; i <= seq_list->mask; i++)
359 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
360
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 }
364
/* Append @seq to the tail of the list in constant time.
 *
 * Duplicates are ignored: an occupied slot means @seq is already
 * queued. The new tail slot receives the TAIL sentinel; the previous
 * tail (or head, for an empty list) is linked to @seq.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
382
/* Work item run when the channel timer (chan_timer) expires.
 *
 * Picks an error reason from the channel state, closes the channel and
 * drops the reference the timer held. Lock order: conn->chan_lock
 * before the channel lock; ops->close runs after the channel lock is
 * released but still under chan_lock.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);
}
412
/* Allocate and initialise a new L2CAP channel, add it to the global
 * channel list and return it with an initial reference. Returns NULL
 * on allocation failure.
 *
 * NOTE(review): allocation uses GFP_ATOMIC — presumably some callers
 * run in atomic context; confirm before relaxing to GFP_KERNEL.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
441
/* kref release callback: unlink the channel from the global list and
 * free it. Runs when the last l2cap_chan_put() drops the refcount to
 * zero.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
454
/* Take a reference on @c. Pair with l2cap_chan_put(). */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
461
/* Drop a reference on @c; the channel is destroyed (see
 * l2cap_chan_destroy) when the last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
469
470 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
471 {
472 chan->fcs = L2CAP_FCS_CRC16;
473 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
474 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
475 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
476 chan->remote_max_tx = chan->max_tx;
477 chan->remote_tx_win = chan->tx_win;
478 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
479 chan->sec_level = BT_SECURITY_LOW;
480 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
481 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
482 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
483 chan->conf_state = 0;
484
485 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
486 }
487 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
488
489 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
490 {
491 chan->sdu = NULL;
492 chan->sdu_last_frag = NULL;
493 chan->sdu_len = 0;
494 chan->tx_credits = 0;
495 chan->rx_credits = le_max_credits;
496 chan->mps = min_t(u16, chan->imtu, le_default_mps);
497
498 skb_queue_head_init(&chan->tx_q);
499 }
500
/* Attach @chan to @conn: assign CIDs and output MTU according to the
 * channel type, seed default extended-flow-spec parameters, take a
 * channel reference plus an hci_conn reference and link the channel
 * into the connection's channel list. Caller must hold
 * conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow spec parameters */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
549
/* Add @chan to @conn under the connection's channel-list lock. */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
556
557 void l2cap_chan_del(struct l2cap_chan *chan, int err)
558 {
559 struct l2cap_conn *conn = chan->conn;
560
561 __clear_chan_timer(chan);
562
563 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
564
565 if (conn) {
566 struct amp_mgr *mgr = conn->hcon->amp_mgr;
567 /* Delete from channel list */
568 list_del(&chan->list);
569
570 l2cap_chan_put(chan);
571
572 chan->conn = NULL;
573
574 if (chan->scid != L2CAP_CID_A2MP)
575 hci_conn_drop(conn->hcon);
576
577 if (mgr && mgr->bredr_chan == chan)
578 mgr->bredr_chan = NULL;
579 }
580
581 if (chan->hs_hchan) {
582 struct hci_chan *hs_hchan = chan->hs_hchan;
583
584 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
585 amp_disconnect_logical_link(hs_hchan);
586 }
587
588 chan->ops->teardown(chan, err);
589
590 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
591 return;
592
593 switch(chan->mode) {
594 case L2CAP_MODE_BASIC:
595 break;
596
597 case L2CAP_MODE_LE_FLOWCTL:
598 skb_queue_purge(&chan->tx_q);
599 break;
600
601 case L2CAP_MODE_ERTM:
602 __clear_retrans_timer(chan);
603 __clear_monitor_timer(chan);
604 __clear_ack_timer(chan);
605
606 skb_queue_purge(&chan->srej_q);
607
608 l2cap_seq_list_free(&chan->srej_list);
609 l2cap_seq_list_free(&chan->retrans_list);
610
611 /* fall through */
612
613 case L2CAP_MODE_STREAMING:
614 skb_queue_purge(&chan->tx_q);
615 break;
616 }
617
618 return;
619 }
620 EXPORT_SYMBOL_GPL(l2cap_chan_del);
621
/* Propagate @hcon's (possibly updated) destination address and type to
 * every channel on its L2CAP connection.
 *
 * NOTE(review): presumably called after LE identity address
 * resolution, per the function name — confirm against callers.
 */
void l2cap_conn_update_id_addr(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
638
639 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
640 {
641 struct l2cap_conn *conn = chan->conn;
642 struct l2cap_le_conn_rsp rsp;
643 u16 result;
644
645 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
646 result = L2CAP_CR_AUTHORIZATION;
647 else
648 result = L2CAP_CR_BAD_PSM;
649
650 l2cap_state_change(chan, BT_DISCONN);
651
652 rsp.dcid = cpu_to_le16(chan->scid);
653 rsp.mtu = cpu_to_le16(chan->imtu);
654 rsp.mps = cpu_to_le16(chan->mps);
655 rsp.credits = cpu_to_le16(chan->rx_credits);
656 rsp.result = cpu_to_le16(result);
657
658 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
659 &rsp);
660 }
661
662 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
663 {
664 struct l2cap_conn *conn = chan->conn;
665 struct l2cap_conn_rsp rsp;
666 u16 result;
667
668 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
669 result = L2CAP_CR_SEC_BLOCK;
670 else
671 result = L2CAP_CR_BAD_PSM;
672
673 l2cap_state_change(chan, BT_DISCONN);
674
675 rsp.scid = cpu_to_le16(chan->dcid);
676 rsp.dcid = cpu_to_le16(chan->scid);
677 rsp.result = cpu_to_le16(result);
678 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
679
680 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
681 }
682
/* Close @chan with @reason, taking the state-appropriate path:
 * connected channels get a disconnect request (bounded by a guard
 * timer), half-open incoming ones get a connect reject, everything
 * else is torn down directly. Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Bound the wait for the disconnect response */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Incoming connect still pending: reject it */
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
725
/* Derive the HCI authentication requirement for @chan from its channel
 * type and security level. SDP-related PSMs have their security level
 * lowered to BT_SECURITY_SDP first; connection-oriented channels on
 * other PSMs fall through to the general-bonding rules.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		/* 3D Synchronization Profile broadcasts need no bonding */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
776
777 /* Service level security */
778 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
779 {
780 struct l2cap_conn *conn = chan->conn;
781 __u8 auth_type;
782
783 if (conn->hcon->type == LE_LINK)
784 return smp_conn_security(conn->hcon, chan->sec_level);
785
786 auth_type = l2cap_get_auth_type(chan);
787
788 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
789 initiator);
790 }
791
792 static u8 l2cap_get_ident(struct l2cap_conn *conn)
793 {
794 u8 id;
795
796 /* Get next available identificator.
797 * 1 - 128 are used by kernel.
798 * 129 - 199 are reserved.
799 * 200 - 254 are used by utilities like l2ping, etc.
800 */
801
802 mutex_lock(&conn->ident_lock);
803
804 if (++conn->tx_ident > 128)
805 conn->tx_ident = 1;
806
807 id = conn->tx_ident;
808
809 mutex_unlock(&conn->ident_lock);
810
811 return id;
812 }
813
814 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
815 void *data)
816 {
817 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
818 u8 flags;
819
820 BT_DBG("code 0x%2.2x", code);
821
822 if (!skb)
823 return;
824
825 if (lmp_no_flush_capable(conn->hcon->hdev))
826 flags = ACL_START_NO_FLUSH;
827 else
828 flags = ACL_START;
829
830 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
831 skb->priority = HCI_PRIO_MAX;
832
833 hci_send_acl(conn->hchan, skb, flags);
834 }
835
836 static bool __chan_is_moving(struct l2cap_chan *chan)
837 {
838 return chan->move_state != L2CAP_MOVE_STABLE &&
839 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
840 }
841
/* Transmit one PDU for @chan.
 *
 * When the channel lives on an AMP (high speed) link and no move is in
 * progress the skb goes out on the AMP logical link (or is dropped if
 * that link is gone); otherwise it is sent on the BR/EDR ACL link with
 * flushable/non-flushable flags derived from the channel flags and
 * controller capabilities.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
868
/* Decode a 16-bit enhanced ERTM control field into @control. Fields
 * that do not apply to the decoded frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
892
/* Decode a 32-bit extended ERTM control field into @control. Fields
 * that do not apply to the decoded frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
916
917 static inline void __unpack_control(struct l2cap_chan *chan,
918 struct sk_buff *skb)
919 {
920 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
921 __unpack_extended_control(get_unaligned_le32(skb->data),
922 &bt_cb(skb)->control);
923 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
924 } else {
925 __unpack_enhanced_control(get_unaligned_le16(skb->data),
926 &bt_cb(skb)->control);
927 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
928 }
929 }
930
931 static u32 __pack_extended_control(struct l2cap_ctrl *control)
932 {
933 u32 packed;
934
935 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
936 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
937
938 if (control->sframe) {
939 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
940 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
941 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
942 } else {
943 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
944 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
945 }
946
947 return packed;
948 }
949
950 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
951 {
952 u16 packed;
953
954 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
955 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
956
957 if (control->sframe) {
958 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
959 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
960 packed |= L2CAP_CTRL_FRAME_TYPE;
961 } else {
962 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
963 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
964 }
965
966 return packed;
967 }
968
969 static inline void __pack_control(struct l2cap_chan *chan,
970 struct l2cap_ctrl *control,
971 struct sk_buff *skb)
972 {
973 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
974 put_unaligned_le32(__pack_extended_control(control),
975 skb->data + L2CAP_HDR_SIZE);
976 } else {
977 put_unaligned_le16(__pack_enhanced_control(control),
978 skb->data + L2CAP_HDR_SIZE);
979 }
980 }
981
982 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
983 {
984 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
985 return L2CAP_EXT_HDR_SIZE;
986 else
987 return L2CAP_ENH_HDR_SIZE;
988 }
989
/* Allocate and build a complete S-frame PDU for @chan carrying the
 * pre-packed @control field, appending a CRC16 FCS over the whole PDU
 * when negotiated. Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width depends on the negotiated options */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1022
/* Build and send the supervisory (S) frame described by @control.
 *
 * Handles final-bit bookkeeping (CONN_SEND_FBIT), RNR-sent tracking,
 * and — for non-SREJ frames — records the acknowledged sequence number
 * and stops the ack timer. Does nothing for I-frame controls or while
 * an AMP channel move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1063
1064 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1065 {
1066 struct l2cap_ctrl control;
1067
1068 BT_DBG("chan %p, poll %d", chan, poll);
1069
1070 memset(&control, 0, sizeof(control));
1071 control.sframe = 1;
1072 control.poll = poll;
1073
1074 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1075 control.super = L2CAP_SUPER_RNR;
1076 else
1077 control.super = L2CAP_SUPER_RR;
1078
1079 control.reqseq = chan->buffer_seq;
1080 l2cap_send_sframe(chan, &control);
1081 }
1082
/* True when no connect request is outstanding for @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1087
/* Decide whether @chan may be created on an AMP (high speed)
 * controller: high-speed support must be enabled, the remote must
 * advertise the A2MP fixed channel, at least one non-BR/EDR AMP
 * controller must be up, and the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1115
/* Check EFS parameters.
 *
 * NOTE(review): placeholder — always accepts; no actual extended flow
 * spec validation is implemented here.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1121
1122 void l2cap_send_conn_req(struct l2cap_chan *chan)
1123 {
1124 struct l2cap_conn *conn = chan->conn;
1125 struct l2cap_conn_req req;
1126
1127 req.scid = cpu_to_le16(chan->scid);
1128 req.psm = chan->psm;
1129
1130 chan->ident = l2cap_get_ident(conn);
1131
1132 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1133
1134 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1135 }
1136
1137 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1138 {
1139 struct l2cap_create_chan_req req;
1140 req.scid = cpu_to_le16(chan->scid);
1141 req.psm = chan->psm;
1142 req.amp_id = amp_id;
1143
1144 chan->ident = l2cap_get_ident(chan->conn);
1145
1146 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1147 sizeof(req), &req);
1148 }
1149
/* Prepare an ERTM channel for an AMP channel move.
 *
 * Stops all ERTM timers, caps the retry count of already-sent frames at
 * the head of tx_q to one (stopping at the first never-sent frame),
 * resynchronises expected_tx_seq, discards SREJ/retransmit state and
 * parks the TX/RX state machines in their move states. No-op for
 * non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1184
/* Finish an AMP channel move: reset the move bookkeeping to stable
 * and, for ERTM channels, put the RX state machine into the WAIT_F or
 * WAIT_P state matching the role played so the poll/final exchange can
 * complete (the initiator also sends an explicit poll).
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1206
/* Transition a channel to BT_CONNECTED and notify its owner via the
 * ready() callback.  Clearing conf_state also clears
 * CONF_NOT_COMPLETE, which is what permits data transfer to start.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* An LE flow-control channel without TX credits starts out
	 * suspended until the peer grants credits.
	 */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1220
/* Send an LE Credit Based Connection Request for @chan.
 * FLAG_LE_CONN_REQ_SENT guards against issuing the request twice.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Advertise our receive parameters: MTU, MPS and the initial
	 * number of credits we grant the peer.
	 */
	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1240
1241 static void l2cap_le_start(struct l2cap_chan *chan)
1242 {
1243 struct l2cap_conn *conn = chan->conn;
1244
1245 if (!smp_conn_security(conn->hcon, chan->sec_level))
1246 return;
1247
1248 if (!chan->psm) {
1249 l2cap_chan_ready(chan);
1250 return;
1251 }
1252
1253 if (chan->state == BT_CONNECT)
1254 l2cap_le_connect(chan);
1255 }
1256
1257 static void l2cap_start_connection(struct l2cap_chan *chan)
1258 {
1259 if (__amp_capable(chan)) {
1260 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1261 a2mp_discover_amp(chan);
1262 } else if (chan->conn->hcon->type == LE_LINK) {
1263 l2cap_le_start(chan);
1264 } else {
1265 l2cap_send_conn_req(chan);
1266 }
1267 }
1268
/* Drive channel establishment on an existing link.  On LE this is
 * just l2cap_le_start().  On BR/EDR the remote feature mask must be
 * known first: if the information request was already answered we can
 * proceed (subject to security); otherwise issue the request and let
 * the info handler / info timer call l2cap_conn_start() later.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature mask exchange still in flight: wait. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan, true) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Give up waiting for the feature mask after a timeout. */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1299
1300 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1301 {
1302 u32 local_feat_mask = l2cap_feat_mask;
1303 if (!disable_ertm)
1304 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1305
1306 switch (mode) {
1307 case L2CAP_MODE_ERTM:
1308 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1309 case L2CAP_MODE_STREAMING:
1310 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1311 default:
1312 return 0x00;
1313 }
1314 }
1315
/* Start disconnecting @chan with error @err.  ERTM timers are stopped
 * first.  A2MP channels have no signalling disconnect and simply move
 * to BT_DISCONN; otherwise a Disconnection Request is sent and the
 * channel's state and error are updated.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1342
1343 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn and push their connection state machines
 * forward, typically after security or the info request completes.
 * BT_CONNECT channels get an outgoing request (or are closed when the
 * requested mode is mandatory but unsupported by the peer);
 * BT_CONNECT2 channels get a Connection Response reflecting their
 * security/defer status, followed by our Configure Request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* CONF_STATE2_DEVICE marks the configured mode
			 * as mandatory: close if the peer lacks it.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner accept/reject first. */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only send our Configure Request once, and only
			 * after a successful response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1419
1420 /* Find socket with cid and source/destination bdaddr.
1421 * Returns closest match, locked.
1422 */
1423 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1424 bdaddr_t *src,
1425 bdaddr_t *dst)
1426 {
1427 struct l2cap_chan *c, *c1 = NULL;
1428
1429 read_lock(&chan_list_lock);
1430
1431 list_for_each_entry(c, &chan_list, global_l) {
1432 if (state && c->state != state)
1433 continue;
1434
1435 if (c->scid == cid) {
1436 int src_match, dst_match;
1437 int src_any, dst_any;
1438
1439 /* Exact match. */
1440 src_match = !bacmp(&c->src, src);
1441 dst_match = !bacmp(&c->dst, dst);
1442 if (src_match && dst_match) {
1443 read_unlock(&chan_list_lock);
1444 return c;
1445 }
1446
1447 /* Closest match */
1448 src_any = !bacmp(&c->src, BDADDR_ANY);
1449 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1450 if ((src_match && dst_any) || (src_any && dst_match) ||
1451 (src_any && dst_any))
1452 c1 = c;
1453 }
1454 }
1455
1456 read_unlock(&chan_list_lock);
1457
1458 return c1;
1459 }
1460
/* Handle a newly ready LE link.  If an ATT server socket is listening
 * (pchan, the parent), spawn a child channel (chan) from it and attach
 * the child -- not the parent -- to the connection.  Also triggers a
 * connection parameter update on slave links whose interval is outside
 * the configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minium and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}

	l2cap_chan_lock(pchan);

	/* Create the child channel from the listening parent. */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	/* The child inherits the link's addresses, not the parent's
	 * (possibly wildcard) bind address.
	 */
	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
1521
/* Called when the underlying HCI link becomes ready.  Performs LE
 * specific setup, then advances every channel on the connection
 * (connectionless channels are ready immediately; A2MP channels are
 * managed elsewhere and skipped).  Finally schedules processing of any
 * RX frames that arrived before the link was ready.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels do not follow the normal state machine. */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	/* Process frames queued while the link was not yet ready. */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1565
1566 /* Notify sockets that we cannot guaranty reliability anymore */
1567 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1568 {
1569 struct l2cap_chan *chan;
1570
1571 BT_DBG("conn %p", conn);
1572
1573 mutex_lock(&conn->chan_lock);
1574
1575 list_for_each_entry(chan, &conn->chan_l, list) {
1576 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1577 l2cap_chan_set_err(chan, err);
1578 }
1579
1580 mutex_unlock(&conn->chan_lock);
1581 }
1582
/* Info request timer expiry: treat the feature mask exchange as done
 * (with whatever defaults we have) and let pending channels proceed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1593
1594 /*
1595 * l2cap_user
1596 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1597 * callback is called during registration. The ->remove callback is called
1598 * during unregistration.
1599 * An l2cap_user object can either be explicitly unregistered or when the
1600 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1601 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1602 * External modules must own a reference to the l2cap_conn object if they intend
1603 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1604 * any time if they don't.
1605 */
1606
/* Register an l2cap_user on @conn.  Returns -EINVAL if the user is
 * already linked, -ENODEV if the connection has been torn down, or the
 * result of user->probe() (run under the hci_dev lock; a probe failure
 * aborts the registration).
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() is unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered. */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1644
/* Unregister an l2cap_user from @conn.  Safe to call for a user that
 * was never registered (detected via the NULLed list pointers).  The
 * remove callback runs after the entry has been unlinked.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	/* NULL the pointers so double-unregister is detected above. */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1663
/* Unlink every registered user and invoke its remove() callback; used
 * during connection teardown.  Each entry is unlinked (pointers
 * NULLed, matching l2cap_unregister_user()) before remove() runs.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1676
/* Tear down the L2CAP state attached to @hcon, closing every channel
 * with error @err, cancelling pending work and dropping the
 * connection reference.  hcon->l2cap_data is cleared at the end, so a
 * second call becomes a no-op.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so the channel survives chan_del
		 * until the close callback has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1731
/* SMP security timer expiry: if pairing is still pending, destroy the
 * SMP context and tear the whole connection down with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1744
/* kref release callback: drop the hci_conn reference and free @conn. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1752
/* Take a reference on @conn; paired with l2cap_conn_put(). */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1758
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1764
1765 /* ---- Socket interface ---- */
1766
1767 /* Find socket with psm and source / destination bdaddr.
1768 * Returns closest match.
1769 */
1770 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1771 bdaddr_t *src,
1772 bdaddr_t *dst,
1773 u8 link_type)
1774 {
1775 struct l2cap_chan *c, *c1 = NULL;
1776
1777 read_lock(&chan_list_lock);
1778
1779 list_for_each_entry(c, &chan_list, global_l) {
1780 if (state && c->state != state)
1781 continue;
1782
1783 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1784 continue;
1785
1786 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1787 continue;
1788
1789 if (c->psm == psm) {
1790 int src_match, dst_match;
1791 int src_any, dst_any;
1792
1793 /* Exact match. */
1794 src_match = !bacmp(&c->src, src);
1795 dst_match = !bacmp(&c->dst, dst);
1796 if (src_match && dst_match) {
1797 read_unlock(&chan_list_lock);
1798 return c;
1799 }
1800
1801 /* Closest match */
1802 src_any = !bacmp(&c->src, BDADDR_ANY);
1803 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1804 if ((src_match && dst_any) || (src_any && dst_match) ||
1805 (src_any && dst_any))
1806 c1 = c;
1807 }
1808 }
1809
1810 read_unlock(&chan_list_lock);
1811
1812 return c1;
1813 }
1814
/* ERTM monitor timer: fires while waiting for the peer to answer a
 * poll.  Feeds L2CAP_EV_MONITOR_TO into the TX state machine, or just
 * drops the timer's channel reference when the channel is gone.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		/* Drop the reference taken when the timer was armed. */
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1835
/* ERTM retransmission timer: no ack arrived in time.  Feeds
 * L2CAP_EV_RETRANS_TO into the TX state machine, or just drops the
 * timer's channel reference when the channel is gone.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		/* Drop the reference taken when the timer was armed. */
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1855
/* Transmit queued SDU fragments in streaming mode.  Each frame gets
 * the next TX sequence number, an optional FCS, and is sent at once;
 * streaming mode has no retransmission, so frames are dequeued
 * permanently.  Skipped entirely while a channel move is in progress.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames never acknowledge anything. */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1894
/* Transmit as many new I-frames as the remote TX window allows in
 * ERTM mode.  Each frame is cloned before sending so the original
 * stays on tx_q for possible retransmission.  Returns the number of
 * frames sent, 0 when blocked (remote busy, moving, non-XMIT TX
 * state), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges frames we received. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head past the frame just sent. */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1964
/* Retransmit every sequence number queued on retrans_list.  A frame
 * exceeding the per-channel max_tx retry limit triggers a disconnect.
 * Cloned frames are deep-copied (clones are read-only) so the control
 * field and FCS can be rewritten before resending.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack info carried by the retransmission. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2043
/* Queue a single frame (control->reqseq) for retransmission and kick
 * the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2052
/* Retransmit all unacked frames starting at control->reqseq.  A poll
 * request sets the F-bit on the first resent frame.  Builds
 * retrans_list from the TX queue, stopping at the unsent region
 * (tx_send_head), then resends.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit... */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* ...then queue every sent frame from there on. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2086
/* Send an acknowledgment S-frame if needed.  When locally busy, an
 * RNR is sent instead.  Otherwise pending I-frames are flushed first
 * (they carry acks); an explicit RR is only sent once the unacked
 * received frames reach 3/4 of the ack window, else the ack timer is
 * (re)armed to acknowledge later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local busy: tell the peer we cannot receive (RNR). */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked gets acked when the timer fires. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2136
/* Copy @len bytes of user data from @msg into @skb, continuing into a
 * frag_list of conn->mtu sized skbs when the data does not fit in the
 * first one (@count is how much fits there).  Returns the number of
 * bytes consumed or a negative error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
					msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
						msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Keep the head skb's length accounting in sync. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2181
/* Build a connectionless (G-frame) PDU: L2CAP header plus PSM plus
 * user data, fragmented via frag_list when it exceeds the HCI MTU.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2213
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus user
 * data, fragmented via frag_list when it exceeds the HCI MTU.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2243
/* Build an ERTM/streaming I-frame PDU.  The header budget accounts
 * for the (enhanced or extended) control field, an optional SDU
 * length field on start fragments, and the FCS when enabled.  The
 * control field itself is zeroed here and filled at transmit time.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2297
/* Split an outgoing SDU into I-frame PDUs queued on @seg_queue,
 * setting the SAR bits (unsegmented/start/continue/end) and putting
 * the total SDU length in the start fragment only.  On any allocation
 * error the partially built queue is purged.  Returns 0 or a negative
 * error.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Fits in one PDU: no segmentation needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* The start fragment also carries the SDU length field. */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later fragments regain the SDULEN bytes. */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2367
/* Build one LE flow-control (credit based) PDU; the first PDU of an
 * SDU additionally carries the total SDU length (@sdulen non-zero).
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2410
/* Split an outgoing SDU into LE credit-based flow control PDUs.
 *
 * The first PDU carries the total SDU length; every PDU is sized to fit
 * both the HCI MTU and the peer's MPS.  PDUs are appended to
 * @seg_queue.  Returns 0 on success or a negative errno (with
 * @seg_queue purged) on allocation failure.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* Payload budget per PDU: HCI MTU minus the basic L2CAP header,
	 * further capped by the remote's maximum PDU size.
	 */
	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	/* First PDU carries the SDU length field. */
	sdu_len = len;
	pdu_len -= L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Subsequent PDUs have no SDU-length field, so
			 * they regain that payload space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2450
/* Send user data on an L2CAP channel.
 *
 * Dispatches on channel type/mode: connectionless channels and basic
 * mode send a single PDU; LE flow control and ERTM/streaming modes
 * segment the SDU first and then hand the fragments to the respective
 * transmit machinery.
 *
 * Returns the number of bytes queued/sent (== @len) on success or a
 * negative errno.  Called with the channel lock held; the lock may be
 * dropped internally while allocating skbs, hence the re-checks of
 * chan->state after each PDU-creation call.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* No credits: the caller must wait for the peer to grant
		 * more before any data can go out.
		 */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting (lock dropped
		 * during skb allocation).
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many queued PDUs as we have credits for. */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		/* Out of credits: tell the channel owner to stop feeding
		 * us data until credits are replenished.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2583
/* Send SREJ S-frames requesting retransmission of every missing frame.
 *
 * Walks the gap between the next expected sequence number and the
 * out-of-order @txseq just received, sending an SREJ for each sequence
 * number not already buffered in srej_q, and records each request in
 * srej_list so the responses can be matched up later.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Frames already sitting in the SREJ queue don't need to
		 * be requested again.
		 */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Everything up to and including txseq is now accounted for. */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2606
2607 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2608 {
2609 struct l2cap_ctrl control;
2610
2611 BT_DBG("chan %p", chan);
2612
2613 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2614 return;
2615
2616 memset(&control, 0, sizeof(control));
2617 control.sframe = 1;
2618 control.super = L2CAP_SUPER_SREJ;
2619 control.reqseq = chan->srej_list.tail;
2620 l2cap_send_sframe(chan, &control);
2621 }
2622
/* Re-send every outstanding SREJ request except the one for @txseq.
 *
 * Pops each pending sequence number off srej_list, re-sends its SREJ
 * and appends it back to the list, stopping when @txseq is reached or
 * the list is exhausted.  The initial head is captured so the loop
 * terminates after exactly one pass even though entries are re-added.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* txseq was just received, so its SREJ is satisfied and
		 * must not be re-appended.
		 */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2648
/* Handle an acknowledgment (ReqSeq) from the peer.
 *
 * Frees every frame in the tx queue whose sequence number precedes
 * @reqseq — the peer has confirmed receipt of them — and advances
 * expected_ack_seq.  Stops the retransmission timer once nothing is
 * left unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2680
2681 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2682 {
2683 BT_DBG("chan %p", chan);
2684
2685 chan->expected_tx_seq = chan->buffer_seq;
2686 l2cap_seq_list_clear(&chan->srej_list);
2687 skb_queue_purge(&chan->srej_q);
2688 chan->rx_state = L2CAP_RX_STATE_RECV;
2689 }
2690
/* ERTM transmit state machine: XMIT state handler.
 *
 * In XMIT the channel may freely transmit new I-frames.  Handles data
 * requests, local-busy transitions, acknowledgments and poll/timeout
 * events; poll and retransmission-timeout events move the machine to
 * WAIT_F (waiting for the peer's F-bit response).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new segments and start sending immediately. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Tell the peer (via RNR) that we can't receive right now. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We previously told the peer we were busy; send an
			 * RR with the poll bit and wait for its F-bit reply.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Frames up to control->reqseq are acknowledged. */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll the peer for its
		 * receive state and wait for the F-bit response.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2762
/* ERTM transmit state machine: WAIT_F state handler.
 *
 * In WAIT_F a poll (P=1) has been sent and the machine is waiting for
 * the peer's final (F=1) response.  New data is queued but not sent;
 * receiving the F-bit returns the machine to XMIT, and repeated monitor
 * timeouts eventually disconnect the channel.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Tell the peer (via RNR) that we can't receive right now. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* Re-poll with RR now that we can receive again. */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* The awaited F-bit arrived: stop monitoring and
			 * resume normal transmission.
			 */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* No F-bit response yet: re-poll until max_tx retries are
		 * exhausted (max_tx == 0 means retry forever), then give up
		 * and disconnect.
		 */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2840
2841 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2842 struct sk_buff_head *skbs, u8 event)
2843 {
2844 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2845 chan, control, skbs, event, chan->tx_state);
2846
2847 switch (chan->tx_state) {
2848 case L2CAP_TX_STATE_XMIT:
2849 l2cap_tx_state_xmit(chan, control, skbs, event);
2850 break;
2851 case L2CAP_TX_STATE_WAIT_F:
2852 l2cap_tx_state_wait_f(chan, control, skbs, event);
2853 break;
2854 default:
2855 /* Ignore event */
2856 break;
2857 }
2858 }
2859
2860 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2861 struct l2cap_ctrl *control)
2862 {
2863 BT_DBG("chan %p, control %p", chan, control);
2864 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2865 }
2866
2867 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2868 struct l2cap_ctrl *control)
2869 {
2870 BT_DBG("chan %p, control %p", chan, control);
2871 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2872 }
2873
2874 /* Copy frame to all raw sockets on that connection */
2875 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2876 {
2877 struct sk_buff *nskb;
2878 struct l2cap_chan *chan;
2879
2880 BT_DBG("conn %p", conn);
2881
2882 mutex_lock(&conn->chan_lock);
2883
2884 list_for_each_entry(chan, &conn->chan_l, list) {
2885 if (chan->chan_type != L2CAP_CHAN_RAW)
2886 continue;
2887
2888 /* Don't send frame to the channel it came from */
2889 if (bt_cb(skb)->chan == chan)
2890 continue;
2891
2892 nskb = skb_clone(skb, GFP_KERNEL);
2893 if (!nskb)
2894 continue;
2895 if (chan->ops->recv(chan, nskb))
2896 kfree_skb(nskb);
2897 }
2898
2899 mutex_unlock(&conn->chan_lock);
2900 }
2901
/* ---- L2CAP signalling commands ---- */

/* Build a signalling command skb.
 *
 * Allocates an skb (plus continuation fragments if @dlen exceeds what
 * fits in one HCI MTU), writes the L2CAP header on the signalling CID
 * (LE or BR/EDR depending on link type), the command header, and copies
 * @dlen bytes of @data after it.
 *
 * Returns the skb, or NULL on allocation failure or if the connection
 * MTU cannot even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first skb with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including already-built fragments. */
	kfree_skb(skb);
	return NULL;
}
2968
/* Decode one configuration option at *@ptr.
 *
 * Advances *@ptr past the option, returns its total encoded length,
 * and stores the option type, payload length, and value.  Payloads of
 * 1/2/4 bytes are decoded into @val directly; any other length leaves
 * @val as a pointer to the raw payload.
 *
 * NOTE(review): opt->len is taken from the remote peer and is not
 * validated here against the caller's remaining buffer length — the
 * callers only guarantee L2CAP_CONF_OPT_SIZE bytes remain before
 * calling.  Callers must check the returned length against their
 * remaining count before trusting @val; confirm all call sites do.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer instead. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3002
/* Append one configuration option at *@ptr and advance the pointer.
 *
 * For 1/2/4-byte options @val holds the value itself; for any other
 * length @val is interpreted as a pointer to @len bytes of payload.
 *
 * NOTE(review): no bounds checking — the caller must guarantee the
 * output buffer has at least L2CAP_CONF_OPT_SIZE + @len bytes left.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length payload: @val is a pointer. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3032
/* Append an Extended Flow Specification (EFS) option describing this
 * channel's local traffic parameters.  ERTM channels advertise their
 * negotiated service type and timing; streaming channels always use a
 * best-effort spec.  Other modes add no option at all.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3063
/* Deferred-work handler for the ERTM acknowledgment timer.
 *
 * If any received frames are still unacknowledged when the timer fires,
 * send an RR (or RNR when locally busy) to acknowledge them.  Drops the
 * channel reference that was taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Number of frames received but not yet acknowledged. */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3083
3084 int l2cap_ertm_init(struct l2cap_chan *chan)
3085 {
3086 int err;
3087
3088 chan->next_tx_seq = 0;
3089 chan->expected_tx_seq = 0;
3090 chan->expected_ack_seq = 0;
3091 chan->unacked_frames = 0;
3092 chan->buffer_seq = 0;
3093 chan->frames_sent = 0;
3094 chan->last_acked_seq = 0;
3095 chan->sdu = NULL;
3096 chan->sdu_last_frag = NULL;
3097 chan->sdu_len = 0;
3098
3099 skb_queue_head_init(&chan->tx_q);
3100
3101 chan->local_amp_id = AMP_ID_BREDR;
3102 chan->move_id = AMP_ID_BREDR;
3103 chan->move_state = L2CAP_MOVE_STABLE;
3104 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3105
3106 if (chan->mode != L2CAP_MODE_ERTM)
3107 return 0;
3108
3109 chan->rx_state = L2CAP_RX_STATE_RECV;
3110 chan->tx_state = L2CAP_TX_STATE_XMIT;
3111
3112 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3113 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3114 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3115
3116 skb_queue_head_init(&chan->srej_q);
3117
3118 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3119 if (err < 0)
3120 return err;
3121
3122 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3123 if (err < 0)
3124 l2cap_seq_list_free(&chan->srej_list);
3125
3126 return err;
3127 }
3128
3129 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3130 {
3131 switch (mode) {
3132 case L2CAP_MODE_STREAMING:
3133 case L2CAP_MODE_ERTM:
3134 if (l2cap_mode_supported(mode, remote_feat_mask))
3135 return mode;
3136 /* fall through */
3137 default:
3138 return L2CAP_MODE_BASIC;
3139 }
3140 }
3141
3142 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3143 {
3144 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3145 }
3146
3147 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3148 {
3149 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3150 }
3151
/* Fill in the retransmission and monitor timeouts of an RFC option.
 *
 * On an AMP link the timeouts are derived from the controller's
 * best-effort flush timeout; on BR/EDR the spec defaults are used.
 * Both fields are written in little-endian wire format.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		/* amp_be_flush_to is in microseconds. */
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit wire-format maximum. */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3189
3190 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3191 {
3192 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3193 __l2cap_ews_supported(chan->conn)) {
3194 /* use extended control field */
3195 set_bit(FLAG_EXT_CTRL, &chan->flags);
3196 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3197 } else {
3198 chan->tx_win = min_t(u16, chan->tx_win,
3199 L2CAP_DEFAULT_TX_WINDOW);
3200 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3201 }
3202 chan->ack_win = chan->tx_win;
3203 }
3204
/* Build an outgoing Configuration Request for @chan into @data.
 *
 * On the first request the channel mode may be downgraded based on the
 * remote feature mask; subsequent requests keep the already chosen
 * mode.  Depending on the final mode this adds MTU, RFC, EFS, EWS and
 * FCS options.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only once, on the very first exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A state-2 device insists on its configured mode. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default MTU. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC option if the
		 * peer could have expected ERTM or streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Advertised MPS must leave room for the largest
		 * possible L2CAP overhead within the HCI MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* With extended control, the real window goes in EWS. */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3325
/* Parse the peer's buffered Configuration Request and build our
 * Configuration Response into @data.
 *
 * Walks the options stored in chan->conf_req, records the peer's
 * parameters (MTU, RFC, EFS, EWS, FCS, flush timeout), validates the
 * requested mode against ours, then emits our response options and the
 * overall result (SUCCESS / PENDING / UNACCEPT / UNKNOWN).
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the request is irreconcilable with our configuration.
 *
 * Hardening: option lengths come straight from the remote device.
 * Each fixed-size option is only accepted when olen matches the size
 * the specification mandates; a malformed option is ignored instead of
 * being misinterpreted (previously a wrongly-sized EFS option still set
 * remote_efs and the uninitialized efs struct was read later, and a
 * non-2-byte MTU/EWS option made @val a raw payload pointer).  The
 * remaining-length counter is also checked so a lying opt->len cannot
 * drive the walk past the end of the buffer.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		/* A malformed opt->len can overrun the buffer; stop. */
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only accept a correctly sized EFS; otherwise the
			 * struct would be left uninitialized but still used.
			 */
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints may be ignored; unknown non-hint
			 * options must be echoed back as unsupported.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A state-2 device cannot switch modes. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode: give up. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's MPS to what fits in our HCI MTU
			 * with worst-case L2CAP overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3539
/* Parse the peer's Configure Response options and build the follow-up
 * Configure Request in @data.
 *
 * @chan:   channel being configured
 * @rsp:    option list from the peer's config response
 * @len:    length in bytes of that option list
 * @data:   output buffer receiving the new request (header + options)
 * @result: in/out result code; updated to UNACCEPT on a too-small MTU
 *
 * Returns the number of bytes written to @data, or -ECONNREFUSED when the
 * peer's parameters conflict with the channel's requirements.
 *
 * NOTE(review): options are appended through l2cap_add_conf_opt() with no
 * bound on the output buffer; the callers in this file pass fixed 64-byte
 * stack buffers, so a response carrying enough options could overrun them.
 * Confirm against the call sites / consider passing the buffer size down.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	/* Defaults used when the peer omitted the RFC option entirely */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Refuse MTUs below the spec minimum but still echo
			 * an acceptable value back in our request.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State-2 devices may not renegotiate the mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			/* Cleared here; set_default_fcs() picks the final
			 * value once configuration completes.
			 */
			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service type must match ours unless either side
			 * is "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel cannot be switched to another mode here */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3648
3649 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3650 u16 result, u16 flags)
3651 {
3652 struct l2cap_conf_rsp *rsp = data;
3653 void *ptr = rsp->data;
3654
3655 BT_DBG("chan %p", chan);
3656
3657 rsp->scid = cpu_to_le16(chan->dcid);
3658 rsp->result = cpu_to_le16(result);
3659 rsp->flags = cpu_to_le16(flags);
3660
3661 return ptr - data;
3662 }
3663
3664 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3665 {
3666 struct l2cap_le_conn_rsp rsp;
3667 struct l2cap_conn *conn = chan->conn;
3668
3669 BT_DBG("chan %p", chan);
3670
3671 rsp.dcid = cpu_to_le16(chan->scid);
3672 rsp.mtu = cpu_to_le16(chan->imtu);
3673 rsp.mps = cpu_to_le16(chan->mps);
3674 rsp.credits = cpu_to_le16(chan->rx_credits);
3675 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3676
3677 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3678 &rsp);
3679 }
3680
3681 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3682 {
3683 struct l2cap_conn_rsp rsp;
3684 struct l2cap_conn *conn = chan->conn;
3685 u8 buf[128];
3686 u8 rsp_code;
3687
3688 rsp.scid = cpu_to_le16(chan->dcid);
3689 rsp.dcid = cpu_to_le16(chan->scid);
3690 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3691 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3692
3693 if (chan->hs_hcon)
3694 rsp_code = L2CAP_CREATE_CHAN_RSP;
3695 else
3696 rsp_code = L2CAP_CONN_RSP;
3697
3698 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3699
3700 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3701
3702 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3703 return;
3704
3705 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3706 l2cap_build_conf_req(chan, buf), buf);
3707 chan->num_conf_req++;
3708 }
3709
/* Extract the RFC (and extended window size) option from a successful
 * Configure Response and apply the negotiated ERTM/streaming parameters
 * to @chan. A no-op for basic-mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Scan the option list; later occurrences overwrite earlier ones */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the window comes from the EWS
		 * option, otherwise from the RFC option itself.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3760
3761 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3762 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3763 u8 *data)
3764 {
3765 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3766
3767 if (cmd_len < sizeof(*rej))
3768 return -EPROTO;
3769
3770 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3771 return 0;
3772
3773 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3774 cmd->ident == conn->info_ident) {
3775 cancel_delayed_work(&conn->info_timer);
3776
3777 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3778 conn->info_ident = 0;
3779
3780 l2cap_conn_start(conn);
3781 }
3782
3783 return 0;
3784 }
3785
/* Handle an incoming Connection Request (or AMP Create Channel Request).
 *
 * @conn:     the L2CAP connection the request arrived on
 * @cmd:      signalling command header (ident is echoed in the response)
 * @data:     request payload (psm + scid)
 * @rsp_code: L2CAP_CONN_RSP or L2CAP_CREATE_CHAN_RSP
 * @amp_id:   controller id the channel is created on (AMP_ID_BREDR for
 *            regular BR/EDR)
 *
 * Looks up a listening parent channel for the PSM, performs the security
 * check, creates the child channel and replies with success/pending/error.
 * Returns the new child channel, or NULL when none was created.
 *
 * Locking: takes conn->chan_lock and the parent channel lock for the
 * channel-creation section; both are released before sending the response.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid.
	 * (Our channels' dcid equals the remote's source CID.)
	 */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	/* Ask the parent (listening) channel to create the child */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* __l2cap_chan_add() assigned our local CID */
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature discovery still running; answer "pending" */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start feature discovery on this link */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3919
3920 static int l2cap_connect_req(struct l2cap_conn *conn,
3921 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3922 {
3923 struct hci_dev *hdev = conn->hcon->hdev;
3924 struct hci_conn *hcon = conn->hcon;
3925
3926 if (cmd_len < sizeof(struct l2cap_conn_req))
3927 return -EPROTO;
3928
3929 hci_dev_lock(hdev);
3930 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3931 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3932 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3933 hcon->dst_type, 0, NULL, 0,
3934 hcon->dev_class);
3935 hci_dev_unlock(hdev);
3936
3937 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3938 return 0;
3939 }
3940
/* Handle a Connection Response (or AMP Create Channel Response) for a
 * connection we initiated.
 *
 * The channel is looked up by our scid when present, otherwise by the
 * ident of the outstanding request. On success we move to BT_CONFIG and
 * send the first Configure Request; on pending we just mark the state;
 * any other result tears the channel down.
 *
 * Returns 0, or -EBADSLT when no matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No scid yet (e.g. pending response): match by ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Don't send a second Configure Request */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code means the connect failed */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4013
4014 static inline void set_default_fcs(struct l2cap_chan *chan)
4015 {
4016 /* FCS is enabled only in ERTM or streaming mode, if one or both
4017 * sides request it.
4018 */
4019 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4020 chan->fcs = L2CAP_FCS_NONE;
4021 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4022 chan->fcs = L2CAP_FCS_CRC16;
4023 }
4024
4025 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4026 u8 ident, u16 flags)
4027 {
4028 struct l2cap_conn *conn = chan->conn;
4029
4030 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4031 flags);
4032
4033 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4034 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4035
4036 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4037 l2cap_build_conf_rsp(chan, data,
4038 L2CAP_CONF_SUCCESS, flags), data);
4039 }
4040
4041 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4042 u16 scid, u16 dcid)
4043 {
4044 struct l2cap_cmd_rej_cid rej;
4045
4046 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4047 rej.scid = __cpu_to_le16(scid);
4048 rej.dcid = __cpu_to_le16(dcid);
4049
4050 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4051 }
4052
/* Handle an incoming Configure Request for one of our channels.
 *
 * Config options may be split across several requests (continuation
 * flag); they are accumulated in chan->conf_req and only parsed once the
 * final fragment arrives. After a complete request we answer with a
 * Configure Response and, if we have not yet done so, send our own
 * Configure Request. When both directions are done the channel becomes
 * ready (ERTM state is initialised first where applicable).
 *
 * Returns 0 on handled (including rejected) requests, -EPROTO on a
 * truncated command, or a negative error from l2cap_ertm_init().
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked; unlock on every exit path below */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Config is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize and go ready */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4161
/* Handle an incoming Configure Response for a request we sent.
 *
 * SUCCESS applies the negotiated RFC parameters; PENDING may trigger an
 * EFS confirmation or AMP logical-link creation; UNACCEPT retries with a
 * new Configure Request up to L2CAP_CONF_MAX_CONF_RSP times, after which
 * (or for any other result) the channel is disconnected. When both sides
 * are fully configured the channel becomes ready.
 *
 * Returns 0, -EPROTO on a truncated command, or a negative error from
 * l2cap_ertm_init().
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked; unlock via "done" below */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP: confirm EFS once the logical link
				 * completes (see l2cap_logical_cfm)
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many retries, give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4273
/* Handle an incoming Disconnection Request: acknowledge it, then tear
 * down the matching channel.
 *
 * The channel is looked up by the request's dcid (our scid). An extra
 * reference is held across l2cap_chan_del()/close() so the channel
 * cannot be freed while still in use here.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our local source CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a ref so the chan survives del + close below */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4320
/* Handle a Disconnection Response to a request we sent: the peer has
 * acknowledged, so remove and close the channel.
 *
 * As in l2cap_disconnect_req(), an extra reference is held across
 * l2cap_chan_del()/close() to keep the channel alive while in use.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4359
/* Handle an Information Request: report our feature mask, fixed-channel
 * map, or "not supported" for anything else.
 *
 * NOTE(review): the L2CAP_IT_FIXED_CHAN branch mutates the file-scope
 * l2cap_fixed_chan[] array in place based on this connection's hs_enabled
 * flag, without synchronization — concurrent requests on links with
 * different hs_enabled values could race; verify whether that matters in
 * practice.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* Advertise the A2MP fixed channel only when HS is on */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4414
/* Handle an Information Response during feature discovery.
 *
 * A feature-mask response may chain into a fixed-channel request;
 * once discovery completes (or fails) any channels waiting on it are
 * started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: mark discovery done with defaults */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up by asking for the fixed channel map */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4477
/* Handle an AMP Create Channel Request.
 *
 * amp_id 0 (AMP_ID_BREDR) is handled as a plain BR/EDR connect; any
 * other id must name a powered-up AMP controller, in which case the
 * created channel is bound to the AMP's logical link. Invalid ids are
 * answered with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4554
4555 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4556 {
4557 struct l2cap_move_chan_req req;
4558 u8 ident;
4559
4560 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4561
4562 ident = l2cap_get_ident(chan->conn);
4563 chan->ident = ident;
4564
4565 req.icid = cpu_to_le16(chan->scid);
4566 req.dest_amp_id = dest_amp_id;
4567
4568 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4569 &req);
4570
4571 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4572 }
4573
4574 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4575 {
4576 struct l2cap_move_chan_rsp rsp;
4577
4578 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4579
4580 rsp.icid = cpu_to_le16(chan->dcid);
4581 rsp.result = cpu_to_le16(result);
4582
4583 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4584 sizeof(rsp), &rsp);
4585 }
4586
4587 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4588 {
4589 struct l2cap_move_chan_cfm cfm;
4590
4591 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4592
4593 chan->ident = l2cap_get_ident(chan->conn);
4594
4595 cfm.icid = cpu_to_le16(chan->scid);
4596 cfm.result = cpu_to_le16(result);
4597
4598 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4599 sizeof(cfm), &cfm);
4600
4601 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4602 }
4603
4604 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4605 {
4606 struct l2cap_move_chan_cfm cfm;
4607
4608 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4609
4610 cfm.icid = cpu_to_le16(icid);
4611 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4612
4613 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4614 sizeof(cfm), &cfm);
4615 }
4616
4617 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4618 u16 icid)
4619 {
4620 struct l2cap_move_chan_cfm_rsp rsp;
4621
4622 BT_DBG("icid 0x%4.4x", icid);
4623
4624 rsp.icid = cpu_to_le16(icid);
4625 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4626 }
4627
4628 static void __release_logical_link(struct l2cap_chan *chan)
4629 {
4630 chan->hs_hchan = NULL;
4631 chan->hs_hcon = NULL;
4632
4633 /* Placeholder - release the logical link */
4634 }
4635
/* React to a failed AMP logical-link setup.
 *
 * Pre-connect failures disconnect the channel; failures during a move
 * abort the move according to the channel's move role/state.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator the move is not supported here */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4666
/* Complete channel creation on an AMP after the logical link came up:
 * attach the hci_chan, send the pending EFS config response and, if the
 * peer's config is already in, finish bringing the channel up.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Answer the config exchange that was deferred on the link setup */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4689
/* Advance the channel-move state machine once the AMP logical link for
 * a move has come up, sending the confirmation or response appropriate
 * to our move role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Defer while the local side is busy receiving */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4723
/* Logical link confirmation callback.  Call with chan locked.
 *
 * On failure the in-progress create/move is rolled back and the
 * logical link released.  On success, dispatch on whether the channel
 * is being created on an AMP (not yet BT_CONNECTED) or moved to one
 * (already BT_CONNECTED).
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4744
4745 void l2cap_move_start(struct l2cap_chan *chan)
4746 {
4747 BT_DBG("chan %p", chan);
4748
4749 if (chan->local_amp_id == AMP_ID_BREDR) {
4750 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4751 return;
4752 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4753 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4754 /* Placeholder - start physical link setup */
4755 } else {
4756 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4757 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4758 chan->move_id = 0;
4759 l2cap_move_setup(chan);
4760 l2cap_send_move_chan_req(chan, 0);
4761 }
4762 }
4763
/* React to physical link setup for a channel being created on an AMP
 * controller.  For an outgoing channel (BT_CONNECT) either proceed
 * with a Create Channel request on the AMP or fall back to a plain
 * BR/EDR connect.  For an incoming channel, answer the pending Create
 * Channel request and, on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* AMP channels never use an FCS */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Channel accepted: kick off configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4815
4816 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4817 u8 remote_amp_id)
4818 {
4819 l2cap_move_setup(chan);
4820 chan->move_id = local_amp_id;
4821 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4822
4823 l2cap_send_move_chan_req(chan, remote_amp_id);
4824 }
4825
/* Respond to a peer-initiated move after local physical link setup.
 * If the logical link is already connected, accept immediately;
 * if it is still coming up, wait for logical link confirmation;
 * if unavailable, reject the move.
 *
 * NOTE(review): the 'result' parameter is currently unused here —
 * presumably reserved for when the placeholder below is filled in.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4850
4851 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4852 {
4853 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4854 u8 rsp_result;
4855 if (result == -EINVAL)
4856 rsp_result = L2CAP_MR_BAD_ID;
4857 else
4858 rsp_result = L2CAP_MR_NOT_ALLOWED;
4859
4860 l2cap_send_move_chan_rsp(chan, rsp_result);
4861 }
4862
4863 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4864 chan->move_state = L2CAP_MOVE_STABLE;
4865
4866 /* Restart data transmission */
4867 l2cap_ertm_send(chan);
4868 }
4869
/* Physical link confirmation for an AMP create/move.
 * Invoke with locked chan.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED path unlocks the channel
 * before returning while all other paths leave it locked — the
 * caller's unlock contract is asymmetric here; confirm against the
 * call sites before restructuring.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Channel still being created on the AMP */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4903
/* Handle an incoming L2CAP Move Channel Request.  Validates the
 * target channel and destination controller, detects move collisions,
 * and either rejects the move or transitions into the responder side
 * of the move state machine.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; every path
 * after that point funnels through send_move_response which unlocks.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves only make sense when high-speed (AMP) is enabled */
	if (!conn->hs_enabled)
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: reject by icid without a chan */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Fixed channels and non-ERTM/streaming modes cannot move */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination AMP controller must exist, be an AMP, and be up */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5001
/* Continue an in-progress move after a success/pending Move Channel
 * Response.  The next step depends on the channel's move state and on
 * whether the logical link on the destination controller is ready.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; unlocked at
 * the end of this function.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Channel gone: confirm by icid so the peer can finish */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Peer needs more time: extend the response timeout */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5091
/* Handle a failed Move Channel Response.  On a collision the
 * initiator yields and becomes responder; otherwise the move is
 * cancelled.  In all cases an unconfirmed Move Channel Confirmation
 * is sent so the peer can finish its side of the exchange.
 *
 * l2cap_get_chan_by_ident() returns the channel locked.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision: let the peer drive the move */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5120
5121 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5122 struct l2cap_cmd_hdr *cmd,
5123 u16 cmd_len, void *data)
5124 {
5125 struct l2cap_move_chan_rsp *rsp = data;
5126 u16 icid, result;
5127
5128 if (cmd_len != sizeof(*rsp))
5129 return -EPROTO;
5130
5131 icid = le16_to_cpu(rsp->icid);
5132 result = le16_to_cpu(rsp->result);
5133
5134 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5135
5136 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5137 l2cap_move_continue(conn, icid, result);
5138 else
5139 l2cap_move_fail(conn, cmd->ident, icid, result);
5140
5141 return 0;
5142 }
5143
/* Handle an incoming Move Channel Confirmation.  Commit or roll back
 * the controller switch depending on the confirmation result, and
 * always answer with a confirmation response (required by the spec,
 * even for unknown icids).
 *
 * l2cap_get_chan_by_dcid() returns the channel locked.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move confirmed: adopt the new controller; if the
			 * channel moved back to BR/EDR the AMP logical link
			 * is no longer needed.
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5185
/* Handle an incoming Move Channel Confirmation Response — the final
 * message of a move.  Commit the controller switch and release the
 * AMP logical link when the channel ended up back on BR/EDR.
 *
 * l2cap_get_chan_by_scid() returns the channel locked.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5220
/* Handle an LE Connection Parameter Update Request from a slave.
 * Only valid when we are master.  Validate the requested parameters,
 * answer with accept/reject, and on accept apply the update and
 * notify the management interface.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may be asked to update connection parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the new parameters and report them to mgmt */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5270
/* Handle the response to an outgoing LE credit-based connection
 * request.  On success record the peer's channel parameters and mark
 * the channel ready; on any failure delete the channel.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* Spec minimum for LE credit-based channels: MTU/MPS >= 23 */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* The pending request is matched by its signaling ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5329
/* Dispatch a single BR/EDR signaling command to its handler.
 * Returning a non-zero error causes the caller to send a
 * Command Reject for this command.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo back the request payload unchanged */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5409
/* Handle an incoming LE credit-based connection request.  Look up a
 * listening (parent) channel for the PSM, check security, create a
 * child channel for this connection, and respond with either success,
 * pending (deferred setup), or an error result.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum for LE credit-based channels: MTU/MPS >= 23 */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Security level of the listening channel gates acceptance */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	/* Create the child channel for this connection */
	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Userspace decides: answer later with CR_PEND now */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	/* Deferred setup: the response is sent once userspace accepts */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5521
5522 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5523 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5524 u8 *data)
5525 {
5526 struct l2cap_le_credits *pkt;
5527 struct l2cap_chan *chan;
5528 u16 cid, credits, max_credits;
5529
5530 if (cmd_len != sizeof(*pkt))
5531 return -EPROTO;
5532
5533 pkt = (struct l2cap_le_credits *) data;
5534 cid = __le16_to_cpu(pkt->cid);
5535 credits = __le16_to_cpu(pkt->credits);
5536
5537 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5538
5539 chan = l2cap_get_chan_by_dcid(conn, cid);
5540 if (!chan)
5541 return -EBADSLT;
5542
5543 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5544 if (credits > max_credits) {
5545 BT_ERR("LE credits overflow");
5546 l2cap_send_disconn_req(chan, ECONNRESET);
5547
5548 /* Return 0 so that we don't trigger an unnecessary
5549 * command reject packet.
5550 */
5551 return 0;
5552 }
5553
5554 chan->tx_credits += credits;
5555
5556 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5557 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5558 chan->tx_credits--;
5559 }
5560
5561 if (chan->tx_credits)
5562 chan->ops->resume(chan);
5563
5564 l2cap_chan_unlock(chan);
5565
5566 return 0;
5567 }
5568
/* Handle an LE Command Reject: the peer refused a command we sent.
 * If the rejected ident matches a pending channel, treat it as a
 * connection refusal and delete the channel.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}
5593
/* Dispatch a single LE signaling command to its handler.  A non-zero
 * return causes the caller to send a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5640
/* Process a packet on the LE signaling channel.  An LE signaling PDU
 * carries exactly one command; validate the header and ident, handle
 * the command, and send a Command Reject on handler failure.
 * Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* A zero ident is invalid; length must match exactly */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5681
/* Process a packet on the BR/EDR signaling channel.  Unlike LE, a
 * BR/EDR signaling PDU may carry multiple commands back to back;
 * iterate over them, rejecting each one its handler fails on.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before processing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Truncated command or invalid zero ident: stop parsing */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5730
/* Verify the CRC16 FCS trailing an ERTM/streaming frame.  Returns 0
 * on success (or when the channel does not use an FCS), -EBADMSG on
 * mismatch.  The FCS covers the L2CAP header, which sits just before
 * skb->data at this point.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off, then read it from just past the new
		 * end — skb_trim only adjusts the length, the bytes are
		 * still there.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5751
/* Answer a poll (P-bit) from the peer: send an F-bit response as an
 * RNR if we are locally busy, otherwise piggy-back it on pending
 * I-frames, falling back to an RR s-frame if nothing carried it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer is no longer busy and we still have unacked frames:
	 * restart the retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5785
/* Append new_frag to skb's frag_list, tracking the list tail in
 * *last_frag so appends stay O(1), and keep skb's accounting
 * (len/data_len/truesize) consistent.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5804
/* Reassemble an SDU from segmented I-frames according to the SAR bits
 * in the control field, delivering complete SDUs via chan->ops->recv.
 *
 * Ownership: when a fragment is stored or appended for later, skb is
 * set to NULL so the error path below does not free it; on any error
 * both the current skb (if still owned) and the partial SDU are
 * freed and reassembly state is reset.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Unsegmented frame while mid-reassembly: protocol error */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* Start while mid-reassembly: protocol error */
		if (chan->sdu)
			break;

		/* First two bytes of a START segment carry the SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* START must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a START: protocol error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* SDU must still be incomplete after a CONTINUE */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End without a START: protocol error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total length must match the advertised SDU length */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5886
/* Placeholder: resegmentation of pending frames after an AMP move is
 * not implemented yet; report success unconditionally.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5892
5893 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5894 {
5895 u8 event;
5896
5897 if (chan->mode != L2CAP_MODE_ERTM)
5898 return;
5899
5900 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5901 l2cap_tx(chan, NULL, NULL, event);
5902 }
5903
/* Drain the SREJ queue of out-of-order frames: pass sequential frames
 * to l2cap_reassemble_sdu() until a gap (or local busy) is hit.  Once
 * the queue is empty, return to the normal receive state and ack.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: next in-order frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5937
/* Handle a received SREJ (selective reject) s-frame: retransmit the
 * single requested I-frame, honoring poll/final bits and the SREJ
 * actioned state per the ERTM specification.  Disconnects on an
 * invalid reqseq or when the retry limit is exceeded.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would request a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll set: respond with F-bit, retransmit and resume */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this SREJ was
			 * already actioned for the same reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5995
/* Handle a received REJ (Reject) S-frame.
 *
 * The peer is asking for retransmission of every unacked I-frame
 * starting at control->reqseq.  Validate the request and the retry
 * budget for the first frame, then retransmit the whole window.  The
 * CONN_REJ_ACT bit marks a REJ answered while waiting for an F-bit so
 * the matching final response does not trigger a second full
 * retransmission.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq would name a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of zero means an unlimited retry count */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this REJ was not already handled
		 * while in the WAIT_F state.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6032
/* Classify an incoming I-frame's sequence number relative to the
 * receive window.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications (expected,
 * unexpected/gap, duplicate, their SREJ-recovery variants, or
 * invalid/invalid-ignore) that drives the receive state machines in
 * l2cap_rx_state_recv() and l2cap_rx_state_srej_sent().  All window
 * arithmetic is done as offsets from last_acked_seq so sequence-number
 * wraparound is handled uniformly by __seq_offset().
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq means a frame we already
	 * have seen.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6118
/* ERTM receive state machine: normal RECV state.
 *
 * Processes one receive event (I-frame or RR/RNR/REJ/SREJ S-frame).
 * In-order I-frames go straight to SDU reassembly; a sequence gap
 * triggers SREJ-based recovery and a transition to the SREJ_SENT state.
 *
 * skb ownership: if the frame is consumed (queued or reassembled),
 * skb_in_use is set; otherwise the skb is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* Reassembly takes ownership of skb */
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Drop payload, but still process the ack info */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer receiver is busy: stop retransmitting until it
		 * clears the condition.
		 */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6252
/* ERTM receive state machine: SREJ_SENT state.
 *
 * Entered after a sequence gap was detected and SREJ frames were sent.
 * Incoming I-frames are parked on srej_q; retransmitted frames that
 * fill the head of the gap trigger l2cap_rx_queued_iframes(), which
 * drains in-order frames and eventually returns the channel to RECV.
 *
 * skb ownership follows the same convention as l2cap_rx_state_recv():
 * consumed frames set skb_in_use, everything else is freed here.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the outstanding-SREJ list arrived */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-asking for the tail of
			 * the outstanding SREJ list with the F-bit set.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6395
6396 static int l2cap_finish_move(struct l2cap_chan *chan)
6397 {
6398 BT_DBG("chan %p", chan);
6399
6400 chan->rx_state = L2CAP_RX_STATE_RECV;
6401
6402 if (chan->hs_hcon)
6403 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6404 else
6405 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6406
6407 return l2cap_resegment(chan);
6408 }
6409
/* ERTM receive state machine: WAIT_P state (channel move, waiting for
 * a P-bit from the peer).
 *
 * Only a frame with the poll bit set is acceptable here; anything else
 * is a protocol error.  On receipt, the transmit side is resynchronized
 * to the peer's reqseq, the move is finished, and the event (S-frames
 * only) is replayed through the normal RECV handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not expected while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6447
/* ERTM receive state machine: WAIT_F state (channel move, waiting for
 * an F-bit from the peer).
 *
 * Only a frame with the final bit set is acceptable.  The transmit side
 * is resynchronized to the peer's reqseq, the MTU is updated for the
 * new link (AMP block MTU vs. ACL MTU), queued data is re-segmented,
 * and the frame is then processed by the normal RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6485
6486 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6487 {
6488 /* Make sure reqseq is for a packet that has been sent but not acked */
6489 u16 unacked;
6490
6491 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6492 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6493 }
6494
/* Top-level ERTM receive dispatcher.
 *
 * Validates the frame's reqseq against the unacked window, then routes
 * the event to the handler for the channel's current rx_state.  An
 * invalid reqseq is a protocol violation and tears the channel down.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
6531
/* Receive one I-frame on a streaming-mode channel.
 *
 * Streaming mode has no retransmission: only the exactly-expected
 * sequence number is reassembled.  Any other classification discards
 * both the frame and any partially reassembled SDU, then resynchronizes
 * the expected sequence to just after the received txseq.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* Reassembly takes ownership of skb */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence: drop any partial SDU in progress */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6569
/* Parse and dispatch one ERTM/streaming PDU.
 *
 * Unpacks the control field, verifies the FCS, checks the payload
 * length against the negotiated MPS, then routes I-frames into the
 * receive state machine (or streaming receive) and S-frames into the
 * corresponding RR/REJ/RNR/SREJ events.  Always returns 0; invalid
 * frames are either silently dropped (recovery will request a resend)
 * or cause a disconnect request.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU-length header and FCS trailer from the
	 * payload length compared against MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit "super" field to a receive event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6657
/* Replenish the peer's LE credit pool for an LE flow-control channel.
 *
 * Sends an LE Flow Control Credit packet topping the peer back up to
 * le_max_credits, but only once our outstanding rx_credits have dropped
 * below half of that initial amount, to avoid a credit packet per PDU.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6683
/* Receive one PDU on an LE credit-based flow-control channel.
 *
 * Consumes a credit, returns credits to the peer when needed, and
 * performs SDU reassembly: the first PDU of an SDU starts with a 16-bit
 * SDU length, subsequent PDUs are appended until sdu_len is reached,
 * then the complete SDU is delivered via chan->ops->recv().
 *
 * skb ownership: the skb is always consumed here (delivered, stored as
 * chan->sdu, or freed).  A negative return (no credits / PDU larger
 * than the MTU) tells the caller the skb was NOT consumed and must be
 * dropped there; reassembly errors free everything internally and
 * return 0 to avoid a double free.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb is absorbed into chan->sdu; clear it so the error path
	 * below does not free it twice.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6773
/* Deliver an incoming data PDU to the channel identified by its CID.
 *
 * Looks up (and locks) the channel for @cid; packets for unknown CIDs
 * are dropped, except the A2MP CID where a channel is created on
 * demand.  The PDU is then handled according to the channel mode.  The
 * channel lock taken by l2cap_get_chan_by_scid() (or explicitly for a
 * fresh A2MP channel) is released at "done".
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Negative return means the skb was not consumed */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6840
/* Deliver a connectionless (G-frame) PDU to a listening PSM.
 *
 * Connectionless data is only valid on BR/EDR (ACL) links.  The remote
 * address and PSM are stashed in the skb control block so the socket
 * layer can report them as msg_name.  On any failure the skb is freed
 * here; on successful delivery ops->recv() owns it.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6873
/* Deliver a PDU arriving on the fixed ATT CID of an LE link.
 *
 * Finds the connected ATT channel matching this link's address pair and
 * hands the skb to ops->recv(); the skb is freed here on any failure.
 */
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* The ATT fixed channel only exists on LE links */
	if (hcon->type != LE_LINK)
		goto drop;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 &hcon->src, &hcon->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6899
/* Entry point for a complete L2CAP frame received from HCI.
 *
 * Frames that arrive before the HCI connection is fully established are
 * parked on conn->pending_rx and replayed later by process_pending_rx().
 * Otherwise the basic header is stripped and validated, data from
 * blacklisted LE peers is discarded, and the frame is dispatched by CID
 * to the signaling, connectionless, ATT, SMP, or data-channel handlers.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_type(hcon, hcon->dst_type))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6963
6964 static void process_pending_rx(struct work_struct *work)
6965 {
6966 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6967 pending_rx_work);
6968 struct sk_buff *skb;
6969
6970 BT_DBG("");
6971
6972 while ((skb = skb_dequeue(&conn->pending_rx)))
6973 l2cap_recv_frame(conn, skb);
6974 }
6975
/* Create (or return the existing) L2CAP connection for an HCI link.
 *
 * Allocates the l2cap_conn, takes a reference on the hci_conn, creates
 * the HCI channel, picks the MTU from the link type, and initializes
 * locks, channel list, timers, and the pending-rx machinery.  Returns
 * NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up by a previous caller */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7038
7039 static bool is_valid_psm(u16 psm, u8 dst_type) {
7040 if (!psm)
7041 return false;
7042
7043 if (bdaddr_type_is_le(dst_type))
7044 return (psm <= 0x00ff);
7045
7046 /* PSM must be odd and lsb of upper byte must be 0 */
7047 return ((psm & 0x0101) == 0x0001);
7048 }
7049
/* Initiate an outgoing L2CAP channel connection.
 *
 * @chan:     channel to connect (locked/unlocked internally)
 * @psm:      destination PSM (little-endian), for PSM-based channels
 * @cid:      destination CID, for fixed channels
 * @dst:      remote device address
 * @dst_type: remote address type (BDADDR_BREDR / BDADDR_LE_*)
 *
 * Validates the psm/cid combination against the channel type and mode,
 * creates or reuses the underlying HCI connection (LE or ACL), attaches
 * the channel to the L2CAP connection, and either starts configuration
 * immediately (link already up) or leaves it to the connect-complete
 * callback.  Returns 0 on success (or if already connecting) and a
 * negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* A raw channel may connect without a PSM or CID; everything
	 * else needs a valid PSM or a CID.
	 */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we act as slave for incoming LE links */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* l2cap_chan_add() takes conn->chan_lock, so the channel lock
	 * must be released around it to respect lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7210
7211 /* ---- L2CAP interface with lower layer (HCI) ---- */
7212
7213 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7214 {
7215 int exact = 0, lm1 = 0, lm2 = 0;
7216 struct l2cap_chan *c;
7217
7218 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7219
7220 /* Find listening sockets and check their link_mode */
7221 read_lock(&chan_list_lock);
7222 list_for_each_entry(c, &chan_list, global_l) {
7223 if (c->state != BT_LISTEN)
7224 continue;
7225
7226 if (!bacmp(&c->src, &hdev->bdaddr)) {
7227 lm1 |= HCI_LM_ACCEPT;
7228 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7229 lm1 |= HCI_LM_MASTER;
7230 exact++;
7231 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7232 lm2 |= HCI_LM_ACCEPT;
7233 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7234 lm2 |= HCI_LM_MASTER;
7235 }
7236 }
7237 read_unlock(&chan_list_lock);
7238
7239 return exact ? lm1 : lm2;
7240 }
7241
7242 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7243 {
7244 struct l2cap_conn *conn;
7245
7246 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7247
7248 if (!status) {
7249 conn = l2cap_conn_add(hcon);
7250 if (conn)
7251 l2cap_conn_ready(conn);
7252 } else {
7253 l2cap_conn_del(hcon, bt_to_errno(status));
7254 }
7255 }
7256
7257 int l2cap_disconn_ind(struct hci_conn *hcon)
7258 {
7259 struct l2cap_conn *conn = hcon->l2cap_data;
7260
7261 BT_DBG("hcon %p", hcon);
7262
7263 if (!conn)
7264 return HCI_ERROR_REMOTE_USER_TERM;
7265 return conn->disc_reason;
7266 }
7267
/* HCI disconnection-complete callback: the ACL link is gone, so tear
 * down the whole L2CAP connection, notifying every channel with the
 * HCI reason translated to an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7274
7275 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7276 {
7277 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7278 return;
7279
7280 if (encrypt == 0x00) {
7281 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7282 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7283 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7284 chan->sec_level == BT_SECURITY_FIPS)
7285 l2cap_chan_close(chan, ECONNREFUSED);
7286 } else {
7287 if (chan->sec_level == BT_SECURITY_MEDIUM)
7288 __clear_chan_timer(chan);
7289 }
7290 }
7291
/* HCI security (authentication/encryption) change callback.
 *
 * Walks every channel on the connection and advances its state machine
 * according to the outcome: resuming suspended channels, starting
 * pending outgoing connections, or answering deferred incoming
 * connection requests.  Returns 0 unconditionally.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	/* On LE links a successful encryption triggers SMP key
	 * distribution; either way the SMP security timer is obsolete.
	 */
	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP signalling channel has its own security handling */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* ATT fixed channel: just becomes ready once encrypted */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels with a connect request still in flight */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channel: resume traffic that was held back
		 * during the security procedure, then apply the
		 * encryption-change policy.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was deferred pending security:
			 * send the connect response now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted: immediately follow up with our
			 * configuration request if not already sent.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7396
/* Entry point for ACL data from the HCI core.
 *
 * Reassembles possibly-fragmented ACL packets (per the @flags boundary
 * markers) into complete L2CAP frames using conn->rx_skb/rx_len as the
 * reassembly state, then hands full frames to l2cap_recv_frame(), which
 * takes ownership of the skb.  Malformed sequences mark the connection
 * unreliable and the offending fragment is dropped.  Always returns 0;
 * every skb is either consumed or freed here.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start marker while reassembly is in progress means
		 * the previous frame was never completed: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Total frame size = payload length from header + header */
		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Remember how many bytes are still outstanding */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a preceding start fragment */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the reassembly buffer: abort */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7501
7502 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7503 {
7504 struct l2cap_chan *c;
7505
7506 read_lock(&chan_list_lock);
7507
7508 list_for_each_entry(c, &chan_list, global_l) {
7509 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7510 &c->src, &c->dst,
7511 c->state, __le16_to_cpu(c->psm),
7512 c->scid, c->dcid, c->imtu, c->omtu,
7513 c->sec_level, c->mode);
7514 }
7515
7516 read_unlock(&chan_list_lock);
7517
7518 return 0;
7519 }
7520
/* debugfs open callback: bind the seq_file single-show handler */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7525
/* Read-only debugfs file exposing the global channel list */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Handle of the "l2cap" debugfs entry, removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7534
7535 int __init l2cap_init(void)
7536 {
7537 int err;
7538
7539 err = l2cap_init_sockets();
7540 if (err < 0)
7541 return err;
7542
7543 if (IS_ERR_OR_NULL(bt_debugfs))
7544 return 0;
7545
7546 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7547 NULL, &l2cap_debugfs_fops);
7548
7549 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7550 &le_max_credits);
7551 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7552 &le_default_mps);
7553
7554 return 0;
7555 }
7556
/* Module exit: tear down in reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7562
/* Expose the ERTM kill switch as a writable module parameter */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");