Bluetooth: Use explicit role instead of a bool in function parameters
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43
44 #define LE_FLOWCTL_MAX_CREDITS 65535
45
46 bool disable_ertm;
47
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
50
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 void *data);
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
66
67 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68 {
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
72 else
73 return BDADDR_LE_RANDOM;
74 }
75
76 return BDADDR_BREDR;
77 }
78
79 /* ---- L2CAP channels ---- */
80
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
82 u16 cid)
83 {
84 struct l2cap_chan *c;
85
86 list_for_each_entry(c, &conn->chan_l, list) {
87 if (c->dcid == cid)
88 return c;
89 }
90 return NULL;
91 }
92
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 u16 cid)
95 {
96 struct l2cap_chan *c;
97
98 list_for_each_entry(c, &conn->chan_l, list) {
99 if (c->scid == cid)
100 return c;
101 }
102 return NULL;
103 }
104
/* Find channel with given SCID.
 * Returns the channel locked via l2cap_chan_lock(), or NULL if no
 * channel on this connection uses that source CID.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* chan_lock keeps the list stable while we find and lock c */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
120
/* Find channel with given DCID.
 * Returns the channel locked via l2cap_chan_lock(), or NULL if no
 * channel on this connection uses that destination CID.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* chan_lock keeps the list stable while we find and lock c */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
137
138 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
139 u8 ident)
140 {
141 struct l2cap_chan *c;
142
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
145 return c;
146 }
147 return NULL;
148 }
149
/* Find channel with given signalling ident.
 * Returns the channel locked via l2cap_chan_lock(), or NULL.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	/* chan_lock keeps the list stable while we find and lock c */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
163
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165 {
166 struct l2cap_chan *c;
167
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&c->src, src))
170 return c;
171 }
172 return NULL;
173 }
174
/* Bind a channel to a PSM.  If psm is non-zero, the exact PSM is
 * claimed (fails with -EADDRINUSE if already bound on this source
 * address); if psm is zero, the first free dynamic PSM in the range
 * 0x1001-0x10ff (odd values only, per the L2CAP convention that valid
 * PSMs have an odd least-significant octet) is assigned.
 * Returns 0 on success, -EADDRINUSE or -EINVAL on failure.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		/* -EINVAL is reported if the whole dynamic range is taken */
		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
208
209 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
210 {
211 write_lock(&chan_list_lock);
212
213 chan->scid = scid;
214
215 write_unlock(&chan_list_lock);
216
217 return 0;
218 }
219
220 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
221 {
222 u16 cid, dyn_end;
223
224 if (conn->hcon->type == LE_LINK)
225 dyn_end = L2CAP_CID_LE_DYN_END;
226 else
227 dyn_end = L2CAP_CID_DYN_END;
228
229 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 if (!__l2cap_get_chan_by_scid(conn, cid))
231 return cid;
232 }
233
234 return 0;
235 }
236
/* Move the channel to a new state and notify the channel ops
 * callback (with err == 0).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
245
/* Move the channel to a new state and report an error code through
 * the state_change callback in one step.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
252
/* Report an error to the channel owner without changing state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
257
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (the monitor timer supersedes it) or no retransmit
 * timeout has been negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
266
/* Arm the ERTM monitor timer (cancelling any pending retransmission
 * timer first), if a monitor timeout has been negotiated.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
275
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
277 u16 seq)
278 {
279 struct sk_buff *skb;
280
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
283 return skb;
284 }
285
286 return NULL;
287 }
288
289 /* ---- L2CAP sequence number lists ---- */
290
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
297 * allocs or frees.
298 */
299
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
301 {
302 size_t alloc_size, i;
303
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
307 */
308 alloc_size = roundup_pow_of_two(size);
309
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
311 if (!seq_list->list)
312 return -ENOMEM;
313
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
319
320 return 0;
321 }
322
/* Release the backing array of a sequence-number list.
 * Safe to call on a list whose init failed (kfree(NULL) is a no-op).
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
327
/* Test whether seq is currently a member of the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
334
/* Remove and return the sequence number at the head of the list.
 * NOTE(review): assumes the list is non-empty — callers are expected
 * to check before popping.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next link and clear the popped slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last element: reset the list to empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
350
351 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
352 {
353 u16 i;
354
355 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
356 return;
357
358 for (i = 0; i <= seq_list->mask; i++)
359 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
360
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 }
364
/* Append seq to the tail of the list in constant time.
 * Duplicate appends are silently ignored (membership is tracked by
 * the slot value).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
382
/* Delayed-work handler for the channel timer: close the channel with
 * an error code derived from the state it timed out in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close is invoked without the channel lock held */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference held by the armed timer */
	l2cap_chan_put(chan);
}
412
/* Allocate and initialize a new L2CAP channel, link it into the
 * global channel list, and return it in state BT_OPEN with one
 * reference held.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
441
/* kref release callback: unlink the channel from the global list and
 * free it.  Called when the last reference is dropped.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
454
/* Take a reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
461
/* Drop a reference on the channel; frees it via l2cap_chan_destroy()
 * when the count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
469
/* Reset a channel's negotiable parameters (FCS, windows, timeouts,
 * security level) to the spec defaults and clear all config-state
 * flags.  Called before (re)starting channel configuration.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
488
/* Initialize the LE credit-based flow control state of a channel:
 * reset SDU reassembly, grant the module-default rx credits, and cap
 * the MPS at the channel's incoming MTU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	/* tx credits are granted by the remote in its connect rsp/req */
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
500
/* Attach a channel to a connection: assign CIDs and MTU according to
 * the channel type, set default extended-flow-spec parameters, and
 * link the channel into the connection's channel list.  Takes a
 * channel reference and an hci_conn reference.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow specification */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
549
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
556
557 void l2cap_chan_del(struct l2cap_chan *chan, int err)
558 {
559 struct l2cap_conn *conn = chan->conn;
560
561 __clear_chan_timer(chan);
562
563 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
564
565 if (conn) {
566 struct amp_mgr *mgr = conn->hcon->amp_mgr;
567 /* Delete from channel list */
568 list_del(&chan->list);
569
570 l2cap_chan_put(chan);
571
572 chan->conn = NULL;
573
574 if (chan->scid != L2CAP_CID_A2MP)
575 hci_conn_drop(conn->hcon);
576
577 if (mgr && mgr->bredr_chan == chan)
578 mgr->bredr_chan = NULL;
579 }
580
581 if (chan->hs_hchan) {
582 struct hci_chan *hs_hchan = chan->hs_hchan;
583
584 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
585 amp_disconnect_logical_link(hs_hchan);
586 }
587
588 chan->ops->teardown(chan, err);
589
590 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
591 return;
592
593 switch(chan->mode) {
594 case L2CAP_MODE_BASIC:
595 break;
596
597 case L2CAP_MODE_LE_FLOWCTL:
598 skb_queue_purge(&chan->tx_q);
599 break;
600
601 case L2CAP_MODE_ERTM:
602 __clear_retrans_timer(chan);
603 __clear_monitor_timer(chan);
604 __clear_ack_timer(chan);
605
606 skb_queue_purge(&chan->srej_q);
607
608 l2cap_seq_list_free(&chan->srej_list);
609 l2cap_seq_list_free(&chan->retrans_list);
610
611 /* fall through */
612
613 case L2CAP_MODE_STREAMING:
614 skb_queue_purge(&chan->tx_q);
615 break;
616 }
617
618 return;
619 }
620 EXPORT_SYMBOL_GPL(l2cap_chan_del);
621
/* Propagate an updated identity address of the remote device to every
 * channel on the connection (dst address and address type).
 */
void l2cap_conn_update_id_addr(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
638
/* Reject an incoming LE credit-based connection: move the channel to
 * BT_DISCONN and answer the pending LE Connect Request with an
 * authorization or bad-PSM failure result.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	/* A deferred-setup channel is rejected for authorization reasons */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
661
/* Reject an incoming BR/EDR connection: move the channel to
 * BT_DISCONN and answer the pending Connect Request with a
 * security-block or bad-PSM failure result.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
682
/* Close a channel according to its current state: send a disconnect
 * or reject where the protocol requires one, otherwise tear the
 * channel down directly.  Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Connection-oriented channels need a Disconnect Request;
		 * re-arm the channel timer while waiting for the response.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* An incoming request is pending: send the proper reject
		 * for the link type before deleting the channel.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
725
726 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
727 {
728 switch (chan->chan_type) {
729 case L2CAP_CHAN_RAW:
730 switch (chan->sec_level) {
731 case BT_SECURITY_HIGH:
732 case BT_SECURITY_FIPS:
733 return HCI_AT_DEDICATED_BONDING_MITM;
734 case BT_SECURITY_MEDIUM:
735 return HCI_AT_DEDICATED_BONDING;
736 default:
737 return HCI_AT_NO_BONDING;
738 }
739 break;
740 case L2CAP_CHAN_CONN_LESS:
741 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
742 if (chan->sec_level == BT_SECURITY_LOW)
743 chan->sec_level = BT_SECURITY_SDP;
744 }
745 if (chan->sec_level == BT_SECURITY_HIGH ||
746 chan->sec_level == BT_SECURITY_FIPS)
747 return HCI_AT_NO_BONDING_MITM;
748 else
749 return HCI_AT_NO_BONDING;
750 break;
751 case L2CAP_CHAN_CONN_ORIENTED:
752 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
753 if (chan->sec_level == BT_SECURITY_LOW)
754 chan->sec_level = BT_SECURITY_SDP;
755
756 if (chan->sec_level == BT_SECURITY_HIGH ||
757 chan->sec_level == BT_SECURITY_FIPS)
758 return HCI_AT_NO_BONDING_MITM;
759 else
760 return HCI_AT_NO_BONDING;
761 }
762 /* fall through */
763 default:
764 switch (chan->sec_level) {
765 case BT_SECURITY_HIGH:
766 case BT_SECURITY_FIPS:
767 return HCI_AT_GENERAL_BONDING_MITM;
768 case BT_SECURITY_MEDIUM:
769 return HCI_AT_GENERAL_BONDING;
770 default:
771 return HCI_AT_NO_BONDING;
772 }
773 break;
774 }
775 }
776
/* Service level security.
 * Ensure the underlying link satisfies the channel's security level:
 * SMP for LE links, HCI authentication/encryption for BR/EDR.
 * Returns the underlying security call's result.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
790
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel's reserved range.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identifier.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
812
/* Build and transmit an L2CAP signalling command over the ACL link.
 * Silently drops the command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Prefer non-flushable packets when the controller supports them */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic always gets top priority */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
834
835 static bool __chan_is_moving(struct l2cap_chan *chan)
836 {
837 return chan->move_state != L2CAP_MOVE_STABLE &&
838 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
839 }
840
/* Transmit a data skb on the channel, routing it over the AMP
 * high-speed link when one is established, otherwise over the
 * BR/EDR ACL link with the appropriate flush flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* HS link gone: drop rather than misroute */
			kfree_skb(skb);

		return;
	}

	/* Non-flushable only when the channel did not ask to be
	 * flushable and the controller supports it.
	 */
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
867
/* Decode a 16-bit enhanced ERTM control field into an l2cap_ctrl.
 * Fields that do not apply to the decoded frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
891
/* Decode a 32-bit extended ERTM control field into an l2cap_ctrl.
 * Fields that do not apply to the decoded frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
915
916 static inline void __unpack_control(struct l2cap_chan *chan,
917 struct sk_buff *skb)
918 {
919 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
920 __unpack_extended_control(get_unaligned_le32(skb->data),
921 &bt_cb(skb)->control);
922 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
923 } else {
924 __unpack_enhanced_control(get_unaligned_le16(skb->data),
925 &bt_cb(skb)->control);
926 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
927 }
928 }
929
/* Encode an l2cap_ctrl into a 32-bit extended ERTM control field. */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
948
/* Encode an l2cap_ctrl into a 16-bit enhanced ERTM control field. */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
967
968 static inline void __pack_control(struct l2cap_chan *chan,
969 struct l2cap_ctrl *control,
970 struct sk_buff *skb)
971 {
972 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
973 put_unaligned_le32(__pack_extended_control(control),
974 skb->data + L2CAP_HDR_SIZE);
975 } else {
976 put_unaligned_le16(__pack_enhanced_control(control),
977 skb->data + L2CAP_HDR_SIZE);
978 }
979 }
980
981 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
982 {
983 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
984 return L2CAP_EXT_HDR_SIZE;
985 else
986 return L2CAP_ENH_HDR_SIZE;
987 }
988
/* Build an ERTM S-frame PDU carrying the given packed control field:
 * L2CAP header, control field (enhanced or extended), and an optional
 * CRC16 FCS.  Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far (header + control) */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1021
/* Build and send an ERTM supervisory frame, updating the channel's
 * ack/RNR bookkeeping as a side effect.  Ignored for non-S-frame
 * control structures and while the channel is moving between links.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Everything except SREJ acknowledges up to reqseq */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1062
/* Send a Receiver-Ready or (if we are locally busy) a
 * Receiver-Not-Ready S-frame, optionally with the poll bit set,
 * acknowledging up to the current buffer_seq.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1081
1082 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1083 {
1084 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1085 }
1086
/* Decide whether this channel may be created on (or moved to) an AMP
 * controller: high speed must be enabled on the connection, the
 * remote must support the A2MP fixed channel, a non-BR/EDR AMP
 * controller must be up locally, and the channel policy must prefer
 * AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	/* Scan for any powered-up AMP controller */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1114
/* Validate extended flow specification parameters.
 * NOTE(review): placeholder — always accepts; real EFS validation is
 * not implemented here.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1120
/* Send an L2CAP Connect Request for this channel, allocating a fresh
 * signalling ident and marking the connect as pending.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1135
/* Send an L2CAP Create Channel Request targeting the given AMP
 * controller id, allocating a fresh signalling ident.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1148
/* Prepare an ERTM channel for a move between controllers: stop all
 * timers, mark already-sent frames for retransmission, reset the
 * SREJ/retransmit bookkeeping, and park the tx/rx state machines.
 * No-op for channels not in ERTM mode.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames already transmitted (retries != 0) are reset to one
	 * retry so they are resent after the move; the first
	 * never-sent frame ends the scan.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1183
/* Finish a channel move: return the move state machine to stable and,
 * for ERTM channels, resynchronize with the peer according to the
 * role we played in the move (initiator polls, responder waits).
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1205
/* Transition @chan to BT_CONNECTED: drop all configuration state and
 * the channel timer, then notify the owner via ops->ready().  LE
 * flow-control channels that start with no TX credits are suspended
 * until credits arrive.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1219
1220 static void l2cap_le_connect(struct l2cap_chan *chan)
1221 {
1222 struct l2cap_conn *conn = chan->conn;
1223 struct l2cap_le_conn_req req;
1224
1225 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1226 return;
1227
1228 req.psm = chan->psm;
1229 req.scid = cpu_to_le16(chan->scid);
1230 req.mtu = cpu_to_le16(chan->imtu);
1231 req.mps = cpu_to_le16(chan->mps);
1232 req.credits = cpu_to_le16(chan->rx_credits);
1233
1234 chan->ident = l2cap_get_ident(conn);
1235
1236 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1237 sizeof(req), &req);
1238 }
1239
1240 static void l2cap_le_start(struct l2cap_chan *chan)
1241 {
1242 struct l2cap_conn *conn = chan->conn;
1243
1244 if (!smp_conn_security(conn->hcon, chan->sec_level))
1245 return;
1246
1247 if (!chan->psm) {
1248 l2cap_chan_ready(chan);
1249 return;
1250 }
1251
1252 if (chan->state == BT_CONNECT)
1253 l2cap_le_connect(chan);
1254 }
1255
/* Kick off channel establishment using the transport-appropriate
 * mechanism: AMP discovery for AMP-capable channels, the LE procedure
 * on LE links, or a plain connection request on BR/EDR.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1267
/* Start connection establishment for @chan, first making sure the
 * remote's feature mask is known.  On BR/EDR the information request
 * is triggered (once per connection) and the actual connect is
 * deferred until the info response or timeout arrives; LE links skip
 * the feature exchange entirely.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight; wait for it */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* l2cap_info_timeout() unblocks setup if no reply comes */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1298
1299 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1300 {
1301 u32 local_feat_mask = l2cap_feat_mask;
1302 if (!disable_ertm)
1303 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1304
1305 switch (mode) {
1306 case L2CAP_MODE_ERTM:
1307 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1308 case L2CAP_MODE_STREAMING:
1309 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1310 default:
1311 return 0x00;
1312 }
1313 }
1314
/* Tear down @chan: stop ERTM timers, send an L2CAP Disconnection
 * Request (A2MP channels just change state — they have no dcid to
 * disconnect) and move the channel to BT_DISCONN with @err recorded
 * for the owner.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1341
1342 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and push connection setup forward:
 * BT_CONNECT channels (re)start establishment, BT_CONNECT2 channels
 * answer their pending connect request based on the current security
 * state.  Typically called once the remote feature mask is known.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on a mode this
			 * device pair cannot support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			/* NOTE(review): conf req is built into a fixed
			 * 128-byte stack buffer; l2cap_build_conf_req is
			 * assumed to stay within it — verify.
			 */
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Owner decides; report authorization pending */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only send the first config request, and only
			 * after a successful connect response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1418
1419 /* Find socket with cid and source/destination bdaddr.
1420 * Returns closest match, locked.
1421 */
1422 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1423 bdaddr_t *src,
1424 bdaddr_t *dst)
1425 {
1426 struct l2cap_chan *c, *c1 = NULL;
1427
1428 read_lock(&chan_list_lock);
1429
1430 list_for_each_entry(c, &chan_list, global_l) {
1431 if (state && c->state != state)
1432 continue;
1433
1434 if (c->scid == cid) {
1435 int src_match, dst_match;
1436 int src_any, dst_any;
1437
1438 /* Exact match. */
1439 src_match = !bacmp(&c->src, src);
1440 dst_match = !bacmp(&c->dst, dst);
1441 if (src_match && dst_match) {
1442 read_unlock(&chan_list_lock);
1443 return c;
1444 }
1445
1446 /* Closest match */
1447 src_any = !bacmp(&c->src, BDADDR_ANY);
1448 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1449 if ((src_match && dst_any) || (src_any && dst_match) ||
1450 (src_any && dst_any))
1451 c1 = c;
1452 }
1453 }
1454
1455 read_unlock(&chan_list_lock);
1456
1457 return c1;
1458 }
1459
/* An LE link came up: if an ATT server socket is listening, spawn a
 * child channel for the fixed ATT CID (unless a client ATT channel
 * already exists or the peer is blacklisted).  As slave, also request
 * a connection parameter update when the interval is out of bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}

	l2cap_chan_lock(pchan);

	/* Ask the listening socket to spawn a child channel */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
1520
/* The underlying HCI connection is fully established: start security
 * for outgoing LE pairings, bring up the LE ATT channel if needed, and
 * advance every attached channel (LE start, ready, or BR/EDR connect).
 * Finally release any RX frames that queued up while connecting.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels become ready through their own protocol */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	/* Process frames that arrived before the conn was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1564
/* Notify sockets that we cannot guarantee reliability anymore */
1566 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1567 {
1568 struct l2cap_chan *chan;
1569
1570 BT_DBG("conn %p", conn);
1571
1572 mutex_lock(&conn->chan_lock);
1573
1574 list_for_each_entry(chan, &conn->chan_l, list) {
1575 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1576 l2cap_chan_set_err(chan, err);
1577 }
1578
1579 mutex_unlock(&conn->chan_lock);
1580 }
1581
/* Information request timer expired: treat the feature exchange as
 * done (with whatever defaults we have) and let pending channels
 * proceed via l2cap_conn_start().
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1592
1593 /*
1594 * l2cap_user
1595 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1596 * callback is called during registration. The ->remove callback is called
1597 * during unregistration.
1598 * An l2cap_user object can either be explicitly unregistered or when the
1599 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1600 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1601 * External modules must own a reference to the l2cap_conn object if they intend
1602 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1603 * any time if they don't.
1604 */
1605
/* Register an l2cap_user on @conn.  Returns -EINVAL if @user is
 * already linked, -ENODEV if the connection has been torn down, or the
 * result of the user's ->probe() callback, which runs before the user
 * is linked in.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1643
/* Unregister @user from @conn and invoke its ->remove() callback.
 * Safe to call for a user that was never (or already un-) registered.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	/* NULL list pointers mean the user is not currently registered */
	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1662
1663 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1664 {
1665 struct l2cap_user *user;
1666
1667 while (!list_empty(&conn->users)) {
1668 user = list_first_entry(&conn->users, struct l2cap_user, list);
1669 list_del(&user->list);
1670 user->list.next = NULL;
1671 user->list.prev = NULL;
1672 user->remove(conn, user);
1673 }
1674 }
1675
/* Tear down the L2CAP layer of @hcon: flush pending RX, unregister all
 * users, close every channel with @err, release the HCI channel and
 * drop the connection reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so ops->close() can run after chan_del */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1730
/* SMP security procedure timed out: destroy the SMP context and tear
 * down the whole connection with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1743
/* kref release callback: drop the hci_conn reference and free @conn */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1751
/* Take a reference on @conn */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1757
/* Drop a reference on @conn; the last put frees it via l2cap_conn_free */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1763
1764 /* ---- Socket interface ---- */
1765
1766 /* Find socket with psm and source / destination bdaddr.
1767 * Returns closest match.
1768 */
1769 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1770 bdaddr_t *src,
1771 bdaddr_t *dst,
1772 u8 link_type)
1773 {
1774 struct l2cap_chan *c, *c1 = NULL;
1775
1776 read_lock(&chan_list_lock);
1777
1778 list_for_each_entry(c, &chan_list, global_l) {
1779 if (state && c->state != state)
1780 continue;
1781
1782 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1783 continue;
1784
1785 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1786 continue;
1787
1788 if (c->psm == psm) {
1789 int src_match, dst_match;
1790 int src_any, dst_any;
1791
1792 /* Exact match. */
1793 src_match = !bacmp(&c->src, src);
1794 dst_match = !bacmp(&c->dst, dst);
1795 if (src_match && dst_match) {
1796 read_unlock(&chan_list_lock);
1797 return c;
1798 }
1799
1800 /* Closest match */
1801 src_any = !bacmp(&c->src, BDADDR_ANY);
1802 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1803 if ((src_match && dst_any) || (src_any && dst_match) ||
1804 (src_any && dst_any))
1805 c1 = c;
1806 }
1807 }
1808
1809 read_unlock(&chan_list_lock);
1810
1811 return c1;
1812 }
1813
/* ERTM monitor timer expired: feed the MONITOR_TO event into the TX
 * state machine unless the channel already lost its connection.
 * Always drops the reference the timer held on the channel.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1834
/* ERTM retransmission timer expired: feed the RETRANS_TO event into
 * the TX state machine unless the channel already lost its connection.
 * Always drops the reference the timer held on the channel.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1854
/* Transmit the queued PDUs in @skbs in streaming mode: number each
 * frame, append an FCS if configured and send immediately — streaming
 * mode keeps no retransmission state.  Skipped entirely while the
 * channel is being moved between controllers.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames carry no ack information */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1893
/* Transmit as many new ERTM I-frames as the remote TX window allows.
 * Each frame is sequence-numbered, carries the current ack (reqseq),
 * optionally an F-bit and FCS, and is sent as a clone so the original
 * stays queued for retransmission.  Returns the number of frames sent
 * or -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* An outgoing I-frame acknowledges everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1963
/* Retransmit every sequence number currently on the retrans_list.
 * Frames that exceed max_tx retries trigger a disconnect.  A writable
 * copy is taken of cloned skbs so the control field (current reqseq
 * and F-bit) and FCS can be rewritten before sending.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for this transmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2042
/* Queue the single frame named by @control->reqseq for retransmission
 * and kick the resend machinery (used for SREJ handling).
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2051
/* Rebuild the retransmission list with every unacked frame starting at
 * @control->reqseq and resend them (typically on receipt of a REJ).
 * A poll request additionally arms the F-bit for the resent frames.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip ahead to the first frame the peer is missing */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything sent so far, up to tx_send_head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2085
/* Acknowledge received I-frames.  Reports RNR when locally busy;
 * otherwise tries to piggyback the ack on outgoing I-frames, sends an
 * explicit RR once the ack backlog reaches 3/4 of the window, and arms
 * the ack timer for anything still outstanding.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Tell the peer we cannot receive right now */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2135
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb head, the remainder into newly allocated
 * continuation fragments chained on frag_list (each at most conn->mtu
 * bytes).  Returns the number of bytes copied or a negative error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
					msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
						msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account fragment bytes in the parent skb */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2180
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the user payload from @msg.  Returns the
 * skb, or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2212
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload from @msg.  Returns the skb, or an ERR_PTR on
 * allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2242
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at transmit time), optional SDU length for a first
 * segment (@sdulen != 0), the payload, with headroom accounted for an
 * FCS when enabled.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2296
/* Split an outgoing SDU into ERTM I-frame PDUs sized to fit one HCI
 * fragment (and the remote's MPS), tagging each with the proper SAR
 * value (unsegmented, start, continue, end).  On success the PDUs are
 * on @seg_queue; on failure the queue is purged and an error returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* The whole SDU fits in one PDU */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first segment carries the SDU length */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2366
/* Build an LE flow-control (K-frame) PDU: L2CAP header, optional SDU
 * length for the first segment (@sdulen != 0) and the payload from
 * @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2409
2410 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2411 struct sk_buff_head *seg_queue,
2412 struct msghdr *msg, size_t len)
2413 {
2414 struct sk_buff *skb;
2415 size_t pdu_len;
2416 u16 sdu_len;
2417
2418 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2419
2420 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2421
2422 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2423
2424 sdu_len = len;
2425 pdu_len -= L2CAP_SDULEN_SIZE;
2426
2427 while (len > 0) {
2428 if (len <= pdu_len)
2429 pdu_len = len;
2430
2431 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2432 if (IS_ERR(skb)) {
2433 __skb_queue_purge(seg_queue);
2434 return PTR_ERR(skb);
2435 }
2436
2437 __skb_queue_tail(seg_queue, skb);
2438
2439 len -= pdu_len;
2440
2441 if (sdu_len) {
2442 sdu_len = 0;
2443 pdu_len += L2CAP_SDULEN_SIZE;
2444 }
2445 }
2446
2447 return 0;
2448 }
2449
/* Send an SDU on @chan according to its channel mode.
 *
 * Returns the number of bytes accepted (@len) on success or a negative
 * errno.  The skb allocation callbacks may release and reacquire the
 * channel lock, so the channel state is re-checked after each
 * allocation (see the comments below).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* LE CoC is credit based; without credits the caller
		 * must retry later.
		 */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many queued PDUs as the remaining credits allow */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2582
2583 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2584 {
2585 struct l2cap_ctrl control;
2586 u16 seq;
2587
2588 BT_DBG("chan %p, txseq %u", chan, txseq);
2589
2590 memset(&control, 0, sizeof(control));
2591 control.sframe = 1;
2592 control.super = L2CAP_SUPER_SREJ;
2593
2594 for (seq = chan->expected_tx_seq; seq != txseq;
2595 seq = __next_seq(chan, seq)) {
2596 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2597 control.reqseq = seq;
2598 l2cap_send_sframe(chan, &control);
2599 l2cap_seq_list_append(&chan->srej_list, seq);
2600 }
2601 }
2602
2603 chan->expected_tx_seq = __next_seq(chan, txseq);
2604 }
2605
2606 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2607 {
2608 struct l2cap_ctrl control;
2609
2610 BT_DBG("chan %p", chan);
2611
2612 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2613 return;
2614
2615 memset(&control, 0, sizeof(control));
2616 control.sframe = 1;
2617 control.super = L2CAP_SUPER_SREJ;
2618 control.reqseq = chan->srej_list.tail;
2619 l2cap_send_sframe(chan, &control);
2620 }
2621
2622 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2623 {
2624 struct l2cap_ctrl control;
2625 u16 initial_head;
2626 u16 seq;
2627
2628 BT_DBG("chan %p, txseq %u", chan, txseq);
2629
2630 memset(&control, 0, sizeof(control));
2631 control.sframe = 1;
2632 control.super = L2CAP_SUPER_SREJ;
2633
2634 /* Capture initial list head to allow only one pass through the list. */
2635 initial_head = chan->srej_list.head;
2636
2637 do {
2638 seq = l2cap_seq_list_pop(&chan->srej_list);
2639 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2640 break;
2641
2642 control.reqseq = seq;
2643 l2cap_send_sframe(chan, &control);
2644 l2cap_seq_list_append(&chan->srej_list, seq);
2645 } while (chan->srej_list.head != initial_head);
2646 }
2647
2648 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2649 {
2650 struct sk_buff *acked_skb;
2651 u16 ackseq;
2652
2653 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2654
2655 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2656 return;
2657
2658 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2659 chan->expected_ack_seq, chan->unacked_frames);
2660
2661 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2662 ackseq = __next_seq(chan, ackseq)) {
2663
2664 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2665 if (acked_skb) {
2666 skb_unlink(acked_skb, &chan->tx_q);
2667 kfree_skb(acked_skb);
2668 chan->unacked_frames--;
2669 }
2670 }
2671
2672 chan->expected_ack_seq = reqseq;
2673
2674 if (chan->unacked_frames == 0)
2675 __clear_retrans_timer(chan);
2676
2677 BT_DBG("unacked_frames %u", chan->unacked_frames);
2678 }
2679
/* Abandon the SREJ_SENT receive state: drop all out-of-sequence frames
 * held for reassembly, clear the SREJ list and resume normal reception
 * from buffer_seq.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2689
/* Handle a TX state-machine event while in the XMIT state.
 *
 * In XMIT, new data is queued and passed straight to
 * l2cap_ertm_send(); explicit polls and retransmission timeouts send
 * an RR/RNR and move the channel into WAIT_F until the remote answers
 * with the final bit set.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new PDUs and transmit immediately */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy; now poll the
			 * remote with an RR (poll bit set) and wait
			 * for its F-bit response in WAIT_F.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the remote and
		 * enter WAIT_F instead of blindly retransmitting.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2761
2762 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2763 struct l2cap_ctrl *control,
2764 struct sk_buff_head *skbs, u8 event)
2765 {
2766 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2767 event);
2768
2769 switch (event) {
2770 case L2CAP_EV_DATA_REQUEST:
2771 if (chan->tx_send_head == NULL)
2772 chan->tx_send_head = skb_peek(skbs);
2773 /* Queue data, but don't send. */
2774 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2775 break;
2776 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2777 BT_DBG("Enter LOCAL_BUSY");
2778 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2779
2780 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2781 /* The SREJ_SENT state must be aborted if we are to
2782 * enter the LOCAL_BUSY state.
2783 */
2784 l2cap_abort_rx_srej_sent(chan);
2785 }
2786
2787 l2cap_send_ack(chan);
2788
2789 break;
2790 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2791 BT_DBG("Exit LOCAL_BUSY");
2792 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2793
2794 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2795 struct l2cap_ctrl local_control;
2796 memset(&local_control, 0, sizeof(local_control));
2797 local_control.sframe = 1;
2798 local_control.super = L2CAP_SUPER_RR;
2799 local_control.poll = 1;
2800 local_control.reqseq = chan->buffer_seq;
2801 l2cap_send_sframe(chan, &local_control);
2802
2803 chan->retry_count = 1;
2804 __set_monitor_timer(chan);
2805 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2806 }
2807 break;
2808 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2809 l2cap_process_reqseq(chan, control->reqseq);
2810
2811 /* Fall through */
2812
2813 case L2CAP_EV_RECV_FBIT:
2814 if (control && control->final) {
2815 __clear_monitor_timer(chan);
2816 if (chan->unacked_frames > 0)
2817 __set_retrans_timer(chan);
2818 chan->retry_count = 0;
2819 chan->tx_state = L2CAP_TX_STATE_XMIT;
2820 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2821 }
2822 break;
2823 case L2CAP_EV_EXPLICIT_POLL:
2824 /* Ignore */
2825 break;
2826 case L2CAP_EV_MONITOR_TO:
2827 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2828 l2cap_send_rr_or_rnr(chan, 1);
2829 __set_monitor_timer(chan);
2830 chan->retry_count++;
2831 } else {
2832 l2cap_send_disconn_req(chan, ECONNABORTED);
2833 }
2834 break;
2835 default:
2836 break;
2837 }
2838 }
2839
2840 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2841 struct sk_buff_head *skbs, u8 event)
2842 {
2843 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2844 chan, control, skbs, event, chan->tx_state);
2845
2846 switch (chan->tx_state) {
2847 case L2CAP_TX_STATE_XMIT:
2848 l2cap_tx_state_xmit(chan, control, skbs, event);
2849 break;
2850 case L2CAP_TX_STATE_WAIT_F:
2851 l2cap_tx_state_wait_f(chan, control, skbs, event);
2852 break;
2853 default:
2854 /* Ignore event */
2855 break;
2856 }
2857 }
2858
/* Feed the reqseq and F-bit of a received frame into the TX state
 * machine (acks outstanding frames, may leave WAIT_F).
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2865
/* Feed only the F-bit of a received frame into the TX state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2872
2873 /* Copy frame to all raw sockets on that connection */
2874 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2875 {
2876 struct sk_buff *nskb;
2877 struct l2cap_chan *chan;
2878
2879 BT_DBG("conn %p", conn);
2880
2881 mutex_lock(&conn->chan_lock);
2882
2883 list_for_each_entry(chan, &conn->chan_l, list) {
2884 if (chan->chan_type != L2CAP_CHAN_RAW)
2885 continue;
2886
2887 /* Don't send frame to the channel it came from */
2888 if (bt_cb(skb)->chan == chan)
2889 continue;
2890
2891 nskb = skb_clone(skb, GFP_KERNEL);
2892 if (!nskb)
2893 continue;
2894 if (chan->ops->recv(chan, nskb))
2895 kfree_skb(nskb);
2896 }
2897
2898 mutex_unlock(&conn->chan_lock);
2899 }
2900
/* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: basic L2CAP header, command header
 * and @dlen bytes copied from @data.
 *
 * Payload that does not fit in the first skb (bounded by conn->mtu) is
 * carried in continuation fragments chained on frag_list, with no
 * additional L2CAP headers.  Returns NULL if the MTU is too small for
 * the headers or on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE and BR/EDR use different fixed signalling channels */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment: whatever payload fits after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain including any fragments built so far */
	kfree_skb(skb);
	return NULL;
}
2967
/* Parse one configuration option from the buffer at *ptr.
 *
 * Advances *ptr past the option and returns the total size consumed
 * (option header plus value).  The option type and value length are
 * stored in *type and *olen; 1-, 2- and 4-byte values are loaded into
 * *val, while any other length is returned by reference (a pointer to
 * the raw option bytes cast into *val).
 *
 * NOTE(review): opt->len comes straight from the peer and is not
 * bounds-checked against the remaining buffer here — callers must
 * validate *olen against the expected option size before using *val.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	/* Total size of this option: header plus payload */
	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Larger values are passed back by pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3001
3002 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3003 {
3004 struct l2cap_conf_opt *opt = *ptr;
3005
3006 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3007
3008 opt->type = type;
3009 opt->len = len;
3010
3011 switch (len) {
3012 case 1:
3013 *((u8 *) opt->val) = val;
3014 break;
3015
3016 case 2:
3017 put_unaligned_le16(val, opt->val);
3018 break;
3019
3020 case 4:
3021 put_unaligned_le32(val, opt->val);
3022 break;
3023
3024 default:
3025 memcpy(opt->val, (void *) val, len);
3026 break;
3027 }
3028
3029 *ptr += L2CAP_CONF_OPT_SIZE + len;
3030 }
3031
3032 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3033 {
3034 struct l2cap_conf_efs efs;
3035
3036 switch (chan->mode) {
3037 case L2CAP_MODE_ERTM:
3038 efs.id = chan->local_id;
3039 efs.stype = chan->local_stype;
3040 efs.msdu = cpu_to_le16(chan->local_msdu);
3041 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3042 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3043 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3044 break;
3045
3046 case L2CAP_MODE_STREAMING:
3047 efs.id = 1;
3048 efs.stype = L2CAP_SERV_BESTEFFORT;
3049 efs.msdu = cpu_to_le16(chan->local_msdu);
3050 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3051 efs.acc_lat = 0;
3052 efs.flush_to = 0;
3053 break;
3054
3055 default:
3056 return;
3057 }
3058
3059 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3060 (unsigned long) &efs);
3061 }
3062
3063 static void l2cap_ack_timeout(struct work_struct *work)
3064 {
3065 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3066 ack_timer.work);
3067 u16 frames_to_ack;
3068
3069 BT_DBG("chan %p", chan);
3070
3071 l2cap_chan_lock(chan);
3072
3073 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3074 chan->last_acked_seq);
3075
3076 if (frames_to_ack)
3077 l2cap_send_rr_or_rnr(chan, 0);
3078
3079 l2cap_chan_unlock(chan);
3080 l2cap_chan_put(chan);
3081 }
3082
/* Reset per-channel sequence, SAR-reassembly and AMP-move state, and
 * for ERTM channels additionally initialize the timers, SREJ queue and
 * sequence lists.
 *
 * Returns 0 on success or a negative errno if a sequence list cannot
 * be allocated.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP move state starts out stable on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Non-ERTM modes need none of the machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the srej list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3127
3128 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3129 {
3130 switch (mode) {
3131 case L2CAP_MODE_STREAMING:
3132 case L2CAP_MODE_ERTM:
3133 if (l2cap_mode_supported(mode, remote_feat_mask))
3134 return mode;
3135 /* fall through */
3136 default:
3137 return L2CAP_MODE_BASIC;
3138 }
3139 }
3140
3141 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3142 {
3143 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3144 }
3145
3146 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3147 {
3148 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3149 }
3150
3151 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3152 struct l2cap_conf_rfc *rfc)
3153 {
3154 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3155 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3156
3157 /* Class 1 devices have must have ERTM timeouts
3158 * exceeding the Link Supervision Timeout. The
3159 * default Link Supervision Timeout for AMP
3160 * controllers is 10 seconds.
3161 *
3162 * Class 1 devices use 0xffffffff for their
3163 * best-effort flush timeout, so the clamping logic
3164 * will result in a timeout that meets the above
3165 * requirement. ERTM timeouts are 16-bit values, so
3166 * the maximum timeout is 65.535 seconds.
3167 */
3168
3169 /* Convert timeout to milliseconds and round */
3170 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3171
3172 /* This is the recommended formula for class 2 devices
3173 * that start ERTM timers when packets are sent to the
3174 * controller.
3175 */
3176 ertm_to = 3 * ertm_to + 500;
3177
3178 if (ertm_to > 0xffff)
3179 ertm_to = 0xffff;
3180
3181 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3182 rfc->monitor_timeout = rfc->retrans_timeout;
3183 } else {
3184 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3185 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3186 }
3187 }
3188
3189 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3190 {
3191 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3192 __l2cap_ews_supported(chan->conn)) {
3193 /* use extended control field */
3194 set_bit(FLAG_EXT_CTRL, &chan->flags);
3195 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3196 } else {
3197 chan->tx_win = min_t(u16, chan->tx_win,
3198 L2CAP_DEFAULT_TX_WINDOW);
3199 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3200 }
3201 chan->ack_win = chan->tx_win;
3202 }
3203
/* Build a Configure Request for @chan into @data: select the channel
 * mode (on the first request only) and append the MTU, RFC, EFS, EWS
 * and FCS options as appropriate for that mode.
 *
 * Returns the total length of the request written to @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only add an explicit basic-mode RFC option if the
		 * remote supports the other modes at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size at what fits in one HCI payload
		 * with worst-case ERTM overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3324
/* Parse the peer's Configure Request (stored in chan->conf_req) and
 * build our Configure Response into @data.
 *
 * Returns the length of the response, or -ECONNREFUSED if the request
 * is unacceptable and the channel should be disconnected.
 *
 * Option lengths reported by l2cap_get_conf_opt() are remote
 * controlled, so each option is validated against its expected size
 * before the value is used; malformed options are ignored instead of
 * being parsed from truncated or oversized data (previously e.g.
 * memcpy(&rfc, ..., olen) ran with an attacker-chosen olen).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Record unknown, non-hint options so they can
			 * be reported back in the response.
			 */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Only one mode renegotiation attempt is allowed */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's MPS to what fits in one
			 * HCI payload with worst-case ERTM overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3538
/* Parse a Configure Response from the remote side and rebuild our
 * Configure Request in @data, committing peer-adjusted values to the
 * channel as we go.  *@result may be updated (e.g. to
 * L2CAP_CONF_UNACCEPT for a too-small MTU).  Returns the length of the
 * rebuilt request, or -ECONNREFUSED if the response is unacceptable.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk every option in the response, echoing (possibly adjusted)
	 * options into the new request.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Never accept an MTU below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A "state 2" device must not let the mode change
			 * away from what it already committed to.
			 */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* The peer's service type must match ours unless
			 * one of the sides is "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic mode channel cannot be renegotiated into another mode */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control fields the tx window in
			 * the RFC option bounds our ack window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3647
3648 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3649 u16 result, u16 flags)
3650 {
3651 struct l2cap_conf_rsp *rsp = data;
3652 void *ptr = rsp->data;
3653
3654 BT_DBG("chan %p", chan);
3655
3656 rsp->scid = cpu_to_le16(chan->dcid);
3657 rsp->result = cpu_to_le16(result);
3658 rsp->flags = cpu_to_le16(flags);
3659
3660 return ptr - data;
3661 }
3662
3663 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3664 {
3665 struct l2cap_le_conn_rsp rsp;
3666 struct l2cap_conn *conn = chan->conn;
3667
3668 BT_DBG("chan %p", chan);
3669
3670 rsp.dcid = cpu_to_le16(chan->scid);
3671 rsp.mtu = cpu_to_le16(chan->imtu);
3672 rsp.mps = cpu_to_le16(chan->mps);
3673 rsp.credits = cpu_to_le16(chan->rx_credits);
3674 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3675
3676 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3677 &rsp);
3678 }
3679
3680 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3681 {
3682 struct l2cap_conn_rsp rsp;
3683 struct l2cap_conn *conn = chan->conn;
3684 u8 buf[128];
3685 u8 rsp_code;
3686
3687 rsp.scid = cpu_to_le16(chan->dcid);
3688 rsp.dcid = cpu_to_le16(chan->scid);
3689 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3690 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3691
3692 if (chan->hs_hcon)
3693 rsp_code = L2CAP_CREATE_CHAN_RSP;
3694 else
3695 rsp_code = L2CAP_CONN_RSP;
3696
3697 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3698
3699 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3700
3701 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3702 return;
3703
3704 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3705 l2cap_build_conf_req(chan, buf), buf);
3706 chan->num_conf_req++;
3707 }
3708
/* Extract the final RFC (and extended window size) parameters from an
 * accepted Configure Response and commit them to the channel.  Only
 * meaningful for ERTM and streaming mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pick the RFC and EWS options out of the response, if present */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control fields the ack window comes from
		 * the EWS option, otherwise from the RFC tx window.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3759
3760 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3761 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3762 u8 *data)
3763 {
3764 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3765
3766 if (cmd_len < sizeof(*rej))
3767 return -EPROTO;
3768
3769 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3770 return 0;
3771
3772 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3773 cmd->ident == conn->info_ident) {
3774 cancel_delayed_work(&conn->info_timer);
3775
3776 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3777 conn->info_ident = 0;
3778
3779 l2cap_conn_start(conn);
3780 }
3781
3782 return 0;
3783 }
3784
/* Handle an incoming Connection Request (or, via
 * l2cap_create_channel_req(), a Create Channel Request for @amp_id).
 * Looks up a listening channel for the requested PSM, performs security
 * checks, creates the new channel and answers with @rsp_code.  Returns
 * the new channel, or NULL when the request was rejected.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the request ident so the deferred response can reuse it */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still in progress: answer pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off the feature-mask Information Request if not done yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Successful connect goes straight to configuration */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3918
3919 static int l2cap_connect_req(struct l2cap_conn *conn,
3920 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3921 {
3922 struct hci_dev *hdev = conn->hcon->hdev;
3923 struct hci_conn *hcon = conn->hcon;
3924
3925 if (cmd_len < sizeof(struct l2cap_conn_req))
3926 return -EPROTO;
3927
3928 hci_dev_lock(hdev);
3929 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3930 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3931 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3932 hcon->dst_type, 0, NULL, 0,
3933 hcon->dev_class);
3934 hci_dev_unlock(hdev);
3935
3936 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3937 return 0;
3938 }
3939
/* Handle a Connection Response or Create Channel Response.  Looks up the
 * local channel by source CID (or, while still unbound, by the ident of
 * the request we sent), then proceeds to configuration on success, keeps
 * waiting on a pending result, or tears the channel down otherwise.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A zero scid means the peer has not (yet) assigned us a CID, so
	 * fall back to matching the response by command ident.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the first Configure Request, unless already done */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a rejection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4012
4013 static inline void set_default_fcs(struct l2cap_chan *chan)
4014 {
4015 /* FCS is enabled only in ERTM or streaming mode, if one or both
4016 * sides request it.
4017 */
4018 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4019 chan->fcs = L2CAP_FCS_NONE;
4020 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4021 chan->fcs = L2CAP_FCS_CRC16;
4022 }
4023
4024 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4025 u8 ident, u16 flags)
4026 {
4027 struct l2cap_conn *conn = chan->conn;
4028
4029 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4030 flags);
4031
4032 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4033 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4034
4035 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4036 l2cap_build_conf_rsp(chan, data,
4037 L2CAP_CONF_SUCCESS, flags), data);
4038 }
4039
4040 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4041 u16 scid, u16 dcid)
4042 {
4043 struct l2cap_cmd_rej_cid rej;
4044
4045 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4046 rej.scid = __cpu_to_le16(scid);
4047 rej.dcid = __cpu_to_le16(dcid);
4048
4049 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4050 }
4051
/* Handle a Configure Request for one of our channels.  Option data is
 * accumulated in chan->conf_req across continuation packets; once the
 * request is complete it is parsed, a response is sent, and the channel
 * is brought up when both directions have finished configuring.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration only makes sense while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4160
/* Handle a Configure Response for one of our channels.  Depending on the
 * result this commits accepted parameters, records a pending EFS
 * negotiation, retries with an adjusted request, or disconnects.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Peer accepted: commit the negotiated RFC parameters */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* Both sides pending: resolve the EFS negotiation */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* AMP channels must wait for the logical link
			 * before responding; see l2cap_logical_cfm().
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many rejected attempts: fall through and give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4272
/* Handle a Disconnection Request: acknowledge it and tear the channel
 * down immediately.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a reference so the channel survives l2cap_chan_del() until
	 * the close callback has run; close is called without the channel
	 * lock held.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4319
/* Handle a Disconnection Response: the peer acknowledged our request,
 * so finish tearing down the channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so the channel survives l2cap_chan_del() until
	 * the close callback has run; close is called without the channel
	 * lock held.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4358
4359 static inline int l2cap_information_req(struct l2cap_conn *conn,
4360 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4361 u8 *data)
4362 {
4363 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4364 u16 type;
4365
4366 if (cmd_len != sizeof(*req))
4367 return -EPROTO;
4368
4369 type = __le16_to_cpu(req->type);
4370
4371 BT_DBG("type 0x%4.4x", type);
4372
4373 if (type == L2CAP_IT_FEAT_MASK) {
4374 u8 buf[8];
4375 u32 feat_mask = l2cap_feat_mask;
4376 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4377 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4378 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4379 if (!disable_ertm)
4380 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4381 | L2CAP_FEAT_FCS;
4382 if (conn->hs_enabled)
4383 feat_mask |= L2CAP_FEAT_EXT_FLOW
4384 | L2CAP_FEAT_EXT_WINDOW;
4385
4386 put_unaligned_le32(feat_mask, rsp->data);
4387 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4388 buf);
4389 } else if (type == L2CAP_IT_FIXED_CHAN) {
4390 u8 buf[12];
4391 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4392
4393 if (conn->hs_enabled)
4394 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4395 else
4396 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4397
4398 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4399 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4400 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4401 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4402 buf);
4403 } else {
4404 struct l2cap_info_rsp rsp;
4405 rsp.type = cpu_to_le16(type);
4406 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4407 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4408 &rsp);
4409 }
4410
4411 return 0;
4412 }
4413
/* Handle an Information Response during connection setup.  Records the
 * remote feature mask and fixed channel map and, once discovery is
 * complete, starts the pending channels.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Peer refused the request: proceed without its feature info */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Follow up with a fixed channel query if supported,
		 * otherwise discovery is finished.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4476
/* Handle a Create Channel Request (AMP).  The requested amp_id selects
 * the controller: AMP_ID_BREDR falls back to a normal BR/EDR connect,
 * any other id must name an active AMP controller and the new channel is
 * then bound to its logical link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* Bind the channel to the existing AMP link to the peer;
		 * without one the CID pair is invalid.
		 */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4553
4554 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4555 {
4556 struct l2cap_move_chan_req req;
4557 u8 ident;
4558
4559 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4560
4561 ident = l2cap_get_ident(chan->conn);
4562 chan->ident = ident;
4563
4564 req.icid = cpu_to_le16(chan->scid);
4565 req.dest_amp_id = dest_amp_id;
4566
4567 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4568 &req);
4569
4570 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4571 }
4572
4573 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4574 {
4575 struct l2cap_move_chan_rsp rsp;
4576
4577 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4578
4579 rsp.icid = cpu_to_le16(chan->dcid);
4580 rsp.result = cpu_to_le16(result);
4581
4582 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4583 sizeof(rsp), &rsp);
4584 }
4585
4586 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4587 {
4588 struct l2cap_move_chan_cfm cfm;
4589
4590 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4591
4592 chan->ident = l2cap_get_ident(chan->conn);
4593
4594 cfm.icid = cpu_to_le16(chan->scid);
4595 cfm.result = cpu_to_le16(result);
4596
4597 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4598 sizeof(cfm), &cfm);
4599
4600 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4601 }
4602
4603 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4604 {
4605 struct l2cap_move_chan_cfm cfm;
4606
4607 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4608
4609 cfm.icid = cpu_to_le16(icid);
4610 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4611
4612 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4613 sizeof(cfm), &cfm);
4614 }
4615
4616 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4617 u16 icid)
4618 {
4619 struct l2cap_move_chan_cfm_rsp rsp;
4620
4621 BT_DBG("icid 0x%4.4x", icid);
4622
4623 rsp.icid = cpu_to_le16(icid);
4624 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4625 }
4626
4627 static void __release_logical_link(struct l2cap_chan *chan)
4628 {
4629 chan->hs_hchan = NULL;
4630 chan->hs_hcon = NULL;
4631
4632 /* Placeholder - release the logical link */
4633 }
4634
/* Clean up after a failed logical link setup: disconnect a channel still
 * being created, or unwind an in-progress channel move according to our
 * role in it.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator the move cannot be completed */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4665
/* Logical link is up for an AMP channel being created: attach it, send
 * the deferred EFS Configure Response and, if the remote side already
 * finished configuring, complete channel setup.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4688
/* Logical link is up during a channel move: attach it and advance the
 * move state machine according to our role and current state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Wait for local traffic to drain first if we are busy */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4722
/* Call with chan locked */
/* Logical link confirmation from the AMP layer. A non-zero status
 * aborts the pending create/move and releases the link; on success the
 * channel either finishes AMP creation (when not yet connected) or
 * continues an in-progress move.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4743
/* Start moving a channel to the other controller type. Moving off
 * BR/EDR first requires a physical AMP link (not yet implemented);
 * moving back to BR/EDR (dest id 0) can send the Move Channel Request
 * immediately.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		/* Only leave BR/EDR when the channel policy prefers AMP */
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4762
/* Physical link result for a channel being created on an AMP.
 * Outgoing (BT_CONNECT): on success send Create Channel Request on the
 * AMP, otherwise fall back to a plain BR/EDR Connect Request.
 * Incoming: answer the peer's Create Channel Request and, on success,
 * start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Kick off configuration on the new channel */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4814
4815 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4816 u8 remote_amp_id)
4817 {
4818 l2cap_move_setup(chan);
4819 chan->move_id = local_amp_id;
4820 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4821
4822 l2cap_send_move_chan_req(chan, remote_amp_id);
4823 }
4824
/* Responder-side physical link result. NOTE(review): the hci_chan
 * lookup is still a placeholder, so hchan is always NULL here and the
 * "not allowed" branch is taken unconditionally; the result parameter
 * is currently unused - confirm once the lookup is implemented.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4849
4850 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4851 {
4852 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4853 u8 rsp_result;
4854 if (result == -EINVAL)
4855 rsp_result = L2CAP_MR_BAD_ID;
4856 else
4857 rsp_result = L2CAP_MR_NOT_ALLOWED;
4858
4859 l2cap_send_move_chan_rsp(chan, rsp_result);
4860 }
4861
4862 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4863 chan->move_state = L2CAP_MOVE_STABLE;
4864
4865 /* Restart data transmission */
4866 l2cap_ertm_send(chan);
4867 }
4868
/* Invoke with locked chan */
/* Physical link confirmation for an AMP create or move. Dispatches to
 * creation handling (channel not yet connected), move cancellation (on
 * failure), or the role-specific move continuation.
 *
 * NOTE(review): the early-return path unlocks the channel while the
 * other paths leave it locked - verify callers expect this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4902
/* Handle an incoming Move Channel Request. Validates that the channel
 * may be moved (dynamic CID, ERTM/streaming mode, policy allows it,
 * destination controller exists), resolves move collisions by bd_addr
 * comparison, then becomes the move responder and replies with the
 * appropriate Move Channel Response result.
 *
 * Returns 0 when handled (a response was sent), -EPROTO on a malformed
 * command, or -EINVAL if high-speed support is disabled.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	/* Returns with the channel locked when found */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels not pinned to BR/EDR
	 * may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination AMP controller must exist and be up */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5000
/* Continue a move after a success/pending Move Channel Response,
 * advancing the per-channel move state machine. If the channel cannot
 * be found, an unconfirmed Move Channel Confirmation is sent using the
 * icid as best guess.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result re-arms the extended response timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5090
/* Handle a failed Move Channel Response. On a collision the initiator
 * demotes itself to responder (the peer won); otherwise the move is
 * rolled back. An unconfirmed confirmation is always sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5119
5120 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5121 struct l2cap_cmd_hdr *cmd,
5122 u16 cmd_len, void *data)
5123 {
5124 struct l2cap_move_chan_rsp *rsp = data;
5125 u16 icid, result;
5126
5127 if (cmd_len != sizeof(*rsp))
5128 return -EPROTO;
5129
5130 icid = le16_to_cpu(rsp->icid);
5131 result = le16_to_cpu(rsp->result);
5132
5133 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5134
5135 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5136 l2cap_move_continue(conn, icid, result);
5137 else
5138 l2cap_move_fail(conn, cmd->ident, icid, result);
5139
5140 return 0;
5141 }
5142
/* Handle a Move Channel Confirmation. A confirmed result commits the
 * move (adopting the new controller id and dropping any BR/EDR-bound
 * logical link); unconfirmed rolls back. A confirmation response is
 * always sent, even if the icid is unknown, as the spec requires.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: revert to the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5184
/* Handle a Move Channel Confirmation Response: the peer acknowledged
 * our confirmation, so commit the move and release any logical link no
 * longer needed on BR/EDR.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5219
/* Handle an LE Connection Parameter Update Request. Only valid when we
 * are the LE master; validates the requested parameters, always sends
 * an accept/reject response, and on accept performs the connection
 * update and informs the management layer of the new parameters.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the update and report it (with the controller's
		 * store hint) to userspace via mgmt.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5269
/* Handle an LE Credit Based Connection Response. On success the
 * channel adopts the peer's parameters and becomes ready; any other
 * result deletes the channel. Returns -EBADSLT when no channel matches
 * the command identifier.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* Spec minimum for LE flow control MTU/MPS is 23 */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5328
/* Dispatch one BR/EDR signaling command to its handler. Echo requests
 * are answered inline. Returns a handler error (which triggers a
 * command reject in the caller) or -EINVAL for unknown opcodes.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5408
/* Handle an LE Credit Based Connection Request: find a listening
 * channel for the PSM, check security and CID uniqueness, create the
 * new channel and answer with an LE Connect Response (or defer with a
 * pending result if setup is deferred to userspace).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum for LE flow control MTU/MPS is 23 */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Adopt addresses and the peer's requested parameters */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Userspace decides; answer pending, respond later */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5520
5521 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5522 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5523 u8 *data)
5524 {
5525 struct l2cap_le_credits *pkt;
5526 struct l2cap_chan *chan;
5527 u16 cid, credits, max_credits;
5528
5529 if (cmd_len != sizeof(*pkt))
5530 return -EPROTO;
5531
5532 pkt = (struct l2cap_le_credits *) data;
5533 cid = __le16_to_cpu(pkt->cid);
5534 credits = __le16_to_cpu(pkt->credits);
5535
5536 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5537
5538 chan = l2cap_get_chan_by_dcid(conn, cid);
5539 if (!chan)
5540 return -EBADSLT;
5541
5542 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5543 if (credits > max_credits) {
5544 BT_ERR("LE credits overflow");
5545 l2cap_send_disconn_req(chan, ECONNRESET);
5546
5547 /* Return 0 so that we don't trigger an unnecessary
5548 * command reject packet.
5549 */
5550 return 0;
5551 }
5552
5553 chan->tx_credits += credits;
5554
5555 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5556 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5557 chan->tx_credits--;
5558 }
5559
5560 if (chan->tx_credits)
5561 chan->ops->resume(chan);
5562
5563 l2cap_chan_unlock(chan);
5564
5565 return 0;
5566 }
5567
5568 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5569 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5570 u8 *data)
5571 {
5572 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5573 struct l2cap_chan *chan;
5574
5575 if (cmd_len < sizeof(*rej))
5576 return -EPROTO;
5577
5578 mutex_lock(&conn->chan_lock);
5579
5580 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5581 if (!chan)
5582 goto done;
5583
5584 l2cap_chan_lock(chan);
5585 l2cap_chan_del(chan, ECONNREFUSED);
5586 l2cap_chan_unlock(chan);
5587
5588 done:
5589 mutex_unlock(&conn->chan_lock);
5590 return 0;
5591 }
5592
/* Dispatch one LE signaling command to its handler. Returns a handler
 * error (which triggers a command reject in the caller) or -EINVAL for
 * unknown opcodes.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5639
/* Process one inbound LE signaling PDU: validate the link type and
 * command header, dispatch the single command it carries, and send a
 * command reject if the handler failed. Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* LE signaling PDUs carry exactly one command; ident 0 is invalid */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5680
/* Process an inbound BR/EDR signaling PDU, which may carry several
 * commands back to back. Each command is dispatched in turn; a failing
 * handler produces a command reject. The raw PDU is also delivered to
 * raw sockets. Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	/* Walk the PDU one command at a time */
	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5729
5730 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5731 {
5732 u16 our_fcs, rcv_fcs;
5733 int hdr_size;
5734
5735 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5736 hdr_size = L2CAP_EXT_HDR_SIZE;
5737 else
5738 hdr_size = L2CAP_ENH_HDR_SIZE;
5739
5740 if (chan->fcs == L2CAP_FCS_CRC16) {
5741 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5742 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5743 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5744
5745 if (our_fcs != rcv_fcs)
5746 return -EBADMSG;
5747 }
5748 return 0;
5749 }
5750
/* Send the peer a frame carrying the F-bit: RNR if we are locally
 * busy, otherwise try to piggyback it on pending I-frames, falling
 * back to an RR s-frame if none carried it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just became un-busy; resume retransmission timing */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5784
/* Chain new_frag onto skb's frag_list, advancing the caller's tail
 * pointer and updating the aggregate length accounting on skb.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5803
/* Reassemble a segmented SDU from an incoming I-frame according to its
 * SAR bits. Takes ownership of skb: on the paths that buffer it for
 * later, skb is set to NULL so the error path does not double-free;
 * any error discards both the frame and the partial SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A partial SDU in progress makes this frame invalid */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* SDU claimed larger than this fragment but frame says
		 * otherwise: invalid (err stays -EINVAL)
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced SDU length is invalid */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5885
/* Re-segment queued frames after a channel move changes the MPS.
 * Currently a placeholder that always succeeds.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5891
5892 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5893 {
5894 u8 event;
5895
5896 if (chan->mode != L2CAP_MODE_ERTM)
5897 return;
5898
5899 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5900 l2cap_tx(chan, NULL, NULL, event);
5901 }
5902
/* Drain the SREJ queue: deliver buffered I-frames to reassembly in
 * sequence order until a gap (or local busy) stops us. When the queue
 * empties, return to the normal receive state and ack the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found - stop delivering */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5936
/* Handle an incoming SREJ s-frame: retransmit the single requested
 * I-frame, enforcing the retry limit, and track F-bit/poll bookkeeping
 * so duplicate SREJs after a poll are not retransmitted twice.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A reqseq equal to next_tx_seq asks for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Answer the poll with the F-bit set */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final SREJ repeats
			 * one already acted on
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5994
/* Handle a received Reject (REJ) S-frame by retransmitting all
 * unacknowledged I-frames starting at control->reqseq, or
 * disconnecting on protocol violations / retry exhaustion.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for a sequence number we have not sent yet is a
	 * protocol violation.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means no retransmission limit */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if the REJ was not already handled
		 * while waiting for the F-bit.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6031
/* Classify the txseq of a received I-frame relative to the receive
 * window so the RX state machines can decide how to treat the frame:
 * expected, duplicate, SREJ-related, evidence of missing frames, or
 * invalid (with or without the option to ignore).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Additional classifications apply while SREJs are outstanding */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than the expected sequence means a re-delivery */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6117
/* ERTM receive state machine handler for the normal RECV state.
 *
 * Processes I-frames (delivering or queueing them) and S-frame events.
 * Any skb that is neither consumed by reassembly nor stored on the
 * SREJ queue is freed before returning.  Returns 0 or a negative
 * error from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;	/* true once skb ownership moved on */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote no longer busy: restart retransmission
			 * timer if frames are still outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Ownership of the skb was not passed on; release it here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6251
/* ERTM receive state machine handler for the SREJ_SENT state (one or
 * more selective rejects outstanding).
 *
 * Incoming I-frames are queued on srej_q until the requested
 * retransmissions arrive and the sequence can be replayed in order.
 * Any skb not queued is freed before returning.  Returns 0 or a
 * negative error from reassembling queued frames.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;	/* true once skb ownership moved on */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The frame at the head of the SREJ list has
			 * arrived; try to replay queued frames in order.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the SREJ tail */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Ownership of the skb was not passed on; release it here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6394
/* Finalize a channel move: resume normal reception and adopt the MTU
 * of the link the channel now runs on, then re-segment queued data.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	/* Use the high-speed (AMP) block MTU when moved to an hs link */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}
6408
/* RX handler for the WAIT_P state: only a frame carrying the P-bit is
 * acceptable.  On receipt, the transmit side is rewound to the
 * receiver's expected sequence, the move is finished, and the frame's
 * event (if not an I-frame) is re-processed in the RECV state.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not valid while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6446
/* RX handler for the WAIT_F state: only a frame carrying the F-bit is
 * acceptable.  On receipt, normal reception resumes, the transmit
 * side is rewound to the receiver's expected sequence, the MTU is
 * updated for the current link, and the frame is re-processed in the
 * RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the high-speed (AMP) block MTU when moved to an hs link */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6484
6485 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6486 {
6487 /* Make sure reqseq is for a packet that has been sent but not acked */
6488 u16 unacked;
6489
6490 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6491 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6492 }
6493
6494 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6495 struct sk_buff *skb, u8 event)
6496 {
6497 int err = 0;
6498
6499 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6500 control, skb, event, chan->rx_state);
6501
6502 if (__valid_reqseq(chan, control->reqseq)) {
6503 switch (chan->rx_state) {
6504 case L2CAP_RX_STATE_RECV:
6505 err = l2cap_rx_state_recv(chan, control, skb, event);
6506 break;
6507 case L2CAP_RX_STATE_SREJ_SENT:
6508 err = l2cap_rx_state_srej_sent(chan, control, skb,
6509 event);
6510 break;
6511 case L2CAP_RX_STATE_WAIT_P:
6512 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6513 break;
6514 case L2CAP_RX_STATE_WAIT_F:
6515 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6516 break;
6517 default:
6518 /* shut it down */
6519 break;
6520 }
6521 } else {
6522 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6523 control->reqseq, chan->next_tx_seq,
6524 chan->expected_ack_seq);
6525 l2cap_send_disconn_req(chan, ECONNRESET);
6526 }
6527
6528 return err;
6529 }
6530
/* Streaming-mode reception: deliver in-sequence I-frames, and discard
 * anything else (streaming mode has no retransmission, so a skipped
 * frame also invalidates any partially reassembled SDU).  Always
 * consumes the skb; always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* A frame was lost; any partial SDU can never be
		 * completed, so drop it along with this frame.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the received frame regardless of outcome */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6568
/* Entry point for ERTM and streaming-mode PDUs.  Validates FCS,
 * payload length, and F/P bit combinations, then hands the frame to
 * the ERTM state machine or the streaming receiver.  The skb is
 * always consumed (directly or by the called path); returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude SDU-length and FCS fields from the payload length */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload larger than the negotiated MPS is a protocol error */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame function field to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6656
/* Return LE flow-control credits to the remote sender once our
 * remaining rx credits drop below half of le_max_credits, topping
 * them back up to le_max_credits.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6682
/* Receive one LE credit-based flow control PDU and reassemble SDUs.
 *
 * Early validation failures (no credits / PDU exceeds imtu) return
 * -ENOBUFS and leave the skb for the caller to free.  Once credit
 * accounting has been done, the skb is handled internally and 0 is
 * returned even on error (see the comment at the end).
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver it straight away */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb ownership moves into the partial SDU */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6772
/* Dispatch an incoming PDU on a connection-oriented CID to the owning
 * channel according to its mode.  Always consumes the skb.
 *
 * NOTE(review): chan appears to be returned locked by
 * l2cap_get_chan_by_scid() — the A2MP path locks explicitly and every
 * exit path unlocks at "done"; confirm against that helper.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* A2MP data may arrive before its channel exists */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Negative return: skb is still ours to free */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6839
/* Deliver a connectionless-channel PDU to a channel bound to the
 * given PSM on a BR/EDR link.  The skb is consumed on every path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data is only defined for BR/EDR (ACL) links */
	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6872
6873 static void l2cap_att_channel(struct l2cap_conn *conn,
6874 struct sk_buff *skb)
6875 {
6876 struct hci_conn *hcon = conn->hcon;
6877 struct l2cap_chan *chan;
6878
6879 if (hcon->type != LE_LINK)
6880 goto drop;
6881
6882 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6883 &hcon->src, &hcon->dst);
6884 if (!chan)
6885 goto drop;
6886
6887 BT_DBG("chan %p, len %d", chan, skb->len);
6888
6889 if (chan->imtu < skb->len)
6890 goto drop;
6891
6892 if (!chan->ops->recv(chan, skb))
6893 return;
6894
6895 drop:
6896 kfree_skb(skb);
6897 }
6898
/* Demultiplex a complete L2CAP PDU to the appropriate channel handler
 * based on its CID.  Frames arriving before the HCI connection is
 * fully established are parked on pending_rx and replayed later by
 * process_pending_rx().  The skb is consumed on every path.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_type(hcon, hcon->dst_type))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless PDUs carry the PSM in the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6962
6963 static void process_pending_rx(struct work_struct *work)
6964 {
6965 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6966 pending_rx_work);
6967 struct sk_buff *skb;
6968
6969 BT_DBG("");
6970
6971 while ((skb = skb_dequeue(&conn->pending_rx)))
6972 l2cap_recv_frame(conn, skb);
6973 }
6974
/* Return the L2CAP connection object for an HCI connection, creating
 * and initializing it (plus its HCI channel) on first use.  Returns
 * NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already created for this link */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Hold the HCI connection while the L2CAP conn exists */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may have their own MTU; fall back to the ACL MTU */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links use the security timer, BR/EDR the info timer */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7037
7038 static bool is_valid_psm(u16 psm, u8 dst_type) {
7039 if (!psm)
7040 return false;
7041
7042 if (bdaddr_type_is_le(dst_type))
7043 return (psm <= 0x00ff);
7044
7045 /* PSM must be odd and lsb of upper byte must be 0 */
7046 return ((psm & 0x0101) == 0x0001);
7047 }
7048
/* Initiate an outgoing channel connection to dst/dst_type, identified
 * by psm (connection-oriented channels) and/or cid (fixed channels).
 * Creates or reuses the underlying ACL or LE link as needed and
 * starts channel setup.  Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* A raw channel needs neither a valid PSM nor a CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming only if not disabled via module param */
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* Connection role depends on whether we are currently
		 * advertising.
		 */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Reject a destination CID already in use on this link */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* Link may already be up; advance channel state immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7208 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7209
7210 /* ---- L2CAP interface with lower layer (HCI) ---- */
7211
7212 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7213 {
7214 int exact = 0, lm1 = 0, lm2 = 0;
7215 struct l2cap_chan *c;
7216
7217 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7218
7219 /* Find listening sockets and check their link_mode */
7220 read_lock(&chan_list_lock);
7221 list_for_each_entry(c, &chan_list, global_l) {
7222 if (c->state != BT_LISTEN)
7223 continue;
7224
7225 if (!bacmp(&c->src, &hdev->bdaddr)) {
7226 lm1 |= HCI_LM_ACCEPT;
7227 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7228 lm1 |= HCI_LM_MASTER;
7229 exact++;
7230 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7231 lm2 |= HCI_LM_ACCEPT;
7232 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7233 lm2 |= HCI_LM_MASTER;
7234 }
7235 }
7236 read_unlock(&chan_list_lock);
7237
7238 return exact ? lm1 : lm2;
7239 }
7240
7241 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7242 {
7243 struct l2cap_conn *conn;
7244
7245 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7246
7247 if (!status) {
7248 conn = l2cap_conn_add(hcon);
7249 if (conn)
7250 l2cap_conn_ready(conn);
7251 } else {
7252 l2cap_conn_del(hcon, bt_to_errno(status));
7253 }
7254 }
7255
7256 int l2cap_disconn_ind(struct hci_conn *hcon)
7257 {
7258 struct l2cap_conn *conn = hcon->l2cap_data;
7259
7260 BT_DBG("hcon %p", hcon);
7261
7262 if (!conn)
7263 return HCI_ERROR_REMOTE_USER_TERM;
7264 return conn->disc_reason;
7265 }
7266
/* Disconnection-complete confirmation from the HCI layer: tear down
 * all L2CAP state for @hcon, translating the HCI reason code to errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7273
7274 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7275 {
7276 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7277 return;
7278
7279 if (encrypt == 0x00) {
7280 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7281 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7282 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7283 chan->sec_level == BT_SECURITY_FIPS)
7284 l2cap_chan_close(chan, ECONNREFUSED);
7285 } else {
7286 if (chan->sec_level == BT_SECURITY_MEDIUM)
7287 __clear_chan_timer(chan);
7288 }
7289 }
7290
/* Security (authentication/encryption) change confirmation from the
 * HCI layer.
 *
 * @status:  HCI error code, 0 on success.
 * @encrypt: new link encryption state, non-zero when encrypted.
 *
 * Walks every channel on the connection and advances its state machine
 * based on the outcome of the security procedure.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* On LE, successful encryption triggers SMP key
		 * distribution; in either case the SMP security timer
		 * no longer needs to fire.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels are not affected by link security here */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* The fixed ATT channel becomes ready as soon as the
		 * LE link is successfully encrypted.
		 */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels that still have a connect request in
		 * flight; their security result is handled elsewhere.
		 */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels: resume data flow and apply the
		 * encryption-change policy (timers / forced close).
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was waiting on security:
			 * answer the pending Connection Request.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse and schedule
				 * disconnect.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately start configuration
			 * unless a Configure Request was already sent.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7395
/* Inbound ACL data from the HCI layer.
 *
 * Reassembles L2CAP PDUs from ACL fragments: a start fragment carries
 * the Basic L2CAP header (and thus the total PDU length); continuation
 * fragments are appended to conn->rx_skb until rx_len reaches zero.
 * Complete PDUs are handed to l2cap_recv_frame(), which takes ownership
 * of the skb.  In all other paths the incoming @skb is consumed here
 * (freed via the drop label, which the successful break paths also
 * reach intentionally — the data has already been copied).  Returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means
		 * the previous PDU was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received: l2cap_recv_frame
			 * takes ownership of skb, so do not fall
			 * through to drop.
			 */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7500
7501 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7502 {
7503 struct l2cap_chan *c;
7504
7505 read_lock(&chan_list_lock);
7506
7507 list_for_each_entry(c, &chan_list, global_l) {
7508 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7509 &c->src, &c->dst,
7510 c->state, __le16_to_cpu(c->psm),
7511 c->scid, c->dcid, c->imtu, c->omtu,
7512 c->sec_level, c->mode);
7513 }
7514
7515 read_unlock(&chan_list_lock);
7516
7517 return 0;
7518 }
7519
/* debugfs open: bind l2cap_debugfs_show as a single-shot seq_file */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7524
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
7531
/* debugfs dentry for the "l2cap" file; removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7533
7534 int __init l2cap_init(void)
7535 {
7536 int err;
7537
7538 err = l2cap_init_sockets();
7539 if (err < 0)
7540 return err;
7541
7542 if (IS_ERR_OR_NULL(bt_debugfs))
7543 return 0;
7544
7545 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7546 NULL, &l2cap_debugfs_fops);
7547
7548 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7549 &le_max_credits);
7550 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7551 &le_default_mps);
7552
7553 return 0;
7554 }
7555
/* Module exit: remove the debugfs entry and unregister the socket layer */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7561
/* Module parameter: allows disabling ERTM/streaming modes at load time
 * (checked in the channel mode validation above).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");