Bluetooth: move l2cap_sock_recvmsg() to l2cap_sock.c
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
58 #define VERSION "2.15"
59
/* Module option: when non-zero, ERTM/streaming modes are not offered —
 * see l2cap_mode_supported(). (Presumably exposed via module_param
 * elsewhere in the file — TODO confirm.) */
int disable_ertm;

/* Locally supported feature mask advertised to peers. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed-channel bitmap; bit 0x02 presumably marks the
 * signalling channel — verify against the info-response code. */
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Workqueue backing l2cap_busy_work() (ERTM local-busy handling). */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets, protected by its own rwlock. */
struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

/* Forward declarations for helpers defined later in this file. */
static void l2cap_busy_work(struct work_struct *work);

static void l2cap_sock_close(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
/* ---- L2CAP timers ---- */
/* (Re)arm the socket timer to fire 'timeout' jiffies from now. */
void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
86
/* Cancel a pending socket timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
92
93 /* ---- L2CAP channels ---- */
94 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
95 {
96 struct sock *s;
97 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
98 if (l2cap_pi(s)->dcid == cid)
99 break;
100 }
101 return s;
102 }
103
104 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
105 {
106 struct sock *s;
107 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
108 if (l2cap_pi(s)->scid == cid)
109 break;
110 }
111 return s;
112 }
113
/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	/* The socket lock is taken before the list lock is dropped so the
	 * channel cannot disappear between lookup and return. */
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
126
127 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
128 {
129 struct sock *s;
130 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
131 if (l2cap_pi(s)->ident == ident)
132 break;
133 }
134 return s;
135 }
136
/* Find the channel matching a signalling identifier.
 * Returns the socket locked (bh_lock_sock), or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	/* Lock the socket before releasing the list lock, as in
	 * l2cap_get_chan_by_scid(). */
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
147
148 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
149 {
150 u16 cid = L2CAP_CID_DYN_START;
151
152 for (; cid < L2CAP_CID_DYN_END; cid++) {
153 if (!__l2cap_get_chan_by_scid(l, cid))
154 return cid;
155 }
156
157 return 0;
158 }
159
/* Insert the socket at the head of the connection's channel list.
 * Caller must hold the list write lock; takes a socket reference that
 * l2cap_chan_unlink() releases. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
171
/* Remove the socket from the connection's channel list and drop the
 * reference taken by __l2cap_chan_link(). Takes the list lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* __sock_put: the list's reference only; the socket stays alive. */
	__sock_put(sk);
}
188
189 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
190 {
191 struct l2cap_chan_list *l = &conn->chan_list;
192
193 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
194 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
195
196 conn->disc_reason = 0x13;
197
198 l2cap_pi(sk)->conn = conn;
199
200 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
201 /* Alloc CID for connection-oriented socket */
202 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
203 } else if (sk->sk_type == SOCK_DGRAM) {
204 /* Connectionless socket */
205 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
206 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
207 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
208 } else {
209 /* Raw socket can send/recv signalling messages only */
210 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
211 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
212 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
213 }
214
215 __l2cap_chan_link(l, sk);
216
217 if (parent)
218 bt_accept_enqueue(parent, sk);
219 }
220
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	/* Wake whoever waits on this socket: the listening parent for a
	 * not-yet-accepted channel, otherwise the socket owner. */
	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM keeps extra per-channel state: stop its timers, drop
	 * queued SREJ/busy frames and free pending SREJ list entries. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
269
270 static inline u8 l2cap_get_auth_type(struct sock *sk)
271 {
272 if (sk->sk_type == SOCK_RAW) {
273 switch (l2cap_pi(sk)->sec_level) {
274 case BT_SECURITY_HIGH:
275 return HCI_AT_DEDICATED_BONDING_MITM;
276 case BT_SECURITY_MEDIUM:
277 return HCI_AT_DEDICATED_BONDING;
278 default:
279 return HCI_AT_NO_BONDING;
280 }
281 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
282 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
283 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
284
285 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
286 return HCI_AT_NO_BONDING_MITM;
287 else
288 return HCI_AT_NO_BONDING;
289 } else {
290 switch (l2cap_pi(sk)->sec_level) {
291 case BT_SECURITY_HIGH:
292 return HCI_AT_GENERAL_BONDING_MITM;
293 case BT_SECURITY_MEDIUM:
294 return HCI_AT_GENERAL_BONDING;
295 default:
296 return HCI_AT_NO_BONDING;
297 }
298 }
299 }
300
301 /* Service level security */
302 static inline int l2cap_check_security(struct sock *sk)
303 {
304 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
305 __u8 auth_type;
306
307 auth_type = l2cap_get_auth_type(sk);
308
309 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
310 auth_type);
311 }
312
/* Get next available signalling command identificator.
 * 1 - 128 are used by kernel.
 * 129 - 199 are reserved.
 * 200 - 254 are used by utilities like l2ping, etc.
 */
u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	spin_lock_bh(&conn->lock);

	/* Wrap back to 1; identifier 0 is never handed out. */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
334
335 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
336 {
337 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
338 u8 flags;
339
340 BT_DBG("code 0x%2.2x", code);
341
342 if (!skb)
343 return;
344
345 if (lmp_no_flush_capable(conn->hcon->hdev))
346 flags = ACL_START_NO_FLUSH;
347 else
348 flags = ACL_START;
349
350 hci_send_acl(conn->hcon, skb, flags);
351 }
352
/* Build and transmit one ERTM supervisory (S-) frame carrying
 * 'control'. Any pending F-bit/P-bit request in conn_state is consumed
 * here, and an FCS is appended when the channel negotiated CRC16. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + 16-bit control */
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* A requested final/poll bit is sent exactly once, then cleared. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field written above. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
404
/* Send a receiver-ready (RR) S-frame — or receiver-not-ready (RNR)
 * while locally busy — acknowledging everything up to buffer_seq. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		/* Remember that an RNR went out so it can be undone later. */
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
417
418 static inline int __l2cap_no_conn_pending(struct sock *sk)
419 {
420 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
421 }
422
/* Advance an outgoing connection-oriented channel: once the remote's
 * feature mask is known, send the Connect Request (security and no
 * pending request permitting); otherwise first issue an Information
 * Request for the feature mask. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: wait for it to finish. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the Information Response. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
456
457 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
458 {
459 u32 local_feat_mask = l2cap_feat_mask;
460 if (!disable_ertm)
461 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
462
463 switch (mode) {
464 case L2CAP_MODE_ERTM:
465 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
466 case L2CAP_MODE_STREAMING:
467 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
468 default:
469 return 0x00;
470 }
471 }
472
/* Abort data transfer on the channel and send a Disconnect Request.
 * Leaves the socket in BT_DISCONN with sk_err set to 'err'. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* The channel is going down: drop all queued outgoing data. */
	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
496
497 /* ---- L2CAP connections ---- */
498 static void l2cap_conn_start(struct l2cap_conn *conn)
499 {
500 struct l2cap_chan_list *l = &conn->chan_list;
501 struct sock_del_list del, *tmp1, *tmp2;
502 struct sock *sk;
503
504 BT_DBG("conn %p", conn);
505
506 INIT_LIST_HEAD(&del.list);
507
508 read_lock(&l->lock);
509
510 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
511 bh_lock_sock(sk);
512
513 if (sk->sk_type != SOCK_SEQPACKET &&
514 sk->sk_type != SOCK_STREAM) {
515 bh_unlock_sock(sk);
516 continue;
517 }
518
519 if (sk->sk_state == BT_CONNECT) {
520 struct l2cap_conn_req req;
521
522 if (!l2cap_check_security(sk) ||
523 !__l2cap_no_conn_pending(sk)) {
524 bh_unlock_sock(sk);
525 continue;
526 }
527
528 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
529 conn->feat_mask)
530 && l2cap_pi(sk)->conf_state &
531 L2CAP_CONF_STATE2_DEVICE) {
532 tmp1 = kzalloc(sizeof(struct sock_del_list),
533 GFP_ATOMIC);
534 tmp1->sk = sk;
535 list_add_tail(&tmp1->list, &del.list);
536 bh_unlock_sock(sk);
537 continue;
538 }
539
540 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
541 req.psm = l2cap_pi(sk)->psm;
542
543 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
544 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
545
546 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
547 L2CAP_CONN_REQ, sizeof(req), &req);
548
549 } else if (sk->sk_state == BT_CONNECT2) {
550 struct l2cap_conn_rsp rsp;
551 char buf[128];
552 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
553 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
554
555 if (l2cap_check_security(sk)) {
556 if (bt_sk(sk)->defer_setup) {
557 struct sock *parent = bt_sk(sk)->parent;
558 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
559 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
560 parent->sk_data_ready(parent, 0);
561
562 } else {
563 sk->sk_state = BT_CONFIG;
564 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
565 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
566 }
567 } else {
568 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
569 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
570 }
571
572 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
573 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
574
575 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
576 rsp.result != L2CAP_CR_SUCCESS) {
577 bh_unlock_sock(sk);
578 continue;
579 }
580
581 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
582 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
583 l2cap_build_conf_req(sk, buf), buf);
584 l2cap_pi(sk)->num_conf_req++;
585 }
586
587 bh_unlock_sock(sk);
588 }
589
590 read_unlock(&l->lock);
591
592 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
593 bh_lock_sock(tmp1->sk);
594 __l2cap_sock_close(tmp1->sk, ECONNRESET);
595 bh_unlock_sock(tmp1->sk);
596 list_del(&tmp1->list);
597 kfree(tmp1);
598 }
599 }
600
/* Called when the underlying ACL link becomes usable: raw and
 * connectionless sockets are marked connected immediately, while
 * connection-oriented channels waiting in BT_CONNECT start their
 * L2CAP-level handshake. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* No channel handshake needed for these types. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
626
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	/* Only channels that asked for a reliable link get the error. */
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
644
/* Information Request timed out: give up on the feature exchange and
 * proceed with the pending channels anyway. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
654
/* Create the L2CAP connection object for an ACL link (or return the
 * one already attached). Returns NULL on allocation failure, or
 * unchanged hcon->l2cap_data when 'status' reports a failed link. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Already set up, or the link itself failed: nothing to create. */
	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	conn->disc_reason = 0x13;

	return conn;
}
687
/* Tear down an L2CAP connection: kill every channel on it, stop the
 * info timer and free the connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
714
/* Locked wrapper around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
722
723 /* ---- Socket interface ---- */
724
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match when one exists,
 * otherwise a socket bound to BDADDR_ANY with the same PSM.
 */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* 'node' is non-NULL only when the loop broke on an exact match. */
	return node ? sk : sk1;
}
754
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent itself closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
768
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
void l2cap_sock_kill(struct sock *sk)
{
	/* Still attached to a struct socket, or not yet zapped:
	 * not ours to free. */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
784
/* Shut down one socket according to its current state.
 * Caller holds the socket lock. */
void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Orderly shutdown: ask the peer to disconnect and
			 * bound the wait with the socket timer. */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Incoming connection never answered: reject it. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			sk->sk_state = BT_DISCONN;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
839
/* Close and (if already orphaned) free a socket.
 * Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
849
/* Set up (or reuse) an ACL link to the destination and attach this
 * socket to it as a new channel. Returns 0 on success; the connection
 * then completes asynchronously via the HCI event callbacks. */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	auth_type = l2cap_get_auth_type(sk);

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		/* Drop the reference hci_connect() gave us. */
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* If the ACL link is already up, the channel handshake can start
	 * right away instead of waiting for the connect-complete event. */
	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
909
/* Sleep until every transmitted ERTM frame has been acknowledged, the
 * socket errors out, or a signal arrives. Called with the socket lock
 * held; the lock is dropped while sleeping. */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Keep polling in HZ/5 slices rather than sleeping forever. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
940
/* ERTM monitor timer expired: the peer did not answer our poll.
 * Retry until retry_count reaches remote_max_tx, then disconnect. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	/* Poll the peer again with the P-bit set. */
	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
960
/* ERTM retransmission timer expired: poll the peer (P-bit) and start
 * waiting for its F-bit response under the monitor timer. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* Expect a frame with the F-bit before resuming transmission. */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
976
/* Free acknowledged frames from the head of the TX queue, up to (but
 * not including) expected_ack_seq. Stops the retransmission timer once
 * nothing unacknowledged remains. */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
995
996 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
997 {
998 struct l2cap_pinfo *pi = l2cap_pi(sk);
999 struct hci_conn *hcon = pi->conn->hcon;
1000 u16 flags;
1001
1002 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1003
1004 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1005 flags = ACL_START_NO_FLUSH;
1006 else
1007 flags = ACL_START;
1008
1009 hci_send_acl(hcon, skb, flags);
1010 }
1011
/* Streaming-mode transmit: drain the entire TX queue, stamping each
 * frame with the next TxSeq and (when negotiated) an FCS. No copy is
 * kept — streaming mode has no retransmission. */
static void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS goes in the last 2 bytes, over all the rest. */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		/* TxSeq is a 6-bit counter: modulo 64. */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1033
1034 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1035 {
1036 struct l2cap_pinfo *pi = l2cap_pi(sk);
1037 struct sk_buff *skb, *tx_skb;
1038 u16 control, fcs;
1039
1040 skb = skb_peek(TX_QUEUE(sk));
1041 if (!skb)
1042 return;
1043
1044 do {
1045 if (bt_cb(skb)->tx_seq == tx_seq)
1046 break;
1047
1048 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1049 return;
1050
1051 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1052
1053 if (pi->remote_max_tx &&
1054 bt_cb(skb)->retries == pi->remote_max_tx) {
1055 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1056 return;
1057 }
1058
1059 tx_skb = skb_clone(skb, GFP_ATOMIC);
1060 bt_cb(skb)->retries++;
1061 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1062
1063 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1064 control |= L2CAP_CTRL_FINAL;
1065 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1066 }
1067
1068 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1069 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1070
1071 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1072
1073 if (pi->fcs == L2CAP_FCS_CRC16) {
1074 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1075 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1076 }
1077
1078 l2cap_do_send(sk, tx_skb);
1079 }
1080
1081 static int l2cap_ertm_send(struct sock *sk)
1082 {
1083 struct sk_buff *skb, *tx_skb;
1084 struct l2cap_pinfo *pi = l2cap_pi(sk);
1085 u16 control, fcs;
1086 int nsent = 0;
1087
1088 if (sk->sk_state != BT_CONNECTED)
1089 return -ENOTCONN;
1090
1091 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1092
1093 if (pi->remote_max_tx &&
1094 bt_cb(skb)->retries == pi->remote_max_tx) {
1095 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1096 break;
1097 }
1098
1099 tx_skb = skb_clone(skb, GFP_ATOMIC);
1100
1101 bt_cb(skb)->retries++;
1102
1103 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1104 control &= L2CAP_CTRL_SAR;
1105
1106 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1107 control |= L2CAP_CTRL_FINAL;
1108 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1109 }
1110 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1111 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1112 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1113
1114
1115 if (pi->fcs == L2CAP_FCS_CRC16) {
1116 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1117 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1118 }
1119
1120 l2cap_do_send(sk, tx_skb);
1121
1122 __mod_retrans_timer();
1123
1124 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1125 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1126
1127 pi->unacked_frames++;
1128 pi->frames_sent++;
1129
1130 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1131 sk->sk_send_head = NULL;
1132 else
1133 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1134
1135 nsent++;
1136 }
1137
1138 return nsent;
1139 }
1140
/* REJ recovery: rewind the transmit pointer to the oldest queued frame
 * and resend from expected_ack_seq onward. Returns what
 * l2cap_ertm_send() returns. */
static int l2cap_retransmit_frames(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int ret;

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = TX_QUEUE(sk)->next;

	pi->next_tx_seq = pi->expected_ack_seq;
	ret = l2cap_ertm_send(sk);
	return ret;
}
1153
/* Acknowledge received I-frames: prefer piggybacking the ack on
 * pending outgoing data, otherwise send an explicit RR — or RNR when
 * locally busy. */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* I-frames carry ReqSeq, so sending data acknowledges implicitly. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1174
/* Send an SREJ S-frame with the F-bit set, requesting the newest entry
 * on the SREJ list (the list tail). */
static void l2cap_send_srejtail(struct sock *sk)
{
	struct srej_list *tail;
	u16 control;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(l2cap_pi(sk), control);
}
1188
/* Copy 'len' bytes of user data from 'msg' into 'skb': the first
 * 'count' bytes go into the main buffer (sized by the caller), the
 * remainder into MTU-sized fragments chained on frag_list. Returns the
 * number of bytes copied or a negative errno; on error the caller
 * frees skb, which also frees any fragments already attached. */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1220
/* Build a connectionless PDU: L2CAP header plus a 2-byte PSM, followed
 * by the user data. Returns the skb or an ERR_PTR. */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* The first buffer carries at most one link MTU; anything beyond
	 * is fragmented by l2cap_skbuff_fromiovec(). */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1249
/* Build a basic-mode PDU: plain L2CAP header followed by the payload
 * copied from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* Linear part holds at most one HCI fragment worth of payload */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1277
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field, optional 2-byte SDU length (@sdulen != 0, SAR start frames),
 * the payload, and - when FCS is enabled - a 2-byte trailer that is
 * written as 0 here (placeholder; presumably filled with the real
 * CRC at transmit time - verify against the send path).
 *
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SAR start frames carry the total SDU length */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the FCS trailer */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1322
/* Split an SDU that exceeds the remote MPS into a Start/Continue/End
 * sequence of I-frame PDUs.  Frames are built on a private queue
 * first so that a mid-stream allocation failure leaves TX_QUEUE
 * untouched, then the whole batch is spliced onto the transmit queue.
 *
 * Returns the SDU size on success or a negative error.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* Start frame: remote_mps bytes of payload, sdulen = total len */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	/* splice_tail does not reinit sar_queue, so .next still points
	 * at the first spliced skb - used as the new send head */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1368
/*
 * Socket-layer sendmsg for L2CAP.
 *
 * Dispatches on socket type and channel mode:
 *  - SOCK_DGRAM: build one connectionless PDU and send it directly;
 *  - basic mode: one PDU, bounded by the outgoing MTU;
 *  - ERTM/streaming: segment the SDU into I-frames when it exceeds
 *    the remote MPS, queue them, then push via the mode-specific
 *    transmit routine.
 *
 * Returns the number of bytes accepted or a negative errno.
 */
int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			/* Remember where transmission should start */
			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(sk);
		} else {
			/* Busy remote while we wait for the F-bit: leave
			 * the frames queued, they go out later */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
				err = len;
				break;
			}
			err = l2cap_ertm_send(sk);
		}

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}
1470
/*
 * Socket-layer shutdown for L2CAP.
 *
 * Note: @how is ignored; the socket is always shut down in both
 * directions (SHUTDOWN_MASK).  For ERTM channels, waits until all
 * transmitted I-frames are acknowledged before closing.  Honours
 * SO_LINGER by waiting for BT_CLOSED within sk_lingertime.
 */
int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* Make sure the peer acked everything we sent */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
					sk->sk_lingertime);
	}

	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
1501
1502 static void l2cap_chan_ready(struct sock *sk)
1503 {
1504 struct sock *parent = bt_sk(sk)->parent;
1505
1506 BT_DBG("sk %p, parent %p", sk, parent);
1507
1508 l2cap_pi(sk)->conf_state = 0;
1509 l2cap_sock_clear_timer(sk);
1510
1511 if (!parent) {
1512 /* Outgoing channel.
1513 * Wake up socket sleeping on connect.
1514 */
1515 sk->sk_state = BT_CONNECTED;
1516 sk->sk_state_change(sk);
1517 } else {
1518 /* Incoming channel.
1519 * Wake up socket sleeping on accept.
1520 */
1521 parent->sk_data_ready(parent, 0);
1522 }
1523 }
1524
1525 /* Copy frame to all raw sockets on that connection */
1526 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1527 {
1528 struct l2cap_chan_list *l = &conn->chan_list;
1529 struct sk_buff *nskb;
1530 struct sock *sk;
1531
1532 BT_DBG("conn %p", conn);
1533
1534 read_lock(&l->lock);
1535 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1536 if (sk->sk_type != SOCK_RAW)
1537 continue;
1538
1539 /* Don't send frame to the socket it came from */
1540 if (skb->sk == sk)
1541 continue;
1542 nskb = skb_clone(skb, GFP_ATOMIC);
1543 if (!nskb)
1544 continue;
1545
1546 if (sock_queue_rcv_skb(sk, nskb))
1547 kfree_skb(nskb);
1548 }
1549 read_unlock(&l->lock);
1550 }
1551
/* ---- L2CAP signalling commands ---- */

/* Allocate and fill an skb carrying one signalling command on the
 * L2CAP signalling channel: L2CAP header, command header, then @dlen
 * bytes of @data.  Payloads larger than one HCI fragment (conn->mtu)
 * are split into continuation skbs on the frag_list.
 *
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the first skb with whatever payload still fits */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the fragments already chained on skb as well */
	kfree_skb(skb);
	return NULL;
}
1611
/* Parse one configuration option at *ptr and advance *ptr past it.
 * Returns the option's type and payload length through @type/@olen.
 * 1/2/4-byte values are decoded from little-endian into *val; for any
 * other length *val is a pointer into the option buffer itself (no
 * copy is made - the caller must not outlive the buffer).
 *
 * Returns the total size consumed so callers can decrement their
 * remaining-length counter.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a raw pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1644
1645 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1646 {
1647 struct l2cap_conf_opt *opt = *ptr;
1648
1649 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1650
1651 opt->type = type;
1652 opt->len = len;
1653
1654 switch (len) {
1655 case 1:
1656 *((u8 *) opt->val) = val;
1657 break;
1658
1659 case 2:
1660 put_unaligned_le16(val, opt->val);
1661 break;
1662
1663 case 4:
1664 put_unaligned_le32(val, opt->val);
1665 break;
1666
1667 default:
1668 memcpy(opt->val, (void *) val, len);
1669 break;
1670 }
1671
1672 *ptr += L2CAP_CONF_OPT_SIZE + len;
1673 }
1674
/* Ack timer expiry: push out a pending acknowledgement for received
 * I-frames.  Runs in timer (softirq) context, hence bh_lock_sock.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
1683
/* Reset the ERTM machinery for a freshly configured channel: clear
 * the sequence/ack counters, arm the retransmission, monitor and ack
 * timers, initialize the SREJ and busy queues and the deferred busy
 * work, and route backlog processing through the ERTM receive path.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1706
1707 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1708 {
1709 switch (mode) {
1710 case L2CAP_MODE_STREAMING:
1711 case L2CAP_MODE_ERTM:
1712 if (l2cap_mode_supported(mode, remote_feat_mask))
1713 return mode;
1714 /* fall through */
1715 default:
1716 return L2CAP_MODE_BASIC;
1717 }
1718 }
1719
/* Build a Configuration Request for the channel into @data.
 *
 * On the very first exchange the desired mode may be downgraded to
 * what the remote's feature mask supports (unless pinned via
 * L2CAP_CONF_STATE2_DEVICE).  Adds MTU, RFC and, when both sides
 * allow it, an FCS option.  Returns the request length in bytes.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Only (re)select the mode on the first request/response */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode pinned per-device: keep it */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only bother with an RFC option if the remote supports
		 * ERTM or streaming at all */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Keep the PDU within one HCI frame (10 bytes overhead) */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request "no FCS" if we or the peer asked for it */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1821
1822 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1823 {
1824 struct l2cap_pinfo *pi = l2cap_pi(sk);
1825 struct l2cap_conf_rsp *rsp = data;
1826 void *ptr = rsp->data;
1827 void *req = pi->conf_req;
1828 int len = pi->conf_len;
1829 int type, hint, olen;
1830 unsigned long val;
1831 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1832 u16 mtu = L2CAP_DEFAULT_MTU;
1833 u16 result = L2CAP_CONF_SUCCESS;
1834
1835 BT_DBG("sk %p", sk);
1836
1837 while (len >= L2CAP_CONF_OPT_SIZE) {
1838 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1839
1840 hint = type & L2CAP_CONF_HINT;
1841 type &= L2CAP_CONF_MASK;
1842
1843 switch (type) {
1844 case L2CAP_CONF_MTU:
1845 mtu = val;
1846 break;
1847
1848 case L2CAP_CONF_FLUSH_TO:
1849 pi->flush_to = val;
1850 break;
1851
1852 case L2CAP_CONF_QOS:
1853 break;
1854
1855 case L2CAP_CONF_RFC:
1856 if (olen == sizeof(rfc))
1857 memcpy(&rfc, (void *) val, olen);
1858 break;
1859
1860 case L2CAP_CONF_FCS:
1861 if (val == L2CAP_FCS_NONE)
1862 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1863
1864 break;
1865
1866 default:
1867 if (hint)
1868 break;
1869
1870 result = L2CAP_CONF_UNKNOWN;
1871 *((u8 *) ptr++) = type;
1872 break;
1873 }
1874 }
1875
1876 if (pi->num_conf_rsp || pi->num_conf_req > 1)
1877 goto done;
1878
1879 switch (pi->mode) {
1880 case L2CAP_MODE_STREAMING:
1881 case L2CAP_MODE_ERTM:
1882 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1883 pi->mode = l2cap_select_mode(rfc.mode,
1884 pi->conn->feat_mask);
1885 break;
1886 }
1887
1888 if (pi->mode != rfc.mode)
1889 return -ECONNREFUSED;
1890
1891 break;
1892 }
1893
1894 done:
1895 if (pi->mode != rfc.mode) {
1896 result = L2CAP_CONF_UNACCEPT;
1897 rfc.mode = pi->mode;
1898
1899 if (pi->num_conf_rsp == 1)
1900 return -ECONNREFUSED;
1901
1902 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1903 sizeof(rfc), (unsigned long) &rfc);
1904 }
1905
1906
1907 if (result == L2CAP_CONF_SUCCESS) {
1908 /* Configure output options and let the other side know
1909 * which ones we don't like. */
1910
1911 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1912 result = L2CAP_CONF_UNACCEPT;
1913 else {
1914 pi->omtu = mtu;
1915 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1916 }
1917 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1918
1919 switch (rfc.mode) {
1920 case L2CAP_MODE_BASIC:
1921 pi->fcs = L2CAP_FCS_NONE;
1922 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1923 break;
1924
1925 case L2CAP_MODE_ERTM:
1926 pi->remote_tx_win = rfc.txwin_size;
1927 pi->remote_max_tx = rfc.max_transmit;
1928
1929 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1930 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1931
1932 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1933
1934 rfc.retrans_timeout =
1935 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1936 rfc.monitor_timeout =
1937 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1938
1939 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1940
1941 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1942 sizeof(rfc), (unsigned long) &rfc);
1943
1944 break;
1945
1946 case L2CAP_MODE_STREAMING:
1947 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1948 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1949
1950 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1951
1952 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1953
1954 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1955 sizeof(rfc), (unsigned long) &rfc);
1956
1957 break;
1958
1959 default:
1960 result = L2CAP_CONF_UNACCEPT;
1961
1962 memset(&rfc, 0, sizeof(rfc));
1963 rfc.mode = pi->mode;
1964 }
1965
1966 if (result == L2CAP_CONF_SUCCESS)
1967 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1968 }
1969 rsp->scid = cpu_to_le16(pi->dcid);
1970 rsp->result = cpu_to_le16(result);
1971 rsp->flags = cpu_to_le16(0x0000);
1972
1973 return ptr - data;
1974 }
1975
1976 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1977 {
1978 struct l2cap_pinfo *pi = l2cap_pi(sk);
1979 struct l2cap_conf_req *req = data;
1980 void *ptr = req->data;
1981 int type, olen;
1982 unsigned long val;
1983 struct l2cap_conf_rfc rfc;
1984
1985 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1986
1987 while (len >= L2CAP_CONF_OPT_SIZE) {
1988 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1989
1990 switch (type) {
1991 case L2CAP_CONF_MTU:
1992 if (val < L2CAP_DEFAULT_MIN_MTU) {
1993 *result = L2CAP_CONF_UNACCEPT;
1994 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1995 } else
1996 pi->imtu = val;
1997 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1998 break;
1999
2000 case L2CAP_CONF_FLUSH_TO:
2001 pi->flush_to = val;
2002 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2003 2, pi->flush_to);
2004 break;
2005
2006 case L2CAP_CONF_RFC:
2007 if (olen == sizeof(rfc))
2008 memcpy(&rfc, (void *)val, olen);
2009
2010 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2011 rfc.mode != pi->mode)
2012 return -ECONNREFUSED;
2013
2014 pi->fcs = 0;
2015
2016 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2017 sizeof(rfc), (unsigned long) &rfc);
2018 break;
2019 }
2020 }
2021
2022 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2023 return -ECONNREFUSED;
2024
2025 pi->mode = rfc.mode;
2026
2027 if (*result == L2CAP_CONF_SUCCESS) {
2028 switch (rfc.mode) {
2029 case L2CAP_MODE_ERTM:
2030 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2031 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2032 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2033 break;
2034 case L2CAP_MODE_STREAMING:
2035 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2036 }
2037 }
2038
2039 req->dcid = cpu_to_le16(pi->dcid);
2040 req->flags = cpu_to_le16(0x0000);
2041
2042 return ptr - data;
2043 }
2044
/* Fill in a bare configuration response (header only, no options)
 * with the given result and flags.  Returns the bytes used in @data.
 */
static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("sk %p", sk);

	/* The scid from the peer's point of view is our dcid */
	rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return ptr - data;
}
2058
2059 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2060 {
2061 struct l2cap_pinfo *pi = l2cap_pi(sk);
2062 int type, olen;
2063 unsigned long val;
2064 struct l2cap_conf_rfc rfc;
2065
2066 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2067
2068 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2069 return;
2070
2071 while (len >= L2CAP_CONF_OPT_SIZE) {
2072 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2073
2074 switch (type) {
2075 case L2CAP_CONF_RFC:
2076 if (olen == sizeof(rfc))
2077 memcpy(&rfc, (void *)val, olen);
2078 goto done;
2079 }
2080 }
2081
2082 done:
2083 switch (rfc.mode) {
2084 case L2CAP_MODE_ERTM:
2085 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2086 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2087 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2088 break;
2089 case L2CAP_MODE_STREAMING:
2090 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2091 }
2092 }
2093
2094 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2095 {
2096 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2097
2098 if (rej->reason != 0x0000)
2099 return 0;
2100
2101 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2102 cmd->ident == conn->info_ident) {
2103 del_timer(&conn->info_timer);
2104
2105 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2106 conn->info_ident = 0;
2107
2108 l2cap_conn_start(conn);
2109 }
2110
2111 return 0;
2112 }
2113
/* Handle an incoming Connection Request.
 *
 * Looks up a listening socket for the requested PSM, checks link
 * security, allocates and initializes the child socket/channel and
 * answers with a Connection Response (success, pending or an error).
 * While the feature mask exchange with the remote is still
 * outstanding, the response is "pending" and an Information Request
 * is (re)sent.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Set up the child channel from the listening parent */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Let userspace decide via accept() */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still pending */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off (or restart) the feature mask exchange */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
2241
/* Handle an incoming Connection Response.
 *
 * On success the channel moves to BT_CONFIG and the configuration
 * exchange starts; "pending" only records state; any other result
 * tears the channel down (deferred via a short timer if userspace
 * currently owns the socket).
 *
 * NOTE(review): the socket is bh-unlocked at the end but never locked
 * here - the lock is presumably taken inside the lookup helpers;
 * verify against l2cap_get_chan_by_scid/ident.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		/* No source CID yet: match on the request's ident */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Don't send a second configuration request */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2303
2304 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2305 {
2306 /* FCS is enabled only in ERTM or streaming mode, if one or both
2307 * sides request it.
2308 */
2309 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2310 pi->fcs = L2CAP_FCS_NONE;
2311 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2312 pi->fcs = L2CAP_FCS_CRC16;
2313 }
2314
/* Handle an incoming Configuration Request.
 *
 * Requests may be split over several commands (continuation flag
 * 0x0001): partial payloads accumulate in pi->conf_req until the
 * final fragment arrives, then the whole buffer is parsed and
 * answered.  Once both directions are configured the channel is
 * marked connected.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		/* reason 0x0002: invalid CID in request (per L2CAP spec) */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions done: bring the channel up */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* Our own configuration request hasn't gone out yet */
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2405
/* Handle an incoming L2CAP Configure Response on the signalling channel.
 *
 * Looks up the channel by the source CID in the response and acts on the
 * result code: on success the remote's RFC option is recorded; on
 * "unacceptable parameters" a re-negotiated Configure Request is built and
 * sent (bounded by L2CAP_CONF_MAX_CONF_RSP attempts); any other result
 * tears the channel down.  Once both config directions are done the
 * channel moves to BT_CONNECTED.
 *
 * Returns 0 in all cases; the socket is unlocked before returning.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;
	int len = cmd->len - sizeof(*rsp);

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	/* The lookup returns with the socket bh-locked (every exit path
	 * below goes through bh_unlock_sock()). */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(sk, rsp->data, len);
		break;

	case L2CAP_CONF_UNACCEPT:
		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* The re-negotiated options must fit behind the
			 * request header inside req[]. */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			l2cap_pi(sk)->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Retry budget exhausted: fall through and disconnect. */

	default:
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto done;
	}

	/* Continuation flag set: more option fragments will follow. */
	if (flags & 0x01)
		goto done;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
2484
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges the request with a Disconnection Response and tears the
 * channel down.  If the socket is currently owned by user context the
 * teardown is deferred: the channel is parked in BT_DISCONN and a short
 * (HZ / 5) timer is armed so the timeout path can finish the job after
 * the user releases the socket.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The remote's dcid is our scid; the lookup returns the socket
	 * bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo the CIDs back from our point of view. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2522
/* Handle an incoming L2CAP Disconnection Response: the remote confirmed
 * our disconnect, so remove the channel.  As in l2cap_disconnect_req(),
 * deletion is deferred via a short timer while the socket is owned by
 * user context.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup by our scid; returns with the socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: this is a normal, expected disconnect. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2553
2554 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2555 {
2556 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2557 u16 type;
2558
2559 type = __le16_to_cpu(req->type);
2560
2561 BT_DBG("type 0x%4.4x", type);
2562
2563 if (type == L2CAP_IT_FEAT_MASK) {
2564 u8 buf[8];
2565 u32 feat_mask = l2cap_feat_mask;
2566 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2567 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2568 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2569 if (!disable_ertm)
2570 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2571 | L2CAP_FEAT_FCS;
2572 put_unaligned_le32(feat_mask, rsp->data);
2573 l2cap_send_cmd(conn, cmd->ident,
2574 L2CAP_INFO_RSP, sizeof(buf), buf);
2575 } else if (type == L2CAP_IT_FIXED_CHAN) {
2576 u8 buf[12];
2577 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2578 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2579 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2580 memcpy(buf + 4, l2cap_fixed_chan, 8);
2581 l2cap_send_cmd(conn, cmd->ident,
2582 L2CAP_INFO_RSP, sizeof(buf), buf);
2583 } else {
2584 struct l2cap_info_rsp rsp;
2585 rsp.type = cpu_to_le16(type);
2586 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2587 l2cap_send_cmd(conn, cmd->ident,
2588 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2589 }
2590
2591 return 0;
2592 }
2593
/* Handle an incoming L2CAP Information Response.
 *
 * Part of the per-connection discovery sequence: after the feature mask
 * is learned, a fixed-channel query is issued when the remote advertises
 * L2CAP_FEAT_FIXED_CHAN; once discovery finishes (or fails) any queued
 * channels are kicked off via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* A response arrived, so the guard timer armed when the request
	 * was sent is no longer needed. */
	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote cannot answer: give up on discovery and start the
		 * pending channels anyway. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Continue discovery with a fixed-channel query. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2641
/* Demultiplex and dispatch all signalling commands carried in one frame
 * on the L2CAP signalling channel.
 *
 * A single C-frame may hold several commands; each one is copied into a
 * local header, validated, and dispatched.  Any handler error is
 * answered with a Command Reject.  The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a look at the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command longer than the remaining payload, or carrying
		 * the reserved ident 0, marks the frame as corrupted. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command inside this frame. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2731
2732 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2733 {
2734 u16 our_fcs, rcv_fcs;
2735 int hdr_size = L2CAP_HDR_SIZE + 2;
2736
2737 if (pi->fcs == L2CAP_FCS_CRC16) {
2738 skb_trim(skb, skb->len - 2);
2739 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2740 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2741
2742 if (our_fcs != rcv_fcs)
2743 return -EBADMSG;
2744 }
2745 return 0;
2746 }
2747
/* Answer a poll (P-bit) by sending pending I-frames, or an RR/RNR
 * S-frame when there is nothing to send.
 *
 * If we are locally busy an RNR is sent first; otherwise any frames the
 * remote-busy condition held back are retransmitted and the transmit
 * queue is flushed.  When no I-frame went out (and we are not busy) a
 * plain RR is sent so the poll is still answered.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	/* l2cap_ertm_send() below counts what it sends in frames_sent. */
	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
2774
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq relative to buffer_seq (offsets computed in the
 * modulo-64 sequence space).
 *
 * Returns 0 on insertion, -EINVAL when a frame with the same tx_seq is
 * already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash seq/SAR info in the skb control block for reassembly. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame with a larger offset: insert here. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Larger than everything queued so far: append. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
2817
/* Reassemble ERTM I-frame payloads into SDUs according to their SAR bits
 * and deliver complete SDUs to the socket receive queue.
 *
 * Returns 0 when the frame was consumed (including the protocol-error
 * paths, which also trigger a disconnect).  For the SDU_END delivery
 * step it can return -ENOMEM or the sock_queue_rcv_skb() error; in that
 * case SAR_RETRY is set so a later retry skips the copy and only repeats
 * the clone/deliver step.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented SDU in the middle of a segmented one is a
		 * protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		/* NOTE(review): queueing failure falls through to plain
		 * kfree_skb() below and reports success — the SDU is lost. */
		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry after -ENOMEM the final segment was already
		 * accounted and copied; skip straight to delivery. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* fall through: a reassembly violation also tears the channel down */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
2925
/* Try to drain the frames parked on BUSY_QUEUE after a local-busy
 * condition.
 *
 * Returns 0 when the queue was fully drained — local busy is cleared
 * and, if an RNR had been sent, the remote is polled with RR+P — or
 * -EBUSY when a frame still cannot be delivered (it is put back at the
 * head of the queue).
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: undo the dequeue and try later. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the remote "not ready" earlier; poll it with RR+P so it
	 * resumes sending, and await the F-bit answer. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
2965
/* Process-context worker that retries delivery of frames parked on
 * BUSY_QUEUE while the channel is in the local-busy state.
 *
 * Re-runs l2cap_try_push_rx_skb() roughly every HZ/5 under the socket
 * lock until the queue drains, a signal arrives, the socket errors out,
 * or L2CAP_LOCAL_BUSY_TRIES attempts have failed (then the channel is
 * disconnected).
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	/* struct sock is the first member of the bt/l2cap sock layout, so
	 * the pinfo pointer doubles as the sock pointer. */
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so receive processing can
		 * make progress in the meantime. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}
	/* NOTE(review): err only controls loop exit; its final value is
	 * intentionally discarded. */

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3012
/* Deliver an in-sequence I-frame payload, entering the local-busy state
 * when the receive side cannot take it.
 *
 * While already busy the frame is just appended to BUSY_QUEUE and a
 * drain is attempted.  On a fresh delivery failure we go busy: send RNR,
 * park the frame, and kick the busy worker to retry from process
 * context.  Returns the reassembly result (negative when we went busy).
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);


	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the remote to stop sending until we recover. */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	/* No point acknowledging while we cannot receive. */
	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3051
3052 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3053 {
3054 struct l2cap_pinfo *pi = l2cap_pi(sk);
3055 struct sk_buff *_skb;
3056 int err = -EINVAL;
3057
3058 /*
3059 * TODO: We have to notify the userland if some data is lost with the
3060 * Streaming Mode.
3061 */
3062
3063 switch (control & L2CAP_CTRL_SAR) {
3064 case L2CAP_SDU_UNSEGMENTED:
3065 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3066 kfree_skb(pi->sdu);
3067 break;
3068 }
3069
3070 err = sock_queue_rcv_skb(sk, skb);
3071 if (!err)
3072 return 0;
3073
3074 break;
3075
3076 case L2CAP_SDU_START:
3077 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3078 kfree_skb(pi->sdu);
3079 break;
3080 }
3081
3082 pi->sdu_len = get_unaligned_le16(skb->data);
3083 skb_pull(skb, 2);
3084
3085 if (pi->sdu_len > pi->imtu) {
3086 err = -EMSGSIZE;
3087 break;
3088 }
3089
3090 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3091 if (!pi->sdu) {
3092 err = -ENOMEM;
3093 break;
3094 }
3095
3096 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3097
3098 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3099 pi->partial_sdu_len = skb->len;
3100 err = 0;
3101 break;
3102
3103 case L2CAP_SDU_CONTINUE:
3104 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3105 break;
3106
3107 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3108
3109 pi->partial_sdu_len += skb->len;
3110 if (pi->partial_sdu_len > pi->sdu_len)
3111 kfree_skb(pi->sdu);
3112 else
3113 err = 0;
3114
3115 break;
3116
3117 case L2CAP_SDU_END:
3118 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3119 break;
3120
3121 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3122
3123 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3124 pi->partial_sdu_len += skb->len;
3125
3126 if (pi->partial_sdu_len > pi->imtu)
3127 goto drop;
3128
3129 if (pi->partial_sdu_len == pi->sdu_len) {
3130 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3131 err = sock_queue_rcv_skb(sk, _skb);
3132 if (err < 0)
3133 kfree_skb(_skb);
3134 }
3135 err = 0;
3136
3137 drop:
3138 kfree_skb(pi->sdu);
3139 break;
3140 }
3141
3142 kfree_skb(skb);
3143 return err;
3144 }
3145
3146 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3147 {
3148 struct sk_buff *skb;
3149 u16 control;
3150
3151 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3152 if (bt_cb(skb)->tx_seq != tx_seq)
3153 break;
3154
3155 skb = skb_dequeue(SREJ_QUEUE(sk));
3156 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3157 l2cap_ertm_reassembly_sdu(sk, skb, control);
3158 l2cap_pi(sk)->buffer_seq_srej =
3159 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3160 tx_seq = (tx_seq + 1) % 64;
3161 }
3162 }
3163
3164 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3165 {
3166 struct l2cap_pinfo *pi = l2cap_pi(sk);
3167 struct srej_list *l, *tmp;
3168 u16 control;
3169
3170 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3171 if (l->tx_seq == tx_seq) {
3172 list_del(&l->list);
3173 kfree(l);
3174 return;
3175 }
3176 control = L2CAP_SUPER_SELECT_REJECT;
3177 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3178 l2cap_send_sframe(pi, control);
3179 list_del(&l->list);
3180 list_add_tail(&l->list, SREJ_LIST(sk));
3181 }
3182 }
3183
3184 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3185 {
3186 struct l2cap_pinfo *pi = l2cap_pi(sk);
3187 struct srej_list *new;
3188 u16 control;
3189
3190 while (tx_seq != pi->expected_tx_seq) {
3191 control = L2CAP_SUPER_SELECT_REJECT;
3192 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3193 l2cap_send_sframe(pi, control);
3194
3195 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3196 new->tx_seq = pi->expected_tx_seq;
3197 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3198 list_add_tail(&new->list, SREJ_LIST(sk));
3199 }
3200 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3201 }
3202
3203 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3204 {
3205 struct l2cap_pinfo *pi = l2cap_pi(sk);
3206 u8 tx_seq = __get_txseq(rx_control);
3207 u8 req_seq = __get_reqseq(rx_control);
3208 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3209 int tx_seq_offset, expected_tx_seq_offset;
3210 int num_to_ack = (pi->tx_win/6) + 1;
3211 int err = 0;
3212
3213 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3214 rx_control);
3215
3216 if (L2CAP_CTRL_FINAL & rx_control &&
3217 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3218 del_timer(&pi->monitor_timer);
3219 if (pi->unacked_frames > 0)
3220 __mod_retrans_timer();
3221 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3222 }
3223
3224 pi->expected_ack_seq = req_seq;
3225 l2cap_drop_acked_frames(sk);
3226
3227 if (tx_seq == pi->expected_tx_seq)
3228 goto expected;
3229
3230 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3231 if (tx_seq_offset < 0)
3232 tx_seq_offset += 64;
3233
3234 /* invalid tx_seq */
3235 if (tx_seq_offset >= pi->tx_win) {
3236 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3237 goto drop;
3238 }
3239
3240 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3241 goto drop;
3242
3243 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3244 struct srej_list *first;
3245
3246 first = list_first_entry(SREJ_LIST(sk),
3247 struct srej_list, list);
3248 if (tx_seq == first->tx_seq) {
3249 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3250 l2cap_check_srej_gap(sk, tx_seq);
3251
3252 list_del(&first->list);
3253 kfree(first);
3254
3255 if (list_empty(SREJ_LIST(sk))) {
3256 pi->buffer_seq = pi->buffer_seq_srej;
3257 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3258 l2cap_send_ack(pi);
3259 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3260 }
3261 } else {
3262 struct srej_list *l;
3263
3264 /* duplicated tx_seq */
3265 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3266 goto drop;
3267
3268 list_for_each_entry(l, SREJ_LIST(sk), list) {
3269 if (l->tx_seq == tx_seq) {
3270 l2cap_resend_srejframe(sk, tx_seq);
3271 return 0;
3272 }
3273 }
3274 l2cap_send_srejframe(sk, tx_seq);
3275 }
3276 } else {
3277 expected_tx_seq_offset =
3278 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3279 if (expected_tx_seq_offset < 0)
3280 expected_tx_seq_offset += 64;
3281
3282 /* duplicated tx_seq */
3283 if (tx_seq_offset < expected_tx_seq_offset)
3284 goto drop;
3285
3286 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3287
3288 BT_DBG("sk %p, Enter SREJ", sk);
3289
3290 INIT_LIST_HEAD(SREJ_LIST(sk));
3291 pi->buffer_seq_srej = pi->buffer_seq;
3292
3293 __skb_queue_head_init(SREJ_QUEUE(sk));
3294 __skb_queue_head_init(BUSY_QUEUE(sk));
3295 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3296
3297 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3298
3299 l2cap_send_srejframe(sk, tx_seq);
3300
3301 del_timer(&pi->ack_timer);
3302 }
3303 return 0;
3304
3305 expected:
3306 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3307
3308 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3309 bt_cb(skb)->tx_seq = tx_seq;
3310 bt_cb(skb)->sar = sar;
3311 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3312 return 0;
3313 }
3314
3315 err = l2cap_push_rx_skb(sk, skb, rx_control);
3316 if (err < 0)
3317 return 0;
3318
3319 if (rx_control & L2CAP_CTRL_FINAL) {
3320 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3321 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3322 else
3323 l2cap_retransmit_frames(sk);
3324 }
3325
3326 __mod_ack_timer();
3327
3328 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3329 if (pi->num_acked == num_to_ack - 1)
3330 l2cap_send_ack(pi);
3331
3332 return 0;
3333
3334 drop:
3335 kfree_skb(skb);
3336 return 0;
3337 }
3338
/* Handle a received RR (Receiver Ready) S-frame.
 *
 * Always acknowledges frames up to the carried req_seq, then:
 *  - P-bit set: answer the poll (with an F-bit) — via srejtail when in
 *    SREJ recovery, otherwise with I-frames or RR/RNR;
 *  - F-bit set: the answer to our poll — clear remote-busy and, unless a
 *    REJ was already acted on, retransmit outstanding frames;
 *  - plain RR: remote is ready again; resume sending (or just ack while
 *    in SREJ recovery).
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
3382
3383 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3384 {
3385 struct l2cap_pinfo *pi = l2cap_pi(sk);
3386 u8 tx_seq = __get_reqseq(rx_control);
3387
3388 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3389
3390 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3391
3392 pi->expected_ack_seq = tx_seq;
3393 l2cap_drop_acked_frames(sk);
3394
3395 if (rx_control & L2CAP_CTRL_FINAL) {
3396 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3397 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3398 else
3399 l2cap_retransmit_frames(sk);
3400 } else {
3401 l2cap_retransmit_frames(sk);
3402
3403 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3404 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3405 }
3406 }
/* Handle a received SREJ S-frame: the remote selectively requests
 * retransmission of the single frame req_seq.
 *
 *  - P-bit set: also acknowledges up to req_seq; answer with the frame
 *    plus the rest of the send queue, tagging the F-bit;
 *  - F-bit set: answer to our poll — retransmit unless this very frame
 *    was already resent in this poll cycle (SREJ_ACT);
 *  - plain SREJ: just retransmit the requested frame.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			/* Remember which seq we answered, so the F-bit echo
			 * is not retransmitted twice. */
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3443
/* Handle a received RNR (Receiver Not Ready) S-frame: the remote asks us
 * to stop transmitting.  Mark the remote busy, acknowledge up to
 * req_seq, and answer any poll — with an RR+F normally, or with the SREJ
 * tail frame while in SREJ recovery.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Nothing will be (re)transmitted while the remote is busy,
		 * so the retransmission timer is pointless. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
3470
/* Dispatch a received S-frame (RR/REJ/SREJ/RNR) to its handler.
 *
 * Shared F-bit handling is done first: a set F-bit while we wait for a
 * poll answer stops the monitor timer and leaves WAIT_F.  The two bits
 * of L2CAP_CTRL_SUPERVISE cover exactly the four cases below.  The skb
 * (payload-less for S-frames) is always freed.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
3504
/* Validate and dispatch one received ERTM frame (I-frame or S-frame).
 *
 * Pulls the 16-bit control field, verifies the FCS, checks the payload
 * length against the negotiated MPS and the piggy-backed req_seq against
 * the window of frames we actually sent, then hands the frame to the
 * I-frame or S-frame path.  Corrupted frames are silently dropped (the
 * peer's recovery will request retransmission); protocol violations
 * disconnect the channel.  Always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* A starting I-frame segment carries a 2-byte SDU length field
	 * that is not payload. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* l2cap_check_fcs() already trimmed the FCS off the skb; adjust
	 * the payload length accordingly. */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3574
/* Route an skb received on a dynamic data CID to its channel and process
 * it according to the channel mode (basic, ERTM or streaming).
 *
 * l2cap_get_chan_by_scid() returns with the socket bh-locked; the
 * drop/done tail unlocks it.  The skb is consumed on every path.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* If user context holds the socket, defer the frame to the
		 * backlog; it will be run through l2cap_ertm_data_rcv()
		 * when the lock is released. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Adjust for the SDU-length field of a starting segment
		 * and for the already-trimmed FCS. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode carries no S-frames. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Out-of-sequence frames are tolerated: just resynchronise
		 * the expectation (lost data is accepted in this mode). */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3662
3663 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3664 {
3665 struct sock *sk;
3666
3667 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3668 if (!sk)
3669 goto drop;
3670
3671 bh_lock_sock(sk);
3672
3673 BT_DBG("sk %p, len %d", sk, skb->len);
3674
3675 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3676 goto drop;
3677
3678 if (l2cap_pi(sk)->imtu < skb->len)
3679 goto drop;
3680
3681 if (!sock_queue_rcv_skb(sk, skb))
3682 goto done;
3683
3684 drop:
3685 kfree_skb(skb);
3686
3687 done:
3688 if (sk)
3689 bh_unlock_sock(sk);
3690 return 0;
3691 }
3692
3693 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3694 {
3695 struct l2cap_hdr *lh = (void *) skb->data;
3696 u16 cid, len;
3697 __le16 psm;
3698
3699 skb_pull(skb, L2CAP_HDR_SIZE);
3700 cid = __le16_to_cpu(lh->cid);
3701 len = __le16_to_cpu(lh->len);
3702
3703 if (len != skb->len) {
3704 kfree_skb(skb);
3705 return;
3706 }
3707
3708 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3709
3710 switch (cid) {
3711 case L2CAP_CID_SIGNALING:
3712 l2cap_sig_channel(conn, skb);
3713 break;
3714
3715 case L2CAP_CID_CONN_LESS:
3716 psm = get_unaligned_le16(skb->data);
3717 skb_pull(skb, 2);
3718 l2cap_conless_channel(conn, psm, skb);
3719 break;
3720
3721 default:
3722 l2cap_data_channel(conn, cid, skb);
3723 break;
3724 }
3725 }
3726
3727 /* ---- L2CAP interface with lower layer (HCI) ---- */
3728
3729 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3730 {
3731 int exact = 0, lm1 = 0, lm2 = 0;
3732 register struct sock *sk;
3733 struct hlist_node *node;
3734
3735 if (type != ACL_LINK)
3736 return -EINVAL;
3737
3738 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3739
3740 /* Find listening sockets and check their link_mode */
3741 read_lock(&l2cap_sk_list.lock);
3742 sk_for_each(sk, node, &l2cap_sk_list.head) {
3743 if (sk->sk_state != BT_LISTEN)
3744 continue;
3745
3746 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3747 lm1 |= HCI_LM_ACCEPT;
3748 if (l2cap_pi(sk)->role_switch)
3749 lm1 |= HCI_LM_MASTER;
3750 exact++;
3751 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3752 lm2 |= HCI_LM_ACCEPT;
3753 if (l2cap_pi(sk)->role_switch)
3754 lm2 |= HCI_LM_MASTER;
3755 }
3756 }
3757 read_unlock(&l2cap_sk_list.lock);
3758
3759 return exact ? lm1 : lm2;
3760 }
3761
3762 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3763 {
3764 struct l2cap_conn *conn;
3765
3766 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3767
3768 if (hcon->type != ACL_LINK)
3769 return -EINVAL;
3770
3771 if (!status) {
3772 conn = l2cap_conn_add(hcon, status);
3773 if (conn)
3774 l2cap_conn_ready(conn);
3775 } else
3776 l2cap_conn_del(hcon, bt_err(status));
3777
3778 return 0;
3779 }
3780
3781 static int l2cap_disconn_ind(struct hci_conn *hcon)
3782 {
3783 struct l2cap_conn *conn = hcon->l2cap_data;
3784
3785 BT_DBG("hcon %p", hcon);
3786
3787 if (hcon->type != ACL_LINK || !conn)
3788 return 0x13;
3789
3790 return conn->disc_reason;
3791 }
3792
3793 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3794 {
3795 BT_DBG("hcon %p reason %d", hcon, reason);
3796
3797 if (hcon->type != ACL_LINK)
3798 return -EINVAL;
3799
3800 l2cap_conn_del(hcon, bt_err(reason));
3801
3802 return 0;
3803 }
3804
3805 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3806 {
3807 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3808 return;
3809
3810 if (encrypt == 0x00) {
3811 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3812 l2cap_sock_clear_timer(sk);
3813 l2cap_sock_set_timer(sk, HZ * 5);
3814 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3815 __l2cap_sock_close(sk, ECONNREFUSED);
3816 } else {
3817 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3818 l2cap_sock_clear_timer(sk);
3819 }
3820 }
3821
/* HCI callback: authentication/encryption for @hcon completed with
 * @status (0 = success) and current @encrypt state.
 *
 * Walks every channel on the connection — under the chan_list read
 * lock, taking each socket's bh lock in turn — and advances its state
 * machine:
 *  - channels already connected/configuring only re-check encryption;
 *  - BT_CONNECT channels send their deferred Connection Request on
 *    success, or arm a short teardown timer on failure;
 *  - BT_CONNECT2 channels answer the peer's pending Connection Request
 *    with success or a security block.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A Connection Request is already in flight */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need an encryption re-check */
		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the Connection Request
				 * that was waiting on it. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short timer to tear the
				 * channel down shortly. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Answer the peer's pending Connection Request */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
3895
/* HCI entry point for inbound ACL data: reassemble L2CAP frames that
 * were fragmented across several ACL packets.
 *
 * A start fragment (ACL_CONT clear) carries the Basic L2CAP header,
 * from which the total frame length is taken.  Continuation fragments
 * are appended to conn->rx_skb until conn->rx_len drops to zero, then
 * the complete frame is handed to l2cap_recv_frame().  The incoming
 * skb itself is always consumed: either passed on whole (unfragmented
 * frame) or freed at drop: after its bytes have been copied/rejected.
 * Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated: abandon it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject frames exceeding the channel's receive MTU before
		 * allocating a reassembly buffer.
		 * NOTE(review): no bh_lock_sock() is visible here, yet the
		 * socket is unlocked below — l2cap_get_chan_by_scid()
		 * presumably returns it bh-locked; confirm. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
		/* fragment copied; fall through to drop: frees the original */
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment overruns the announced frame length: abandon */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4003
/* seq_file show routine backing the "l2cap" debugfs file: one line per
 * L2CAP socket — source/destination addresses, socket state, PSM,
 * source/destination CIDs, incoming/outgoing MTUs and security level.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4026
/* debugfs open hook: bind the seq_file show routine to the file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4031
/* File operations for the "l2cap" debugfs entry: a read-only
 * single_open() seq file rendered by l2cap_debugfs_show(). */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
4038
/* dentry of the "l2cap" debugfs file; NULL if creation failed or
 * debugfs is unavailable. */
static struct dentry *l2cap_debugfs;
4040
/* Callbacks registered with the HCI core: connection accept policy,
 * connect/disconnect confirmations, security results and inbound ACL
 * data. */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata
};
4051
4052 static int __init l2cap_init(void)
4053 {
4054 int err;
4055
4056 err = l2cap_init_sockets();
4057 if (err < 0)
4058 return err;
4059
4060 _busy_wq = create_singlethread_workqueue("l2cap");
4061 if (!_busy_wq) {
4062 err = -ENOMEM;
4063 goto error;
4064 }
4065
4066 err = hci_register_proto(&l2cap_hci_proto);
4067 if (err < 0) {
4068 BT_ERR("L2CAP protocol registration failed");
4069 bt_sock_unregister(BTPROTO_L2CAP);
4070 goto error;
4071 }
4072
4073 if (bt_debugfs) {
4074 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4075 bt_debugfs, NULL, &l2cap_debugfs_fops);
4076 if (!l2cap_debugfs)
4077 BT_ERR("Failed to create L2CAP debug file");
4078 }
4079
4080 BT_INFO("L2CAP ver %s", VERSION);
4081 BT_INFO("L2CAP socket layer initialized");
4082
4083 return 0;
4084
4085 error:
4086 destroy_workqueue(_busy_wq);
4087 l2cap_cleanup_sockets();
4088 return err;
4089 }
4090
/* Module teardown: remove the debugfs entry, drain and destroy the
 * busy-work workqueue, then unhook from the HCI core and the socket
 * layer.
 *
 * NOTE(review): destroy_workqueue() already flushes pending work, so
 * the explicit flush_workqueue() looks redundant — harmless, though.
 */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4103
/* Exported no-op: referencing this symbol lets dependent modules pull
 * in the L2CAP module automatically. */
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4111
/* Module plumbing: entry/exit points, the disable_ertm parameter and
 * module metadata. */
module_init(l2cap_init);
module_exit(l2cap_exit);

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");
This page took 0.174761 seconds and 5 git commands to generate.