Linux 2.6.36-rc6
[deliverable/linux.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core and sockets. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
58 #define VERSION "2.15"
59
60 static int disable_ertm = 0;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
64
65 static const struct proto_ops l2cap_sock_ops;
66
67 static struct workqueue_struct *_busy_wq;
68
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 };
72
73 static void l2cap_busy_work(struct work_struct *work);
74
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
78
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
82
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
84
85 /* ---- L2CAP timers ---- */
/* Per-socket timer expiry handler.
 *
 * Runs in timer (softirq) context, hence bh_lock_sock() rather than
 * lock_sock().  Picks an error code matching the state the socket was
 * stuck in, closes it, and reaps it if it is now zapped and orphaned.
 */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* A timeout while connected/configuring, or while connecting above
	 * the SDP security level, is treated as a refusal by the peer;
	 * anything else is a plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must run on the unlocked socket; sock_put()
	 * drops the reference taken when the timer was armed. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
110
111 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
112 {
113 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
114 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
115 }
116
117 static void l2cap_sock_clear_timer(struct sock *sk)
118 {
119 BT_DBG("sock %p state %d", sk, sk->sk_state);
120 sk_stop_timer(sk, &sk->sk_timer);
121 }
122
123 /* ---- L2CAP channels ---- */
124 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
125 {
126 struct sock *s;
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->dcid == cid)
129 break;
130 }
131 return s;
132 }
133
134 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
135 {
136 struct sock *s;
137 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
138 if (l2cap_pi(s)->scid == cid)
139 break;
140 }
141 return s;
142 }
143
144 /* Find channel with given SCID.
145 * Returns locked socket */
/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;

	/* Lock order: list read lock first, then the socket's BH lock.
	 * The socket lock is taken before the list lock is dropped so the
	 * channel cannot be unlinked in between. */
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
156
157 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
158 {
159 struct sock *s;
160 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
161 if (l2cap_pi(s)->ident == ident)
162 break;
163 }
164 return s;
165 }
166
/* Find channel with a pending command carrying the given signalling
 * identifier.  Returns the socket BH-locked, or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;

	/* Same lock nesting as l2cap_get_chan_by_scid(): socket BH lock is
	 * acquired under the list read lock. */
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
177
178 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
179 {
180 u16 cid = L2CAP_CID_DYN_START;
181
182 for (; cid < L2CAP_CID_DYN_END; cid++) {
183 if (!__l2cap_get_chan_by_scid(l, cid))
184 return cid;
185 }
186
187 return 0;
188 }
189
/* Insert @sk at the head of the connection's channel list.
 * Takes a reference on the socket, dropped again by l2cap_chan_unlink().
 * Caller must hold the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
201
/* Remove @sk from the connection's channel list and drop the reference
 * taken by __l2cap_chan_link().  Takes the list write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* __sock_put: drop the list's reference without triggering release;
	 * the caller still owns its own reference. */
	__sock_put(sk);
}
218
/* Attach socket @sk as a channel on @conn, assigning CIDs according to
 * the socket type, and queue it on @parent's accept queue if this is an
 * incoming connection.  Caller must hold the channel-list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: default disconnect reason "remote user terminated". */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
250
251 /* Delete channel.
252 * Must be called on the locked socket. */
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Detaches the channel from its connection, marks the socket closed and
 * zapped, notifies the owner (or listening parent), and for ERTM mode
 * tears down all retransmission state: timers, queues and the SREJ list.
 */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted: remove from the parent's accept queue
		 * and wake the listener so it can reap us. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		/* Free any outstanding selective-reject bookkeeping. */
		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
299
300 /* Service level security */
/* Service level security */
/* Map the channel's PSM and requested security level to an HCI
 * authentication type and ask the ACL link to enforce it.
 * PSM 0x0001 (SDP) never bonds; its security level is clamped to SDP.
 * Returns nonzero when the link already satisfies the requirement. */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
331
/* Allocate the next signalling command identifier for @conn.
 * Serialized by conn->lock; wraps within the kernel-reserved 1-128
 * range so identifiers used by userspace utilities are never issued. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
353
/* Build an L2CAP signalling command and push it out over the ACL link.
 * Silently drops the command if the skb cannot be allocated. */
static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	hci_send_acl(conn->hcon, skb, 0);
}
365
/* Transmit an ERTM supervisory frame (S-frame) carrying @control.
 * Consumes any pending F-bit/P-bit request, appends an FCS when CRC16
 * checking is configured, and sends the frame over the ACL link.
 * No-op unless the channel is connected. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + 16-bit control */

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for trailing FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Piggy-back a pending Final bit, then clear the request. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Likewise for a pending Poll bit. */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers header + control, i.e. everything but itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
411
/* Send a Receiver Ready, or Receiver Not Ready when the local side is
 * busy, acknowledging frames up to buffer_seq. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		/* Remember that an RNR went out so RR can be sent on unbusy. */
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
424
425 static inline int __l2cap_no_conn_pending(struct sock *sk)
426 {
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
428 }
429
/* Kick off channel establishment on an up ACL link.
 *
 * If the remote feature mask is already known (or being fetched), send
 * a Connect Request once security is satisfied; otherwise first issue
 * an Information Request for the feature mask and start its timeout.
 */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in flight: wait for it to finish. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
463
464 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
465 {
466 u32 local_feat_mask = l2cap_feat_mask;
467 if (!disable_ertm)
468 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
469
470 switch (mode) {
471 case L2CAP_MODE_ERTM:
472 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 case L2CAP_MODE_STREAMING:
474 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
475 default:
476 return 0x00;
477 }
478 }
479
/* Send a Disconnect Request for @sk's channel and move the socket to
 * BT_DISCONN with @err.  Flushes pending TX data and, in ERTM mode,
 * stops the retransmission machinery first. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
503
504 /* ---- L2CAP connections ---- */
505 static void l2cap_conn_start(struct l2cap_conn *conn)
506 {
507 struct l2cap_chan_list *l = &conn->chan_list;
508 struct sock_del_list del, *tmp1, *tmp2;
509 struct sock *sk;
510
511 BT_DBG("conn %p", conn);
512
513 INIT_LIST_HEAD(&del.list);
514
515 read_lock(&l->lock);
516
517 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
518 bh_lock_sock(sk);
519
520 if (sk->sk_type != SOCK_SEQPACKET &&
521 sk->sk_type != SOCK_STREAM) {
522 bh_unlock_sock(sk);
523 continue;
524 }
525
526 if (sk->sk_state == BT_CONNECT) {
527 struct l2cap_conn_req req;
528
529 if (!l2cap_check_security(sk) ||
530 !__l2cap_no_conn_pending(sk)) {
531 bh_unlock_sock(sk);
532 continue;
533 }
534
535 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
536 conn->feat_mask)
537 && l2cap_pi(sk)->conf_state &
538 L2CAP_CONF_STATE2_DEVICE) {
539 tmp1 = kzalloc(sizeof(struct sock_del_list),
540 GFP_ATOMIC);
541 tmp1->sk = sk;
542 list_add_tail(&tmp1->list, &del.list);
543 bh_unlock_sock(sk);
544 continue;
545 }
546
547 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 req.psm = l2cap_pi(sk)->psm;
549
550 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
552
553 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 L2CAP_CONN_REQ, sizeof(req), &req);
555
556 } else if (sk->sk_state == BT_CONNECT2) {
557 struct l2cap_conn_rsp rsp;
558 char buf[128];
559 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
560 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
561
562 if (l2cap_check_security(sk)) {
563 if (bt_sk(sk)->defer_setup) {
564 struct sock *parent = bt_sk(sk)->parent;
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
567 parent->sk_data_ready(parent, 0);
568
569 } else {
570 sk->sk_state = BT_CONFIG;
571 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
572 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
573 }
574 } else {
575 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
576 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
577 }
578
579 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
580 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
581
582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 rsp.result != L2CAP_CR_SUCCESS) {
584 bh_unlock_sock(sk);
585 continue;
586 }
587
588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 l2cap_build_conf_req(sk, buf), buf);
591 l2cap_pi(sk)->num_conf_req++;
592 }
593
594 bh_unlock_sock(sk);
595 }
596
597 read_unlock(&l->lock);
598
599 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 bh_lock_sock(tmp1->sk);
601 __l2cap_sock_close(tmp1->sk, ECONNRESET);
602 bh_unlock_sock(tmp1->sk);
603 list_del(&tmp1->list);
604 kfree(tmp1);
605 }
606 }
607
/* ACL link is up: mark raw/connectionless channels connected right away
 * and kick off L2CAP-level establishment for pending connect()s. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram sockets need no channel setup. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
633
634 /* Notify sockets that we cannot guaranty reliability anymore */
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	/* Only channels that asked for reliable delivery get the error. */
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
651
/* Information Request timed out: give up on learning the remote
 * feature mask and proceed with channel setup using defaults. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
661
/* Create (or return the existing) L2CAP connection object for an ACL
 * link.  Returns NULL on allocation failure, and the existing object
 * unchanged when one is already attached or @status reports an error. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: default disconnect reason "remote user terminated". */
	conn->disc_reason = 0x13;

	return conn;
}
694
/* Tear down the L2CAP connection attached to a dying ACL link: kill
 * every channel with @err, cancel the info timer and free the object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
721
/* Locked wrapper around __l2cap_chan_add(): attach @sk to @conn under
 * the channel-list write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
729
730 /* ---- Socket interface ---- */
/* ---- Socket interface ---- */
/* Exact lookup of a bound socket by (source PSM, source bdaddr).
 * Caller must hold l2cap_sk_list.lock.  Returns NULL if not found. */
static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
{
	struct sock *sk;
	struct hlist_node *node;
	sk_for_each(sk, node, &l2cap_sk_list.head)
		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
			goto found;
	sk = NULL;
found:
	return sk;
}
742
743 /* Find socket with psm and source bdaddr.
744 * Returns closest match.
745 */
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		/* state == 0 means "any state". */
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke on an exact match;
	 * otherwise fall back to the wildcard (BDADDR_ANY) candidate. */
	return node ? sk : sk1;
}
767
768 /* Find socket with given address (psm, src).
769 * Returns locked socket */
/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;

	/* Take the socket BH lock before releasing the list lock so the
	 * socket cannot disappear in between. */
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
780
/* sk_destruct callback: free any skbs still sitting on the receive and
 * write queues when the last reference to the socket is dropped. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
788
/* Close a listening socket: dispose of every not-yet-accepted child,
 * then mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
802
803 /* Kill socket (only if zapped and orphan)
804 * Must be called on unlocked socket.
805 */
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 *
 * Unlinks the socket from the global L2CAP socket list and drops the
 * list's reference; a no-op while the socket is still attached to a
 * struct socket or not yet zapped.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
818
/* Close the socket according to its current state.
 * Must be called with the socket locked.
 *
 * Established connection-oriented channels send a Disconnect Request
 * (with a timer to force teardown if the peer never answers); a half-
 * open incoming channel (BT_CONNECT2) is rejected with an appropriate
 * Connect Response; everything else is deleted directly.
 */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Bound the wait for the Disconnect Response. */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* With deferred setup the application explicitly
			 * declined the connection; otherwise pretend the
			 * PSM is unknown. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
872
873 /* Must be called on unlocked socket. */
/* Must be called on unlocked socket. */
/* Process-context close: stop the socket timer, run the state-machine
 * close under lock_sock(), then reap the socket if it became orphaned. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
882
/* Initialize L2CAP-specific socket state.
 * A child of a listening socket inherits the parent's configuration;
 * a fresh socket gets the stack defaults (ERTM for SOCK_STREAM unless
 * disabled, basic mode otherwise). */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			/* STATE2_DEVICE: mode was chosen by the stack and
			 * must not be downgraded during negotiation. */
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
928
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * l2cap_pinfo that wraps each struct sock. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
934
/* Allocate and minimally initialize an L2CAP socket: destructor, send
 * timeout, state timer, and linkage into the global socket list.
 * Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
959
/* socket(2) handler for PF_BLUETOOTH/BTPROTO_L2CAP.
 * Accepts seqpacket, stream, dgram and (privileged) raw sockets. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	/* Raw access to signalling traffic requires CAP_NET_RAW unless the
	 * socket is created from within the kernel. */
	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
985
/* bind(2) handler: record the source address and PSM.
 * Privileged PSMs (< 0x1001) require CAP_NET_BIND_SERVICE; binding to a
 * CID is not supported here; SDP/RFCOMM PSMs default to SDP security. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); userspace may pass a short sockaddr. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) need no authentication. */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
1039
/* Establish (or join) the ACL link toward the destination address and
 * attach this socket as a channel on it.
 *
 * Picks the HCI authentication type from socket type, PSM and security
 * level, asks HCI for the connection, and either completes immediately
 * (link already up) or leaves the socket in BT_CONNECT with its timer
 * armed.  Returns 0 on success or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP never bonds; clamp a LOW request down to SDP level. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram channels are usable immediately. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1130
/* connect(2) handler: validate the address and channel mode, record the
 * destination, start the connection, and wait for BT_CONNECTED (or
 * return immediately for non-blocking sockets). */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM. */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1208
/* listen(2) handler: only bound seqpacket/stream sockets in a supported
 * mode may listen.  A socket bound without a PSM gets an automatically
 * allocated one from the dynamic odd-numbered range. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Valid dynamic PSMs are odd (LSB of the low octet set). */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1267
/* accept() handler for L2CAP sockets.
 *
 * Blocks (honouring O_NONBLOCK via the receive timeout) until a child
 * socket can be dequeued from the listening socket's accept queue.
 * Uses an exclusive waitqueue entry so only one waiter is woken per
 * incoming connection.  lock_sock_nested() is used because the parent
 * lock is taken while child socket locks may also be held elsewhere.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the socket lock while sleeping so the softirq path
		 * can queue new connections. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* Socket may have stopped listening while we slept. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1323
1324 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1325 {
1326 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1327 struct sock *sk = sock->sk;
1328
1329 BT_DBG("sock %p, sk %p", sock, sk);
1330
1331 addr->sa_family = AF_BLUETOOTH;
1332 *len = sizeof(struct sockaddr_l2);
1333
1334 if (peer) {
1335 la->l2_psm = l2cap_pi(sk)->psm;
1336 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1337 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1338 } else {
1339 la->l2_psm = l2cap_pi(sk)->sport;
1340 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1341 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1342 }
1343
1344 return 0;
1345 }
1346
/* Wait (interruptibly) until all transmitted ERTM I-frames have been
 * acknowledged, or the connection goes away, or a signal/socket error
 * interrupts the wait.  Called with the socket lock held; the lock is
 * dropped around each sleep.  Returns 0 or a negative errno.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;	/* sleep in 200ms slices */

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the slice if the previous one fully elapsed. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1377
/* ERTM monitor timer expiry (runs in timer/softirq context, hence
 * bh_lock_sock).  If the peer has not answered our poll after
 * remote_max_tx attempts the channel is torn down; otherwise another
 * RR/RNR poll is sent and the monitor timer is re-armed.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1397
/* ERTM retransmission timer expiry (timer/softirq context).  An
 * acknowledgement is overdue: enter the monitor phase (retry_count = 1,
 * WAIT_F set) and poll the peer with an RR/RNR carrying the P bit.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* Expect an F-bit response before resuming normal transmission. */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1413
/* Release transmitted I-frames that the peer has acknowledged.
 *
 * Frames are dropped from the head of the TX queue until the frame whose
 * tx_seq equals expected_ack_seq (the first unacked frame) is reached or
 * no unacked frames remain.  When everything is acked the retransmission
 * timer is stopped.
 */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1432
/* Hand one fully built L2CAP PDU to the ACL link of this channel's
 * connection.  Ownership of @skb passes to hci_send_acl().
 */
static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	hci_send_acl(pi->conn->hcon, skb, 0);
}
1441
1442 static void l2cap_streaming_send(struct sock *sk)
1443 {
1444 struct sk_buff *skb, *tx_skb;
1445 struct l2cap_pinfo *pi = l2cap_pi(sk);
1446 u16 control, fcs;
1447
1448 while ((skb = sk->sk_send_head)) {
1449 tx_skb = skb_clone(skb, GFP_ATOMIC);
1450
1451 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1452 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1453 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1454
1455 if (pi->fcs == L2CAP_FCS_CRC16) {
1456 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1457 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1458 }
1459
1460 l2cap_do_send(sk, tx_skb);
1461
1462 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1463
1464 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1465 sk->sk_send_head = NULL;
1466 else
1467 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1468
1469 skb = skb_dequeue(TX_QUEUE(sk));
1470 kfree_skb(skb);
1471 }
1472 }
1473
1474 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1475 {
1476 struct l2cap_pinfo *pi = l2cap_pi(sk);
1477 struct sk_buff *skb, *tx_skb;
1478 u16 control, fcs;
1479
1480 skb = skb_peek(TX_QUEUE(sk));
1481 if (!skb)
1482 return;
1483
1484 do {
1485 if (bt_cb(skb)->tx_seq == tx_seq)
1486 break;
1487
1488 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1489 return;
1490
1491 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1492
1493 if (pi->remote_max_tx &&
1494 bt_cb(skb)->retries == pi->remote_max_tx) {
1495 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1496 return;
1497 }
1498
1499 tx_skb = skb_clone(skb, GFP_ATOMIC);
1500 bt_cb(skb)->retries++;
1501 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1502
1503 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1504 control |= L2CAP_CTRL_FINAL;
1505 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1506 }
1507
1508 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1509 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1510
1511 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1512
1513 if (pi->fcs == L2CAP_FCS_CRC16) {
1514 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1515 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1516 }
1517
1518 l2cap_do_send(sk, tx_skb);
1519 }
1520
1521 static int l2cap_ertm_send(struct sock *sk)
1522 {
1523 struct sk_buff *skb, *tx_skb;
1524 struct l2cap_pinfo *pi = l2cap_pi(sk);
1525 u16 control, fcs;
1526 int nsent = 0;
1527
1528 if (sk->sk_state != BT_CONNECTED)
1529 return -ENOTCONN;
1530
1531 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1532
1533 if (pi->remote_max_tx &&
1534 bt_cb(skb)->retries == pi->remote_max_tx) {
1535 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1536 break;
1537 }
1538
1539 tx_skb = skb_clone(skb, GFP_ATOMIC);
1540
1541 bt_cb(skb)->retries++;
1542
1543 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1544 control &= L2CAP_CTRL_SAR;
1545
1546 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1547 control |= L2CAP_CTRL_FINAL;
1548 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1549 }
1550 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1551 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1552 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1553
1554
1555 if (pi->fcs == L2CAP_FCS_CRC16) {
1556 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1557 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1558 }
1559
1560 l2cap_do_send(sk, tx_skb);
1561
1562 __mod_retrans_timer();
1563
1564 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1565 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1566
1567 pi->unacked_frames++;
1568 pi->frames_sent++;
1569
1570 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1571 sk->sk_send_head = NULL;
1572 else
1573 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1574
1575 nsent++;
1576 }
1577
1578 return nsent;
1579 }
1580
1581 static int l2cap_retransmit_frames(struct sock *sk)
1582 {
1583 struct l2cap_pinfo *pi = l2cap_pi(sk);
1584 int ret;
1585
1586 if (!skb_queue_empty(TX_QUEUE(sk)))
1587 sk->sk_send_head = TX_QUEUE(sk)->next;
1588
1589 pi->next_tx_seq = pi->expected_ack_seq;
1590 ret = l2cap_ertm_send(sk);
1591 return ret;
1592 }
1593
/* Acknowledge received I-frames.
 *
 * If we are locally busy an RNR carrying the current buffer_seq is sent.
 * Otherwise, try to piggyback the acknowledgement on pending I-frames via
 * l2cap_ertm_send(); only when nothing was sent is an explicit RR emitted.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* I-frames carry ReqSeq, so sending data also acknowledges. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1614
/* Send a final (F bit set) SREJ S-frame for the most recently queued
 * entry on this channel's SREJ list.
 */
static void l2cap_send_srejtail(struct sock *sk)
{
	struct srej_list *tail;
	u16 control;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	/* The list tail holds the latest missing sequence number. */
	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(l2cap_pi(sk), control);
}
1628
1629 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1630 {
1631 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1632 struct sk_buff **frag;
1633 int err, sent = 0;
1634
1635 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1636 return -EFAULT;
1637
1638 sent += count;
1639 len -= count;
1640
1641 /* Continuation fragments (no L2CAP header) */
1642 frag = &skb_shinfo(skb)->frag_list;
1643 while (len) {
1644 count = min_t(unsigned int, conn->mtu, len);
1645
1646 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1647 if (!*frag)
1648 return -EFAULT;
1649 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1650 return -EFAULT;
1651
1652 sent += count;
1653 len -= count;
1654
1655 frag = &(*frag)->next;
1656 }
1657
1658 return sent;
1659 }
1660
/* Build a connectionless (SOCK_DGRAM) L2CAP PDU from user data: basic
 * L2CAP header followed by the 2-byte PSM, then the payload (fragmented
 * to the connection MTU by l2cap_skbuff_fromiovec).  Returns the skb or
 * an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	/* NOTE(review): the errno from bt_skb_send_alloc() is discarded
	 * and -ENOMEM reported instead — confirm this is intended. */
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1689
1690 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1691 {
1692 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1693 struct sk_buff *skb;
1694 int err, count, hlen = L2CAP_HDR_SIZE;
1695 struct l2cap_hdr *lh;
1696
1697 BT_DBG("sk %p len %d", sk, (int)len);
1698
1699 count = min_t(unsigned int, (conn->mtu - hlen), len);
1700 skb = bt_skb_send_alloc(sk, count + hlen,
1701 msg->msg_flags & MSG_DONTWAIT, &err);
1702 if (!skb)
1703 return ERR_PTR(-ENOMEM);
1704
1705 /* Create L2CAP header */
1706 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1707 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1708 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1709
1710 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1711 if (unlikely(err < 0)) {
1712 kfree_skb(skb);
1713 return ERR_PTR(err);
1714 }
1715 return skb;
1716 }
1717
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field (@control), optional 2-byte SDU length (@sdulen, for SAR start
 * frames), the payload, and an FCS placeholder when CRC16 is enabled
 * (the real FCS is filled in at transmit time).  Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SAR start frames carry the total SDU length. */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the trailing FCS. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; overwritten with the real CRC when sent. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1762
/* Segment an SDU larger than the remote MPS into a chain of SAR I-frames
 * (START carrying the SDU length, then CONTINUE frames, then END) and
 * splice them onto the TX queue.  Returns the number of payload bytes
 * queued or a negative errno; on error nothing is queued.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* START frame: full MPS payload, carries total SDU length. */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop everything built so far; queue untouched. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1808
1809 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1810 {
1811 struct sock *sk = sock->sk;
1812 struct l2cap_pinfo *pi = l2cap_pi(sk);
1813 struct sk_buff *skb;
1814 u16 control;
1815 int err;
1816
1817 BT_DBG("sock %p, sk %p", sock, sk);
1818
1819 err = sock_error(sk);
1820 if (err)
1821 return err;
1822
1823 if (msg->msg_flags & MSG_OOB)
1824 return -EOPNOTSUPP;
1825
1826 lock_sock(sk);
1827
1828 if (sk->sk_state != BT_CONNECTED) {
1829 err = -ENOTCONN;
1830 goto done;
1831 }
1832
1833 /* Connectionless channel */
1834 if (sk->sk_type == SOCK_DGRAM) {
1835 skb = l2cap_create_connless_pdu(sk, msg, len);
1836 if (IS_ERR(skb)) {
1837 err = PTR_ERR(skb);
1838 } else {
1839 l2cap_do_send(sk, skb);
1840 err = len;
1841 }
1842 goto done;
1843 }
1844
1845 switch (pi->mode) {
1846 case L2CAP_MODE_BASIC:
1847 /* Check outgoing MTU */
1848 if (len > pi->omtu) {
1849 err = -EMSGSIZE;
1850 goto done;
1851 }
1852
1853 /* Create a basic PDU */
1854 skb = l2cap_create_basic_pdu(sk, msg, len);
1855 if (IS_ERR(skb)) {
1856 err = PTR_ERR(skb);
1857 goto done;
1858 }
1859
1860 l2cap_do_send(sk, skb);
1861 err = len;
1862 break;
1863
1864 case L2CAP_MODE_ERTM:
1865 case L2CAP_MODE_STREAMING:
1866 /* Entire SDU fits into one PDU */
1867 if (len <= pi->remote_mps) {
1868 control = L2CAP_SDU_UNSEGMENTED;
1869 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1870 if (IS_ERR(skb)) {
1871 err = PTR_ERR(skb);
1872 goto done;
1873 }
1874 __skb_queue_tail(TX_QUEUE(sk), skb);
1875
1876 if (sk->sk_send_head == NULL)
1877 sk->sk_send_head = skb;
1878
1879 } else {
1880 /* Segment SDU into multiples PDUs */
1881 err = l2cap_sar_segment_sdu(sk, msg, len);
1882 if (err < 0)
1883 goto done;
1884 }
1885
1886 if (pi->mode == L2CAP_MODE_STREAMING) {
1887 l2cap_streaming_send(sk);
1888 } else {
1889 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1890 pi->conn_state && L2CAP_CONN_WAIT_F) {
1891 err = len;
1892 break;
1893 }
1894 err = l2cap_ertm_send(sk);
1895 }
1896
1897 if (err >= 0)
1898 err = len;
1899 break;
1900
1901 default:
1902 BT_DBG("bad state %1.1x", pi->mode);
1903 err = -EBADFD;
1904 }
1905
1906 done:
1907 release_sock(sk);
1908 return err;
1909 }
1910
/* recvmsg() handler for L2CAP sockets.
 *
 * If the connection setup was deferred (BT_DEFER_SETUP) and the socket is
 * still in BT_CONNECT2, the first read completes the handshake: the
 * pending connect response is sent, followed by our configure request
 * (unless one was already sent).  Otherwise this delegates to the common
 * Bluetooth receive path.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		/* Accept the connection we had deferred. */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1949
/* Legacy SOL_L2CAP setsockopt handler.
 *
 * L2CAP_OPTIONS: merges the user-supplied struct l2cap_options over the
 * current values (short copies keep the existing tail) and applies it.
 * L2CAP_LM: maps link-mode flags onto the security level and the
 * role-switch / force-reliable flags.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Seed with current settings so a short copy_from_user
		 * leaves unspecified fields unchanged. */
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		/* NOTE(review): when the mode switch above set err, the
		 * invalid mode has already been stored and the options
		 * below are still applied — confirm this is intended. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs  = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested link-mode flag wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2029
/* setsockopt() handler for L2CAP sockets.
 *
 * SOL_L2CAP is routed to the legacy handler; SOL_BLUETOOTH supports
 * BT_SECURITY (connection-oriented and raw sockets only) and
 * BT_DEFER_SETUP (bound/listening sockets only).
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default level when the user copy is shorter than sec. */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2094
2095 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2096 {
2097 struct sock *sk = sock->sk;
2098 struct l2cap_options opts;
2099 struct l2cap_conninfo cinfo;
2100 int len, err = 0;
2101 u32 opt;
2102
2103 BT_DBG("sk %p", sk);
2104
2105 if (get_user(len, optlen))
2106 return -EFAULT;
2107
2108 lock_sock(sk);
2109
2110 switch (optname) {
2111 case L2CAP_OPTIONS:
2112 opts.imtu = l2cap_pi(sk)->imtu;
2113 opts.omtu = l2cap_pi(sk)->omtu;
2114 opts.flush_to = l2cap_pi(sk)->flush_to;
2115 opts.mode = l2cap_pi(sk)->mode;
2116 opts.fcs = l2cap_pi(sk)->fcs;
2117 opts.max_tx = l2cap_pi(sk)->max_tx;
2118 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2119
2120 len = min_t(unsigned int, len, sizeof(opts));
2121 if (copy_to_user(optval, (char *) &opts, len))
2122 err = -EFAULT;
2123
2124 break;
2125
2126 case L2CAP_LM:
2127 switch (l2cap_pi(sk)->sec_level) {
2128 case BT_SECURITY_LOW:
2129 opt = L2CAP_LM_AUTH;
2130 break;
2131 case BT_SECURITY_MEDIUM:
2132 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2133 break;
2134 case BT_SECURITY_HIGH:
2135 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2136 L2CAP_LM_SECURE;
2137 break;
2138 default:
2139 opt = 0;
2140 break;
2141 }
2142
2143 if (l2cap_pi(sk)->role_switch)
2144 opt |= L2CAP_LM_MASTER;
2145
2146 if (l2cap_pi(sk)->force_reliable)
2147 opt |= L2CAP_LM_RELIABLE;
2148
2149 if (put_user(opt, (u32 __user *) optval))
2150 err = -EFAULT;
2151 break;
2152
2153 case L2CAP_CONNINFO:
2154 if (sk->sk_state != BT_CONNECTED &&
2155 !(sk->sk_state == BT_CONNECT2 &&
2156 bt_sk(sk)->defer_setup)) {
2157 err = -ENOTCONN;
2158 break;
2159 }
2160
2161 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2162 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2163
2164 len = min_t(unsigned int, len, sizeof(cinfo));
2165 if (copy_to_user(optval, (char *) &cinfo, len))
2166 err = -EFAULT;
2167
2168 break;
2169
2170 default:
2171 err = -ENOPROTOOPT;
2172 break;
2173 }
2174
2175 release_sock(sk);
2176 return err;
2177 }
2178
2179 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2180 {
2181 struct sock *sk = sock->sk;
2182 struct bt_security sec;
2183 int len, err = 0;
2184
2185 BT_DBG("sk %p", sk);
2186
2187 if (level == SOL_L2CAP)
2188 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2189
2190 if (level != SOL_BLUETOOTH)
2191 return -ENOPROTOOPT;
2192
2193 if (get_user(len, optlen))
2194 return -EFAULT;
2195
2196 lock_sock(sk);
2197
2198 switch (optname) {
2199 case BT_SECURITY:
2200 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2201 && sk->sk_type != SOCK_RAW) {
2202 err = -EINVAL;
2203 break;
2204 }
2205
2206 sec.level = l2cap_pi(sk)->sec_level;
2207
2208 len = min_t(unsigned int, len, sizeof(sec));
2209 if (copy_to_user(optval, (char *) &sec, len))
2210 err = -EFAULT;
2211
2212 break;
2213
2214 case BT_DEFER_SETUP:
2215 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2216 err = -EINVAL;
2217 break;
2218 }
2219
2220 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2221 err = -EFAULT;
2222
2223 break;
2224
2225 default:
2226 err = -ENOPROTOOPT;
2227 break;
2228 }
2229
2230 release_sock(sk);
2231 return err;
2232 }
2233
/* shutdown() handler for L2CAP sockets.
 *
 * For ERTM channels, first waits for outstanding I-frames to be
 * acknowledged, then closes the channel; with SO_LINGER set it also
 * waits for the socket to reach BT_CLOSED.  Idempotent: a second call
 * finds sk_shutdown already set and skips the close.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* Drain unacked ERTM frames before tearing down. */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* Report a pending socket error if nothing else failed. */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2264
/* release() handler for L2CAP sockets: full shutdown, then detach the
 * sock from the socket and let l2cap_sock_kill() free it once no
 * channel state keeps it alive.
 */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
2281
2282 static void l2cap_chan_ready(struct sock *sk)
2283 {
2284 struct sock *parent = bt_sk(sk)->parent;
2285
2286 BT_DBG("sk %p, parent %p", sk, parent);
2287
2288 l2cap_pi(sk)->conf_state = 0;
2289 l2cap_sock_clear_timer(sk);
2290
2291 if (!parent) {
2292 /* Outgoing channel.
2293 * Wake up socket sleeping on connect.
2294 */
2295 sk->sk_state = BT_CONNECTED;
2296 sk->sk_state_change(sk);
2297 } else {
2298 /* Incoming channel.
2299 * Wake up socket sleeping on accept.
2300 */
2301 parent->sk_data_ready(parent, 0);
2302 }
2303 }
2304
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Clone failure just skips this socket; best effort. */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Queue rejected (e.g. rcvbuf full): drop the clone. */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2331
/* ---- L2CAP signalling commands ---- */

/* Build a signalling-channel command PDU: L2CAP header (CID 0x0001),
 * command header (@code, @ident, @dlen), then @dlen bytes of @data,
 * fragmented to the connection MTU.  Returns the skb or NULL on
 * allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment: whatever payload fits after the headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the fragment chain built so far as well. */
	kfree_skb(skb);
	return NULL;
}
2391
2392 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2393 {
2394 struct l2cap_conf_opt *opt = *ptr;
2395 int len;
2396
2397 len = L2CAP_CONF_OPT_SIZE + opt->len;
2398 *ptr += len;
2399
2400 *type = opt->type;
2401 *olen = opt->len;
2402
2403 switch (opt->len) {
2404 case 1:
2405 *val = *((u8 *) opt->val);
2406 break;
2407
2408 case 2:
2409 *val = __le16_to_cpu(*((__le16 *) opt->val));
2410 break;
2411
2412 case 4:
2413 *val = __le32_to_cpu(*((__le32 *) opt->val));
2414 break;
2415
2416 default:
2417 *val = (unsigned long) opt->val;
2418 break;
2419 }
2420
2421 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2422 return len;
2423 }
2424
2425 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2426 {
2427 struct l2cap_conf_opt *opt = *ptr;
2428
2429 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2430
2431 opt->type = type;
2432 opt->len = len;
2433
2434 switch (len) {
2435 case 1:
2436 *((u8 *) opt->val) = val;
2437 break;
2438
2439 case 2:
2440 *((__le16 *) opt->val) = cpu_to_le16(val);
2441 break;
2442
2443 case 4:
2444 *((__le32 *) opt->val) = cpu_to_le32(val);
2445 break;
2446
2447 default:
2448 memcpy(opt->val, (void *) val, len);
2449 break;
2450 }
2451
2452 *ptr += L2CAP_CONF_OPT_SIZE + len;
2453 }
2454
/* ERTM acknowledgement timer expiry (timer/softirq context): send the
 * acknowledgement we have been batching.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2463
/* Initialise per-channel ERTM state: reset the sequence counters, set up
 * the retransmission/monitor/ack timers, the SREJ and busy queues, the
 * local-busy worker, and install the ERTM backlog receive handler.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
2486
2487 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2488 {
2489 switch (mode) {
2490 case L2CAP_MODE_STREAMING:
2491 case L2CAP_MODE_ERTM:
2492 if (l2cap_mode_supported(mode, remote_feat_mask))
2493 return mode;
2494 /* fall through */
2495 default:
2496 return L2CAP_MODE_BASIC;
2497 }
2498 }
2499
/* Build an outgoing Configure Request for the channel into @data.
 *
 * On the very first exchange (no conf req/rsp seen yet) the channel mode
 * may still be downgraded to whatever the remote's feature mask supports;
 * a mode pinned by L2CAP_CONF_STATE2_DEVICE is never downgraded.  Options
 * are appended after the fixed header and the total request length is
 * returned.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection only happens on the first request. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Locally-mandated mode: keep it as is. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* MTU option is only sent when it differs from the default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		/* Remote knows about RFC: state basic mode explicitly. */
		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a whole frame (plus per-frame
		 * overhead -- presumably the 10 bytes of header/control/FCS;
		 * TODO confirm) fits into the link MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request "no FCS" only when both sides agree to drop it. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2601
2602 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2603 {
2604 struct l2cap_pinfo *pi = l2cap_pi(sk);
2605 struct l2cap_conf_rsp *rsp = data;
2606 void *ptr = rsp->data;
2607 void *req = pi->conf_req;
2608 int len = pi->conf_len;
2609 int type, hint, olen;
2610 unsigned long val;
2611 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2612 u16 mtu = L2CAP_DEFAULT_MTU;
2613 u16 result = L2CAP_CONF_SUCCESS;
2614
2615 BT_DBG("sk %p", sk);
2616
2617 while (len >= L2CAP_CONF_OPT_SIZE) {
2618 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2619
2620 hint = type & L2CAP_CONF_HINT;
2621 type &= L2CAP_CONF_MASK;
2622
2623 switch (type) {
2624 case L2CAP_CONF_MTU:
2625 mtu = val;
2626 break;
2627
2628 case L2CAP_CONF_FLUSH_TO:
2629 pi->flush_to = val;
2630 break;
2631
2632 case L2CAP_CONF_QOS:
2633 break;
2634
2635 case L2CAP_CONF_RFC:
2636 if (olen == sizeof(rfc))
2637 memcpy(&rfc, (void *) val, olen);
2638 break;
2639
2640 case L2CAP_CONF_FCS:
2641 if (val == L2CAP_FCS_NONE)
2642 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2643
2644 break;
2645
2646 default:
2647 if (hint)
2648 break;
2649
2650 result = L2CAP_CONF_UNKNOWN;
2651 *((u8 *) ptr++) = type;
2652 break;
2653 }
2654 }
2655
2656 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2657 goto done;
2658
2659 switch (pi->mode) {
2660 case L2CAP_MODE_STREAMING:
2661 case L2CAP_MODE_ERTM:
2662 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2663 pi->mode = l2cap_select_mode(rfc.mode,
2664 pi->conn->feat_mask);
2665 break;
2666 }
2667
2668 if (pi->mode != rfc.mode)
2669 return -ECONNREFUSED;
2670
2671 break;
2672 }
2673
2674 done:
2675 if (pi->mode != rfc.mode) {
2676 result = L2CAP_CONF_UNACCEPT;
2677 rfc.mode = pi->mode;
2678
2679 if (pi->num_conf_rsp == 1)
2680 return -ECONNREFUSED;
2681
2682 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2683 sizeof(rfc), (unsigned long) &rfc);
2684 }
2685
2686
2687 if (result == L2CAP_CONF_SUCCESS) {
2688 /* Configure output options and let the other side know
2689 * which ones we don't like. */
2690
2691 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2692 result = L2CAP_CONF_UNACCEPT;
2693 else {
2694 pi->omtu = mtu;
2695 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2696 }
2697 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2698
2699 switch (rfc.mode) {
2700 case L2CAP_MODE_BASIC:
2701 pi->fcs = L2CAP_FCS_NONE;
2702 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2703 break;
2704
2705 case L2CAP_MODE_ERTM:
2706 pi->remote_tx_win = rfc.txwin_size;
2707 pi->remote_max_tx = rfc.max_transmit;
2708
2709 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2710 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2711
2712 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2713
2714 rfc.retrans_timeout =
2715 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2716 rfc.monitor_timeout =
2717 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2718
2719 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2720
2721 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2722 sizeof(rfc), (unsigned long) &rfc);
2723
2724 break;
2725
2726 case L2CAP_MODE_STREAMING:
2727 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2728 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2729
2730 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2731
2732 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2733
2734 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2735 sizeof(rfc), (unsigned long) &rfc);
2736
2737 break;
2738
2739 default:
2740 result = L2CAP_CONF_UNACCEPT;
2741
2742 memset(&rfc, 0, sizeof(rfc));
2743 rfc.mode = pi->mode;
2744 }
2745
2746 if (result == L2CAP_CONF_SUCCESS)
2747 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2748 }
2749 rsp->scid = cpu_to_le16(pi->dcid);
2750 rsp->result = cpu_to_le16(result);
2751 rsp->flags = cpu_to_le16(0x0000);
2752
2753 return ptr - data;
2754 }
2755
2756 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2757 {
2758 struct l2cap_pinfo *pi = l2cap_pi(sk);
2759 struct l2cap_conf_req *req = data;
2760 void *ptr = req->data;
2761 int type, olen;
2762 unsigned long val;
2763 struct l2cap_conf_rfc rfc;
2764
2765 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2766
2767 while (len >= L2CAP_CONF_OPT_SIZE) {
2768 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2769
2770 switch (type) {
2771 case L2CAP_CONF_MTU:
2772 if (val < L2CAP_DEFAULT_MIN_MTU) {
2773 *result = L2CAP_CONF_UNACCEPT;
2774 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2775 } else
2776 pi->omtu = val;
2777 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2778 break;
2779
2780 case L2CAP_CONF_FLUSH_TO:
2781 pi->flush_to = val;
2782 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2783 2, pi->flush_to);
2784 break;
2785
2786 case L2CAP_CONF_RFC:
2787 if (olen == sizeof(rfc))
2788 memcpy(&rfc, (void *)val, olen);
2789
2790 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2791 rfc.mode != pi->mode)
2792 return -ECONNREFUSED;
2793
2794 pi->fcs = 0;
2795
2796 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2797 sizeof(rfc), (unsigned long) &rfc);
2798 break;
2799 }
2800 }
2801
2802 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2803 return -ECONNREFUSED;
2804
2805 pi->mode = rfc.mode;
2806
2807 if (*result == L2CAP_CONF_SUCCESS) {
2808 switch (rfc.mode) {
2809 case L2CAP_MODE_ERTM:
2810 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2811 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2812 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2813 break;
2814 case L2CAP_MODE_STREAMING:
2815 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2816 }
2817 }
2818
2819 req->dcid = cpu_to_le16(pi->dcid);
2820 req->flags = cpu_to_le16(0x0000);
2821
2822 return ptr - data;
2823 }
2824
2825 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2826 {
2827 struct l2cap_conf_rsp *rsp = data;
2828 void *ptr = rsp->data;
2829
2830 BT_DBG("sk %p", sk);
2831
2832 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2833 rsp->result = cpu_to_le16(result);
2834 rsp->flags = cpu_to_le16(flags);
2835
2836 return ptr - data;
2837 }
2838
2839 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2840 {
2841 struct l2cap_pinfo *pi = l2cap_pi(sk);
2842 int type, olen;
2843 unsigned long val;
2844 struct l2cap_conf_rfc rfc;
2845
2846 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2847
2848 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2849 return;
2850
2851 while (len >= L2CAP_CONF_OPT_SIZE) {
2852 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2853
2854 switch (type) {
2855 case L2CAP_CONF_RFC:
2856 if (olen == sizeof(rfc))
2857 memcpy(&rfc, (void *)val, olen);
2858 goto done;
2859 }
2860 }
2861
2862 done:
2863 switch (rfc.mode) {
2864 case L2CAP_MODE_ERTM:
2865 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2866 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2867 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2868 break;
2869 case L2CAP_MODE_STREAMING:
2870 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2871 }
2872 }
2873
2874 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2875 {
2876 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2877
2878 if (rej->reason != 0x0000)
2879 return 0;
2880
2881 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2882 cmd->ident == conn->info_ident) {
2883 del_timer(&conn->info_timer);
2884
2885 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2886 conn->info_ident = 0;
2887
2888 l2cap_conn_start(conn);
2889 }
2890
2891 return 0;
2892 }
2893
2894 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2895 {
2896 struct l2cap_chan_list *list = &conn->chan_list;
2897 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2898 struct l2cap_conn_rsp rsp;
2899 struct sock *parent, *uninitialized_var(sk);
2900 int result, status = L2CAP_CS_NO_INFO;
2901
2902 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2903 __le16 psm = req->psm;
2904
2905 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2906
2907 /* Check if we have socket listening on psm */
2908 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2909 if (!parent) {
2910 result = L2CAP_CR_BAD_PSM;
2911 goto sendresp;
2912 }
2913
2914 /* Check if the ACL is secure enough (if not SDP) */
2915 if (psm != cpu_to_le16(0x0001) &&
2916 !hci_conn_check_link_mode(conn->hcon)) {
2917 conn->disc_reason = 0x05;
2918 result = L2CAP_CR_SEC_BLOCK;
2919 goto response;
2920 }
2921
2922 result = L2CAP_CR_NO_MEM;
2923
2924 /* Check for backlog size */
2925 if (sk_acceptq_is_full(parent)) {
2926 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2927 goto response;
2928 }
2929
2930 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2931 if (!sk)
2932 goto response;
2933
2934 write_lock_bh(&list->lock);
2935
2936 /* Check if we already have channel with that dcid */
2937 if (__l2cap_get_chan_by_dcid(list, scid)) {
2938 write_unlock_bh(&list->lock);
2939 sock_set_flag(sk, SOCK_ZAPPED);
2940 l2cap_sock_kill(sk);
2941 goto response;
2942 }
2943
2944 hci_conn_hold(conn->hcon);
2945
2946 l2cap_sock_init(sk, parent);
2947 bacpy(&bt_sk(sk)->src, conn->src);
2948 bacpy(&bt_sk(sk)->dst, conn->dst);
2949 l2cap_pi(sk)->psm = psm;
2950 l2cap_pi(sk)->dcid = scid;
2951
2952 __l2cap_chan_add(conn, sk, parent);
2953 dcid = l2cap_pi(sk)->scid;
2954
2955 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2956
2957 l2cap_pi(sk)->ident = cmd->ident;
2958
2959 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2960 if (l2cap_check_security(sk)) {
2961 if (bt_sk(sk)->defer_setup) {
2962 sk->sk_state = BT_CONNECT2;
2963 result = L2CAP_CR_PEND;
2964 status = L2CAP_CS_AUTHOR_PEND;
2965 parent->sk_data_ready(parent, 0);
2966 } else {
2967 sk->sk_state = BT_CONFIG;
2968 result = L2CAP_CR_SUCCESS;
2969 status = L2CAP_CS_NO_INFO;
2970 }
2971 } else {
2972 sk->sk_state = BT_CONNECT2;
2973 result = L2CAP_CR_PEND;
2974 status = L2CAP_CS_AUTHEN_PEND;
2975 }
2976 } else {
2977 sk->sk_state = BT_CONNECT2;
2978 result = L2CAP_CR_PEND;
2979 status = L2CAP_CS_NO_INFO;
2980 }
2981
2982 write_unlock_bh(&list->lock);
2983
2984 response:
2985 bh_unlock_sock(parent);
2986
2987 sendresp:
2988 rsp.scid = cpu_to_le16(scid);
2989 rsp.dcid = cpu_to_le16(dcid);
2990 rsp.result = cpu_to_le16(result);
2991 rsp.status = cpu_to_le16(status);
2992 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2993
2994 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2995 struct l2cap_info_req info;
2996 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2997
2998 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2999 conn->info_ident = l2cap_get_ident(conn);
3000
3001 mod_timer(&conn->info_timer, jiffies +
3002 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
3003
3004 l2cap_send_cmd(conn, conn->info_ident,
3005 L2CAP_INFO_REQ, sizeof(info), &info);
3006 }
3007
3008 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3009 result == L2CAP_CR_SUCCESS) {
3010 u8 buf[128];
3011 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3012 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3013 l2cap_build_conf_req(sk, buf), buf);
3014 l2cap_pi(sk)->num_conf_req++;
3015 }
3016
3017 return 0;
3018 }
3019
/* Handle a Connection Response from the remote.
 *
 * The channel is looked up by our scid or, while still pending, by the
 * command ident.  On success the first Configure Request is sent (unless
 * one is already in flight); on a pending result the state is recorded;
 * any other result tears the channel down.  Returning -EFAULT makes the
 * signalling loop reply with a Command Reject.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* NOTE(review): the lookup helpers appear to return the socket
	 * bh-locked -- it is unlocked at the bottom.  Confirm against
	 * their definitions. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Only one Configure Request may be outstanding. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3073
/* Handle a Configure Request from the remote.
 *
 * Fragments (flag bit 0 set) are accumulated into pi->conf_req; once the
 * request is complete it is parsed, a response is sent, and -- when both
 * input and output configuration are done -- the channel is moved to
 * BT_CONNECTED (initializing ERTM state if needed).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): lookup appears to return the socket bh-locked;
	 * it is unlocked at "unlock". */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		/* 0x0002 = invalid CID in request (reject reason). */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unresolvable mode conflict: drop the channel. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* FCS defaults to CRC16 unless both sides opted out. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3165
3166 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3167 {
3168 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3169 u16 scid, flags, result;
3170 struct sock *sk;
3171 int len = cmd->len - sizeof(*rsp);
3172
3173 scid = __le16_to_cpu(rsp->scid);
3174 flags = __le16_to_cpu(rsp->flags);
3175 result = __le16_to_cpu(rsp->result);
3176
3177 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3178 scid, flags, result);
3179
3180 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3181 if (!sk)
3182 return 0;
3183
3184 switch (result) {
3185 case L2CAP_CONF_SUCCESS:
3186 l2cap_conf_rfc_get(sk, rsp->data, len);
3187 break;
3188
3189 case L2CAP_CONF_UNACCEPT:
3190 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3191 char req[64];
3192
3193 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3194 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3195 goto done;
3196 }
3197
3198 /* throw out any old stored conf requests */
3199 result = L2CAP_CONF_SUCCESS;
3200 len = l2cap_parse_conf_rsp(sk, rsp->data,
3201 len, req, &result);
3202 if (len < 0) {
3203 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3204 goto done;
3205 }
3206
3207 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3208 L2CAP_CONF_REQ, len, req);
3209 l2cap_pi(sk)->num_conf_req++;
3210 if (result != L2CAP_CONF_SUCCESS)
3211 goto done;
3212 break;
3213 }
3214
3215 default:
3216 sk->sk_err = ECONNRESET;
3217 l2cap_sock_set_timer(sk, HZ * 5);
3218 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3219 goto done;
3220 }
3221
3222 if (flags & 0x01)
3223 goto done;
3224
3225 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3226
3227 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3228 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3229 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3230 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3231
3232 sk->sk_state = BT_CONNECTED;
3233 l2cap_pi(sk)->next_tx_seq = 0;
3234 l2cap_pi(sk)->expected_tx_seq = 0;
3235 __skb_queue_head_init(TX_QUEUE(sk));
3236 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3237 l2cap_ertm_init(sk);
3238
3239 l2cap_chan_ready(sk);
3240 }
3241
3242 done:
3243 bh_unlock_sock(sk);
3244 return 0;
3245 }
3246
3247 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3248 {
3249 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3250 struct l2cap_disconn_rsp rsp;
3251 u16 dcid, scid;
3252 struct sock *sk;
3253
3254 scid = __le16_to_cpu(req->scid);
3255 dcid = __le16_to_cpu(req->dcid);
3256
3257 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3258
3259 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3260 if (!sk)
3261 return 0;
3262
3263 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3264 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3265 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3266
3267 sk->sk_shutdown = SHUTDOWN_MASK;
3268
3269 l2cap_chan_del(sk, ECONNRESET);
3270 bh_unlock_sock(sk);
3271
3272 l2cap_sock_kill(sk);
3273 return 0;
3274 }
3275
3276 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3277 {
3278 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3279 u16 dcid, scid;
3280 struct sock *sk;
3281
3282 scid = __le16_to_cpu(rsp->scid);
3283 dcid = __le16_to_cpu(rsp->dcid);
3284
3285 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3286
3287 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3288 if (!sk)
3289 return 0;
3290
3291 l2cap_chan_del(sk, 0);
3292 bh_unlock_sock(sk);
3293
3294 l2cap_sock_kill(sk);
3295 return 0;
3296 }
3297
3298 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3299 {
3300 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3301 u16 type;
3302
3303 type = __le16_to_cpu(req->type);
3304
3305 BT_DBG("type 0x%4.4x", type);
3306
3307 if (type == L2CAP_IT_FEAT_MASK) {
3308 u8 buf[8];
3309 u32 feat_mask = l2cap_feat_mask;
3310 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3311 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3312 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3313 if (!disable_ertm)
3314 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3315 | L2CAP_FEAT_FCS;
3316 put_unaligned_le32(feat_mask, rsp->data);
3317 l2cap_send_cmd(conn, cmd->ident,
3318 L2CAP_INFO_RSP, sizeof(buf), buf);
3319 } else if (type == L2CAP_IT_FIXED_CHAN) {
3320 u8 buf[12];
3321 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3322 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3323 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3324 memcpy(buf + 4, l2cap_fixed_chan, 8);
3325 l2cap_send_cmd(conn, cmd->ident,
3326 L2CAP_INFO_RSP, sizeof(buf), buf);
3327 } else {
3328 struct l2cap_info_rsp rsp;
3329 rsp.type = cpu_to_le16(type);
3330 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3331 l2cap_send_cmd(conn, cmd->ident,
3332 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3333 }
3334
3335 return 0;
3336 }
3337
3338 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3339 {
3340 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3341 u16 type, result;
3342
3343 type = __le16_to_cpu(rsp->type);
3344 result = __le16_to_cpu(rsp->result);
3345
3346 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3347
3348 del_timer(&conn->info_timer);
3349
3350 if (result != L2CAP_IR_SUCCESS) {
3351 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3352 conn->info_ident = 0;
3353
3354 l2cap_conn_start(conn);
3355
3356 return 0;
3357 }
3358
3359 if (type == L2CAP_IT_FEAT_MASK) {
3360 conn->feat_mask = get_unaligned_le32(rsp->data);
3361
3362 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3363 struct l2cap_info_req req;
3364 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3365
3366 conn->info_ident = l2cap_get_ident(conn);
3367
3368 l2cap_send_cmd(conn, conn->info_ident,
3369 L2CAP_INFO_REQ, sizeof(req), &req);
3370 } else {
3371 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3372 conn->info_ident = 0;
3373
3374 l2cap_conn_start(conn);
3375 }
3376 } else if (type == L2CAP_IT_FIXED_CHAN) {
3377 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3378 conn->info_ident = 0;
3379
3380 l2cap_conn_start(conn);
3381 }
3382
3383 return 0;
3384 }
3385
/* Process the L2CAP signalling channel payload of @skb.
 *
 * The payload may contain several commands back to back; each is
 * dispatched to its handler, and any handler error is answered with a
 * Command Reject.  The skb is consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command longer than the remaining payload, or with a
		 * zero ident, is malformed: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the request payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the payload. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3475
3476 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3477 {
3478 u16 our_fcs, rcv_fcs;
3479 int hdr_size = L2CAP_HDR_SIZE + 2;
3480
3481 if (pi->fcs == L2CAP_FCS_CRC16) {
3482 skb_trim(skb, skb->len - 2);
3483 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3484 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3485
3486 if (our_fcs != rcv_fcs)
3487 return -EBADMSG;
3488 }
3489 return 0;
3490 }
3491
/* After a poll/final exchange, tell the remote our receive state:
 * send RNR when locally busy, retransmit/send pending I-frames
 * otherwise, and fall back to an RR when nothing was sent at all. */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Local busy: advertise Receiver Not Ready. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing went out and we are not busy: answer with a plain RR
	 * so the remote still gets an acknowledgement. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3518
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq distance from buffer_seq (sequence numbers
 * are modulo 64).  Returns -EINVAL on a duplicate tx_seq, 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of the new frame from the receive window base. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		/* Duplicate frame: reject it. */
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that sorts after us: insert before. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Sorts after everything queued so far. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3561
/* Reassemble ERTM I-frames into SDUs according to their SAR bits and
 * deliver complete SDUs to the socket receive queue.
 *
 * Returns 0 when the frame was consumed (delivered, dropped, or the
 * channel was disconnected on a protocol violation) and a negative
 * error when delivery must be retried later (receive queue full or
 * allocation failure) -- in that case the caller keeps the skb.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented SDU in the middle of reassembly is a
		 * protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* Total SDU length is carried in the first two bytes. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry after -ENOMEM/-EBUSY the payload was already
		 * appended; only do it the first time around. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* NOTE: drop deliberately falls through into disconnect -- any
	 * SAR violation tears the channel down. */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3669
/* Try to drain the local-busy queue into the socket.
 *
 * Returns -EBUSY when a frame still cannot be delivered (it is pushed
 * back onto the queue), otherwise clears the local-busy condition,
 * sends an RR with the poll bit to resume the remote, and returns 0.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: requeue at the head and give up. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We previously advertised RNR: poll the remote so it resumes
	 * transmission, and wait for its final response. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3709
/* Deferred work that retries pushing queued rx frames while the socket
 * is in the local-busy state.  Sleeps in HZ/5 slices, dropping the
 * socket lock across each sleep, and gives up after
 * L2CAP_LOCAL_BUSY_TRIES attempts (disconnecting the channel), on a
 * pending signal, or on a socket error.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			/* Busy for too long: tear the channel down */
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so the rx path can run */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3756
3757 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3758 {
3759 struct l2cap_pinfo *pi = l2cap_pi(sk);
3760 int sctrl, err;
3761
3762 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3763 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3764 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3765 return l2cap_try_push_rx_skb(sk);
3766
3767
3768 }
3769
3770 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3771 if (err >= 0) {
3772 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3773 return err;
3774 }
3775
3776 /* Busy Condition */
3777 BT_DBG("sk %p, Enter local busy", sk);
3778
3779 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3780 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3781 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3782
3783 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3784 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3785 l2cap_send_sframe(pi, sctrl);
3786
3787 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3788
3789 del_timer(&pi->ack_timer);
3790
3791 queue_work(_busy_wq, &pi->busy_work);
3792
3793 return err;
3794 }
3795
3796 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3797 {
3798 struct l2cap_pinfo *pi = l2cap_pi(sk);
3799 struct sk_buff *_skb;
3800 int err = -EINVAL;
3801
3802 /*
3803 * TODO: We have to notify the userland if some data is lost with the
3804 * Streaming Mode.
3805 */
3806
3807 switch (control & L2CAP_CTRL_SAR) {
3808 case L2CAP_SDU_UNSEGMENTED:
3809 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3810 kfree_skb(pi->sdu);
3811 break;
3812 }
3813
3814 err = sock_queue_rcv_skb(sk, skb);
3815 if (!err)
3816 return 0;
3817
3818 break;
3819
3820 case L2CAP_SDU_START:
3821 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3822 kfree_skb(pi->sdu);
3823 break;
3824 }
3825
3826 pi->sdu_len = get_unaligned_le16(skb->data);
3827 skb_pull(skb, 2);
3828
3829 if (pi->sdu_len > pi->imtu) {
3830 err = -EMSGSIZE;
3831 break;
3832 }
3833
3834 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3835 if (!pi->sdu) {
3836 err = -ENOMEM;
3837 break;
3838 }
3839
3840 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3841
3842 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3843 pi->partial_sdu_len = skb->len;
3844 err = 0;
3845 break;
3846
3847 case L2CAP_SDU_CONTINUE:
3848 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3849 break;
3850
3851 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3852
3853 pi->partial_sdu_len += skb->len;
3854 if (pi->partial_sdu_len > pi->sdu_len)
3855 kfree_skb(pi->sdu);
3856 else
3857 err = 0;
3858
3859 break;
3860
3861 case L2CAP_SDU_END:
3862 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3863 break;
3864
3865 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3866
3867 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3868 pi->partial_sdu_len += skb->len;
3869
3870 if (pi->partial_sdu_len > pi->imtu)
3871 goto drop;
3872
3873 if (pi->partial_sdu_len == pi->sdu_len) {
3874 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3875 err = sock_queue_rcv_skb(sk, _skb);
3876 if (err < 0)
3877 kfree_skb(_skb);
3878 }
3879 err = 0;
3880
3881 drop:
3882 kfree_skb(pi->sdu);
3883 break;
3884 }
3885
3886 kfree_skb(skb);
3887 return err;
3888 }
3889
3890 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3891 {
3892 struct sk_buff *skb;
3893 u16 control;
3894
3895 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3896 if (bt_cb(skb)->tx_seq != tx_seq)
3897 break;
3898
3899 skb = skb_dequeue(SREJ_QUEUE(sk));
3900 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3901 l2cap_ertm_reassembly_sdu(sk, skb, control);
3902 l2cap_pi(sk)->buffer_seq_srej =
3903 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3904 tx_seq = (tx_seq + 1) % 64;
3905 }
3906 }
3907
3908 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3909 {
3910 struct l2cap_pinfo *pi = l2cap_pi(sk);
3911 struct srej_list *l, *tmp;
3912 u16 control;
3913
3914 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3915 if (l->tx_seq == tx_seq) {
3916 list_del(&l->list);
3917 kfree(l);
3918 return;
3919 }
3920 control = L2CAP_SUPER_SELECT_REJECT;
3921 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3922 l2cap_send_sframe(pi, control);
3923 list_del(&l->list);
3924 list_add_tail(&l->list, SREJ_LIST(sk));
3925 }
3926 }
3927
3928 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3929 {
3930 struct l2cap_pinfo *pi = l2cap_pi(sk);
3931 struct srej_list *new;
3932 u16 control;
3933
3934 while (tx_seq != pi->expected_tx_seq) {
3935 control = L2CAP_SUPER_SELECT_REJECT;
3936 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3937 l2cap_send_sframe(pi, control);
3938
3939 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3940 new->tx_seq = pi->expected_tx_seq;
3941 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3942 list_add_tail(&new->list, SREJ_LIST(sk));
3943 }
3944 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3945 }
3946
3947 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3948 {
3949 struct l2cap_pinfo *pi = l2cap_pi(sk);
3950 u8 tx_seq = __get_txseq(rx_control);
3951 u8 req_seq = __get_reqseq(rx_control);
3952 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3953 int tx_seq_offset, expected_tx_seq_offset;
3954 int num_to_ack = (pi->tx_win/6) + 1;
3955 int err = 0;
3956
3957 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3958 rx_control);
3959
3960 if (L2CAP_CTRL_FINAL & rx_control &&
3961 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3962 del_timer(&pi->monitor_timer);
3963 if (pi->unacked_frames > 0)
3964 __mod_retrans_timer();
3965 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3966 }
3967
3968 pi->expected_ack_seq = req_seq;
3969 l2cap_drop_acked_frames(sk);
3970
3971 if (tx_seq == pi->expected_tx_seq)
3972 goto expected;
3973
3974 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3975 if (tx_seq_offset < 0)
3976 tx_seq_offset += 64;
3977
3978 /* invalid tx_seq */
3979 if (tx_seq_offset >= pi->tx_win) {
3980 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3981 goto drop;
3982 }
3983
3984 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3985 goto drop;
3986
3987 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3988 struct srej_list *first;
3989
3990 first = list_first_entry(SREJ_LIST(sk),
3991 struct srej_list, list);
3992 if (tx_seq == first->tx_seq) {
3993 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3994 l2cap_check_srej_gap(sk, tx_seq);
3995
3996 list_del(&first->list);
3997 kfree(first);
3998
3999 if (list_empty(SREJ_LIST(sk))) {
4000 pi->buffer_seq = pi->buffer_seq_srej;
4001 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
4002 l2cap_send_ack(pi);
4003 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4004 }
4005 } else {
4006 struct srej_list *l;
4007
4008 /* duplicated tx_seq */
4009 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4010 goto drop;
4011
4012 list_for_each_entry(l, SREJ_LIST(sk), list) {
4013 if (l->tx_seq == tx_seq) {
4014 l2cap_resend_srejframe(sk, tx_seq);
4015 return 0;
4016 }
4017 }
4018 l2cap_send_srejframe(sk, tx_seq);
4019 }
4020 } else {
4021 expected_tx_seq_offset =
4022 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4023 if (expected_tx_seq_offset < 0)
4024 expected_tx_seq_offset += 64;
4025
4026 /* duplicated tx_seq */
4027 if (tx_seq_offset < expected_tx_seq_offset)
4028 goto drop;
4029
4030 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4031
4032 BT_DBG("sk %p, Enter SREJ", sk);
4033
4034 INIT_LIST_HEAD(SREJ_LIST(sk));
4035 pi->buffer_seq_srej = pi->buffer_seq;
4036
4037 __skb_queue_head_init(SREJ_QUEUE(sk));
4038 __skb_queue_head_init(BUSY_QUEUE(sk));
4039 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4040
4041 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4042
4043 l2cap_send_srejframe(sk, tx_seq);
4044
4045 del_timer(&pi->ack_timer);
4046 }
4047 return 0;
4048
4049 expected:
4050 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4051
4052 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4053 bt_cb(skb)->tx_seq = tx_seq;
4054 bt_cb(skb)->sar = sar;
4055 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4056 return 0;
4057 }
4058
4059 err = l2cap_push_rx_skb(sk, skb, rx_control);
4060 if (err < 0)
4061 return 0;
4062
4063 if (rx_control & L2CAP_CTRL_FINAL) {
4064 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4065 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4066 else
4067 l2cap_retransmit_frames(sk);
4068 }
4069
4070 __mod_ack_timer();
4071
4072 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4073 if (pi->num_acked == num_to_ack - 1)
4074 l2cap_send_ack(pi);
4075
4076 return 0;
4077
4078 drop:
4079 kfree_skb(skb);
4080 return 0;
4081 }
4082
/* Handle a Receiver Ready (RR) S-frame: acknowledge frames up to the
 * req_seq it carries, then react to the poll/final bits -- answer a
 * poll with the SREJ tail or RR/RNR, treat a final bit as the end of a
 * WAIT_F exchange, and otherwise resume transmission.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our answer must carry the F bit */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			/* Peer left its busy state: restart retransmission */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* If an acted-on REJ already retransmitted, don't repeat */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			l2cap_ertm_send(sk);
		}
	}
}
4127
/* Handle a Reject (REJ) S-frame: the peer requests retransmission from
 * req_seq onwards.  Acknowledge up to that point, retransmit, and use
 * REJ_ACT so a later F-bit does not trigger a second retransmission.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* Already retransmitted for this REJ? Then just clear it */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember we acted on this REJ while a poll is in flight */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) S-frame: retransmit only the frame
 * identified by req_seq, honouring poll/final semantics.  SREJ_ACT and
 * srej_save_reqseq track an SREJ already served during a WAIT_F
 * exchange so the same frame is not retransmitted twice.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P bit also acknowledges frames up to tx_seq */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmit if this SREJ was already served */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4188
/* Handle a Receiver Not Ready (RNR) S-frame: the peer is busy.  Mark it
 * busy, acknowledge up to req_seq, and answer a poll with the proper
 * frame (RR/RNR with F bit, or the SREJ tail while in SREJ recovery).
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer can't receive: no point running the retransmit timer */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4215
4216 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4217 {
4218 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4219
4220 if (L2CAP_CTRL_FINAL & rx_control &&
4221 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4222 del_timer(&l2cap_pi(sk)->monitor_timer);
4223 if (l2cap_pi(sk)->unacked_frames > 0)
4224 __mod_retrans_timer();
4225 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4226 }
4227
4228 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4229 case L2CAP_SUPER_RCV_READY:
4230 l2cap_data_channel_rrframe(sk, rx_control);
4231 break;
4232
4233 case L2CAP_SUPER_REJECT:
4234 l2cap_data_channel_rejframe(sk, rx_control);
4235 break;
4236
4237 case L2CAP_SUPER_SELECT_REJECT:
4238 l2cap_data_channel_srejframe(sk, rx_control);
4239 break;
4240
4241 case L2CAP_SUPER_RCV_NOT_READY:
4242 l2cap_data_channel_rnrframe(sk, rx_control);
4243 break;
4244 }
4245
4246 kfree_skb(skb);
4247 return 0;
4248 }
4249
/* Validate and dispatch a single ERTM PDU: check the FCS, verify the
 * payload length against MPS and the req_seq against the unacked
 * window, then hand off to the I-frame or S-frame handler.  Corrupted
 * frames are silently dropped (ERTM recovery will ask for
 * retransmission); frames that violate the protocol disconnect the
 * channel.  Always consumes @skb; returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* An SDU-start I-frame carries a 2-byte SDU length header */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* The 2-byte FCS trailer is not payload */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames carry no payload at all */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4319
/* Route a payload frame to the channel identified by @cid according to
 * the channel mode (Basic / ERTM / Streaming).  Consumes @skb.
 * NOTE(review): the sk returned by l2cap_get_chan_by_scid appears to
 * come back bh-locked (the bh_unlock_sock() at 'done' releases it) --
 * confirm against that helper's definition.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Defer to the backlog when userspace holds the lock */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* SDU-start frames carry a 2-byte SDU length header */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in Streaming Mode */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* No retransmission in streaming: just resync on loss */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4407
/* Deliver a connectionless-channel frame to the socket bound to @psm,
 * dropping it when there is no listener, the socket state is wrong, or
 * the payload exceeds the socket's incoming MTU.  Consumes @skb.
 * NOTE(review): l2cap_get_sock_by_psm appears to return the socket
 * bh-locked (see bh_unlock_sock at 'done') -- confirm at its definition.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
4435
4436 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4437 {
4438 struct l2cap_hdr *lh = (void *) skb->data;
4439 u16 cid, len;
4440 __le16 psm;
4441
4442 skb_pull(skb, L2CAP_HDR_SIZE);
4443 cid = __le16_to_cpu(lh->cid);
4444 len = __le16_to_cpu(lh->len);
4445
4446 if (len != skb->len) {
4447 kfree_skb(skb);
4448 return;
4449 }
4450
4451 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4452
4453 switch (cid) {
4454 case L2CAP_CID_SIGNALING:
4455 l2cap_sig_channel(conn, skb);
4456 break;
4457
4458 case L2CAP_CID_CONN_LESS:
4459 psm = get_unaligned_le16(skb->data);
4460 skb_pull(skb, 2);
4461 l2cap_conless_channel(conn, psm, skb);
4462 break;
4463
4464 default:
4465 l2cap_data_channel(conn, cid, skb);
4466 break;
4467 }
4468 }
4469
4470 /* ---- L2CAP interface with lower layer (HCI) ---- */
4471
4472 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4473 {
4474 int exact = 0, lm1 = 0, lm2 = 0;
4475 register struct sock *sk;
4476 struct hlist_node *node;
4477
4478 if (type != ACL_LINK)
4479 return -EINVAL;
4480
4481 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4482
4483 /* Find listening sockets and check their link_mode */
4484 read_lock(&l2cap_sk_list.lock);
4485 sk_for_each(sk, node, &l2cap_sk_list.head) {
4486 if (sk->sk_state != BT_LISTEN)
4487 continue;
4488
4489 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4490 lm1 |= HCI_LM_ACCEPT;
4491 if (l2cap_pi(sk)->role_switch)
4492 lm1 |= HCI_LM_MASTER;
4493 exact++;
4494 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4495 lm2 |= HCI_LM_ACCEPT;
4496 if (l2cap_pi(sk)->role_switch)
4497 lm2 |= HCI_LM_MASTER;
4498 }
4499 }
4500 read_unlock(&l2cap_sk_list.lock);
4501
4502 return exact ? lm1 : lm2;
4503 }
4504
4505 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4506 {
4507 struct l2cap_conn *conn;
4508
4509 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4510
4511 if (hcon->type != ACL_LINK)
4512 return -EINVAL;
4513
4514 if (!status) {
4515 conn = l2cap_conn_add(hcon, status);
4516 if (conn)
4517 l2cap_conn_ready(conn);
4518 } else
4519 l2cap_conn_del(hcon, bt_err(status));
4520
4521 return 0;
4522 }
4523
4524 static int l2cap_disconn_ind(struct hci_conn *hcon)
4525 {
4526 struct l2cap_conn *conn = hcon->l2cap_data;
4527
4528 BT_DBG("hcon %p", hcon);
4529
4530 if (hcon->type != ACL_LINK || !conn)
4531 return 0x13;
4532
4533 return conn->disc_reason;
4534 }
4535
/* HCI callback: the ACL link went down; drop all L2CAP state for it. */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return -EINVAL;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
4547
4548 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4549 {
4550 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4551 return;
4552
4553 if (encrypt == 0x00) {
4554 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4555 l2cap_sock_clear_timer(sk);
4556 l2cap_sock_set_timer(sk, HZ * 5);
4557 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4558 __l2cap_sock_close(sk, ECONNREFUSED);
4559 } else {
4560 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4561 l2cap_sock_clear_timer(sk);
4562 }
4563 }
4564
/* HCI callback: authentication/encryption changed on the ACL link.
 * Walk every channel on the connection and advance its state machine:
 * established channels get their timers adjusted, a pending outgoing
 * connect is (re)sent or timed out, and an incoming connect waiting for
 * security is answered with success or L2CAP_CR_SEC_BLOCK.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channel already has a connect request in flight: skip */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security now sufficient: send the deferred
				 * connection request */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: let the channel time out */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4638
/* HCI callback: reassemble L2CAP frames from ACL data packets.  A start
 * packet either contains a complete frame (fast path: dispatched
 * directly) or seeds conn->rx_skb; continuation packets are appended
 * until rx_len reaches zero, then the whole frame is dispatched.  Any
 * inconsistency marks the connection unreliable and drops the fragment
 * in progress.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			/* Previous frame never completed: discard it */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the length field of the L2CAP header */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4726
4727 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4728 {
4729 struct sock *sk;
4730 struct hlist_node *node;
4731
4732 read_lock_bh(&l2cap_sk_list.lock);
4733
4734 sk_for_each(sk, node, &l2cap_sk_list.head) {
4735 struct l2cap_pinfo *pi = l2cap_pi(sk);
4736
4737 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4738 batostr(&bt_sk(sk)->src),
4739 batostr(&bt_sk(sk)->dst),
4740 sk->sk_state, __le16_to_cpu(pi->psm),
4741 pi->scid, pi->dcid,
4742 pi->imtu, pi->omtu, pi->sec_level);
4743 }
4744
4745 read_unlock_bh(&l2cap_sk_list.lock);
4746
4747 return 0;
4748 }
4749
/* debugfs open: bind the seq_file show routine to this file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4754
/* Read-only debugfs interface exposing the L2CAP socket list */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* dentry of the debugfs file, kept for removal at module exit */
static struct dentry *l2cap_debugfs;
4763
/* Socket operations for BTPROTO_L2CAP sockets */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
4783
/* Registered with the Bluetooth socket layer to create L2CAP sockets */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
4789
/* Callbacks registered with the HCI core for ACL link and data events */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4800
4801 static int __init l2cap_init(void)
4802 {
4803 int err;
4804
4805 err = proto_register(&l2cap_proto, 0);
4806 if (err < 0)
4807 return err;
4808
4809 _busy_wq = create_singlethread_workqueue("l2cap");
4810 if (!_busy_wq)
4811 goto error;
4812
4813 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4814 if (err < 0) {
4815 BT_ERR("L2CAP socket registration failed");
4816 goto error;
4817 }
4818
4819 err = hci_register_proto(&l2cap_hci_proto);
4820 if (err < 0) {
4821 BT_ERR("L2CAP protocol registration failed");
4822 bt_sock_unregister(BTPROTO_L2CAP);
4823 goto error;
4824 }
4825
4826 if (bt_debugfs) {
4827 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4828 bt_debugfs, NULL, &l2cap_debugfs_fops);
4829 if (!l2cap_debugfs)
4830 BT_ERR("Failed to create L2CAP debug file");
4831 }
4832
4833 BT_INFO("L2CAP ver %s", VERSION);
4834 BT_INFO("L2CAP socket layer initialized");
4835
4836 return 0;
4837
4838 error:
4839 proto_unregister(&l2cap_proto);
4840 return err;
4841 }
4842
/* Module exit: undo everything l2cap_init() set up. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Flush any pending local-busy work before destroying the queue */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4858
/* Exported no-op: referencing this symbol from another Bluetooth module
 * makes module loading pull in l2cap automatically. */
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4866
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime knob to disable Enhanced Retransmission Mode (0644: root may
 * change it via /sys/module/l2cap/parameters/disable_ertm) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");
This page took 0.216276 seconds and 6 git commands to generate.