Bluetooth: do not use assignment in if condition
[deliverable/linux.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core and sockets. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
#define VERSION "2.15"

/* Module parameter: when non-zero, ERTM and streaming modes are never
 * selected (basic mode only). */
static int disable_ertm = 0;

/* Feature mask advertised in information responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel map advertised to peers (bit meanings per L2CAP spec). */
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Workqueue used by the deferred busy handling (see l2cap_busy_work);
 * NOTE(review): exact usage lives outside this chunk — confirm there. */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets, protected by its rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void l2cap_busy_work(struct work_struct *work);

/* Forward declarations for the socket teardown helpers. */
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static int l2cap_build_conf_req(struct sock *sk, void *data);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
86 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
87 {
88 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
89 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
90 }
91
92 static void l2cap_sock_clear_timer(struct sock *sk)
93 {
94 BT_DBG("sock %p state %d", sk, sk->sk_state);
95 sk_stop_timer(sk, &sk->sk_timer);
96 }
97
/* Socket timer expiry handler (runs in timer/softirq context).
 * Chooses an error reason from the state at expiry and tears the
 * socket down; if user context currently owns the socket, the timer
 * re-arms itself and retries shortly. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		sock_put(sk);
		return;
	}

	/* Map the state at expiry to the error reported to the user. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Frees the socket if it is zapped and orphaned by now. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
130
131 /* ---- L2CAP channels ---- */
132 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
133 {
134 struct sock *s;
135 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
136 if (l2cap_pi(s)->dcid == cid)
137 break;
138 }
139 return s;
140 }
141
142 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
143 {
144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->scid == cid)
147 break;
148 }
149 return s;
150 }
151
152 /* Find channel with given SCID.
153 * Returns locked socket */
154 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
155 {
156 struct sock *s;
157 read_lock(&l->lock);
158 s = __l2cap_get_chan_by_scid(l, cid);
159 if (s)
160 bh_lock_sock(s);
161 read_unlock(&l->lock);
162 return s;
163 }
164
165 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 {
167 struct sock *s;
168 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
169 if (l2cap_pi(s)->ident == ident)
170 break;
171 }
172 return s;
173 }
174
175 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
176 {
177 struct sock *s;
178 read_lock(&l->lock);
179 s = __l2cap_get_chan_by_ident(l, ident);
180 if (s)
181 bh_lock_sock(s);
182 read_unlock(&l->lock);
183 return s;
184 }
185
186 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
187 {
188 u16 cid = L2CAP_CID_DYN_START;
189
190 for (; cid < L2CAP_CID_DYN_END; cid++) {
191 if (!__l2cap_get_chan_by_scid(l, cid))
192 return cid;
193 }
194
195 return 0;
196 }
197
198 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
199 {
200 sock_hold(sk);
201
202 if (l->head)
203 l2cap_pi(l->head)->prev_c = sk;
204
205 l2cap_pi(sk)->next_c = l->head;
206 l2cap_pi(sk)->prev_c = NULL;
207 l->head = sk;
208 }
209
/* Remove a channel from its connection's channel list and drop the
 * reference taken by __l2cap_chan_link(). */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
226
227 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
228 {
229 struct l2cap_chan_list *l = &conn->chan_list;
230
231 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
232 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
233
234 conn->disc_reason = 0x13;
235
236 l2cap_pi(sk)->conn = conn;
237
238 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
239 /* Alloc CID for connection-oriented socket */
240 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
241 } else if (sk->sk_type == SOCK_DGRAM) {
242 /* Connectionless socket */
243 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
244 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
245 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
246 } else {
247 /* Raw socket can send/recv signalling messages only */
248 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
249 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
250 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
251 }
252
253 __l2cap_chan_link(l, sk);
254
255 if (parent)
256 bt_accept_enqueue(parent, sk);
257 }
258
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted: detach and wake the listener. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM state: stop all timers and drop queued frames and the
	 * outstanding SREJ bookkeeping. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
307
308 /* Service level security */
309 static inline int l2cap_check_security(struct sock *sk)
310 {
311 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
312 __u8 auth_type;
313
314 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
315 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
316 auth_type = HCI_AT_NO_BONDING_MITM;
317 else
318 auth_type = HCI_AT_NO_BONDING;
319
320 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
321 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
322 } else {
323 switch (l2cap_pi(sk)->sec_level) {
324 case BT_SECURITY_HIGH:
325 auth_type = HCI_AT_GENERAL_BONDING_MITM;
326 break;
327 case BT_SECURITY_MEDIUM:
328 auth_type = HCI_AT_GENERAL_BONDING;
329 break;
330 default:
331 auth_type = HCI_AT_NO_BONDING;
332 break;
333 }
334 }
335
336 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
337 auth_type);
338 }
339
340 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
341 {
342 u8 id;
343
344 /* Get next available identificator.
345 * 1 - 128 are used by kernel.
346 * 129 - 199 are reserved.
347 * 200 - 254 are used by utilities like l2ping, etc.
348 */
349
350 spin_lock_bh(&conn->lock);
351
352 if (++conn->tx_ident > 128)
353 conn->tx_ident = 1;
354
355 id = conn->tx_ident;
356
357 spin_unlock_bh(&conn->lock);
358
359 return id;
360 }
361
362 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
363 {
364 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
365
366 BT_DBG("code 0x%2.2x", code);
367
368 if (!skb)
369 return;
370
371 hci_send_acl(conn->hcon, skb, 0);
372 }
373
/* Build and transmit one S-frame (supervisory frame) for an ERTM or
 * streaming channel. Folds any pending FINAL/POLL bits into the
 * control field and appends an FCS when the channel uses CRC16. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + 16-bit control */

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Consume a pending F-bit, if one is owed to the peer. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Consume a pending P-bit, if one is owed to the peer. */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
419
420 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
421 {
422 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
423 control |= L2CAP_SUPER_RCV_NOT_READY;
424 pi->conn_state |= L2CAP_CONN_RNR_SENT;
425 } else
426 control |= L2CAP_SUPER_RCV_READY;
427
428 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
429
430 l2cap_send_sframe(pi, control);
431 }
432
433 static inline int __l2cap_no_conn_pending(struct sock *sk)
434 {
435 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
436 }
437
/* Advance channel establishment on a connection-oriented socket.
 * If the remote feature mask is already known, send a connect request
 * (once security permits); otherwise start a feature-mask information
 * exchange first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Info response still outstanding; retried when it lands. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the info response. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
471
472 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
473 {
474 u32 local_feat_mask = l2cap_feat_mask;
475 if (!disable_ertm)
476 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
477
478 switch (mode) {
479 case L2CAP_MODE_ERTM:
480 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
481 case L2CAP_MODE_STREAMING:
482 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
483 default:
484 return 0x00;
485 }
486 }
487
/* Send an L2CAP disconnect request for the channel and move the
 * socket to BT_DISCONN, recording 'err' as its socket error. Stops
 * the ERTM timers and drops any not-yet-sent data first. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Pending outgoing data can never be delivered now. */
	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
511
512 /* ---- L2CAP connections ---- */
513 static void l2cap_conn_start(struct l2cap_conn *conn)
514 {
515 struct l2cap_chan_list *l = &conn->chan_list;
516 struct sock_del_list del, *tmp1, *tmp2;
517 struct sock *sk;
518
519 BT_DBG("conn %p", conn);
520
521 INIT_LIST_HEAD(&del.list);
522
523 read_lock(&l->lock);
524
525 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
526 bh_lock_sock(sk);
527
528 if (sk->sk_type != SOCK_SEQPACKET &&
529 sk->sk_type != SOCK_STREAM) {
530 bh_unlock_sock(sk);
531 continue;
532 }
533
534 if (sk->sk_state == BT_CONNECT) {
535 struct l2cap_conn_req req;
536
537 if (!l2cap_check_security(sk) ||
538 !__l2cap_no_conn_pending(sk)) {
539 bh_unlock_sock(sk);
540 continue;
541 }
542
543 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
544 conn->feat_mask)
545 && l2cap_pi(sk)->conf_state &
546 L2CAP_CONF_STATE2_DEVICE) {
547 tmp1 = kzalloc(sizeof(struct sock_del_list),
548 GFP_ATOMIC);
549 tmp1->sk = sk;
550 list_add_tail(&tmp1->list, &del.list);
551 bh_unlock_sock(sk);
552 continue;
553 }
554
555 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
556 req.psm = l2cap_pi(sk)->psm;
557
558 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
559 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
560
561 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
562 L2CAP_CONN_REQ, sizeof(req), &req);
563
564 } else if (sk->sk_state == BT_CONNECT2) {
565 struct l2cap_conn_rsp rsp;
566 char buf[128];
567 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
568 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
569
570 if (l2cap_check_security(sk)) {
571 if (bt_sk(sk)->defer_setup) {
572 struct sock *parent = bt_sk(sk)->parent;
573 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
574 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
575 parent->sk_data_ready(parent, 0);
576
577 } else {
578 sk->sk_state = BT_CONFIG;
579 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
580 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
581 }
582 } else {
583 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
584 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
585 }
586
587 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
588 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
589
590 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
591 rsp.result != L2CAP_CR_SUCCESS) {
592 bh_unlock_sock(sk);
593 continue;
594 }
595
596 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
597 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
598 l2cap_build_conf_req(sk, buf), buf);
599 l2cap_pi(sk)->num_conf_req++;
600 }
601
602 bh_unlock_sock(sk);
603 }
604
605 read_unlock(&l->lock);
606
607 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
608 bh_lock_sock(tmp1->sk);
609 __l2cap_sock_close(tmp1->sk, ECONNRESET);
610 bh_unlock_sock(tmp1->sk);
611 list_del(&tmp1->list);
612 kfree(tmp1);
613 }
614 }
615
616 static void l2cap_conn_ready(struct l2cap_conn *conn)
617 {
618 struct l2cap_chan_list *l = &conn->chan_list;
619 struct sock *sk;
620
621 BT_DBG("conn %p", conn);
622
623 read_lock(&l->lock);
624
625 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
626 bh_lock_sock(sk);
627
628 if (sk->sk_type != SOCK_SEQPACKET &&
629 sk->sk_type != SOCK_STREAM) {
630 l2cap_sock_clear_timer(sk);
631 sk->sk_state = BT_CONNECTED;
632 sk->sk_state_change(sk);
633 } else if (sk->sk_state == BT_CONNECT)
634 l2cap_do_start(sk);
635
636 bh_unlock_sock(sk);
637 }
638
639 read_unlock(&l->lock);
640 }
641
642 /* Notify sockets that we cannot guaranty reliability anymore */
643 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
644 {
645 struct l2cap_chan_list *l = &conn->chan_list;
646 struct sock *sk;
647
648 BT_DBG("conn %p", conn);
649
650 read_lock(&l->lock);
651
652 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
653 if (l2cap_pi(sk)->force_reliable)
654 sk->sk_err = err;
655 }
656
657 read_unlock(&l->lock);
658 }
659
660 static void l2cap_info_timeout(unsigned long arg)
661 {
662 struct l2cap_conn *conn = (void *) arg;
663
664 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
665 conn->info_ident = 0;
666
667 l2cap_conn_start(conn);
668 }
669
/* Create the L2CAP connection object for an HCI link, or return the
 * existing one. Returns NULL on allocation failure; a non-zero
 * 'status' suppresses creation. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* Bounds the feature-mask information exchange. */
	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason reported to peers. */
	conn->disc_reason = 0x13;

	return conn;
}
702
/* Tear down an L2CAP connection: kill every channel, stop the info
 * timer, and free the connection object. 'err' is propagated to each
 * channel as its socket error. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Partially reassembled frame, if any. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
729
730 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
731 {
732 struct l2cap_chan_list *l = &conn->chan_list;
733 write_lock_bh(&l->lock);
734 __l2cap_chan_add(conn, sk, parent);
735 write_unlock_bh(&l->lock);
736 }
737
738 /* ---- Socket interface ---- */
739 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
740 {
741 struct sock *sk;
742 struct hlist_node *node;
743 sk_for_each(sk, node, &l2cap_sk_list.head)
744 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
745 goto found;
746 sk = NULL;
747 found:
748 return sk;
749 }
750
751 /* Find socket with psm and source bdaddr.
752 * Returns closest match.
753 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		/* state == 0 means "any state". */
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* 'node' is only non-NULL when the loop broke early, i.e. an
	 * exact match was found; otherwise use the wildcard fallback. */
	return node ? sk : sk1;
}
775
776 /* Find socket with given address (psm, src).
777 * Returns locked socket */
778 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
779 {
780 struct sock *s;
781 read_lock(&l2cap_sk_list.lock);
782 s = __l2cap_get_sock_by_psm(state, psm, src);
783 if (s)
784 bh_lock_sock(s);
785 read_unlock(&l2cap_sk_list.lock);
786 return s;
787 }
788
/* Socket destructor: drop any skbs still sitting on the core queues. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
796
797 static void l2cap_sock_cleanup_listen(struct sock *parent)
798 {
799 struct sock *sk;
800
801 BT_DBG("parent %p", parent);
802
803 /* Close not yet accepted channels */
804 while ((sk = bt_accept_dequeue(parent, NULL)))
805 l2cap_sock_close(sk);
806
807 parent->sk_state = BT_CLOSED;
808 sock_set_flag(parent, SOCK_ZAPPED);
809 }
810
811 /* Kill socket (only if zapped and orphan)
812 * Must be called on unlocked socket.
813 */
814 static void l2cap_sock_kill(struct sock *sk)
815 {
816 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
817 return;
818
819 BT_DBG("sk %p state %d", sk, sk->sk_state);
820
821 /* Kill poor orphan */
822 bt_sock_unlink(&l2cap_sk_list, sk);
823 sock_set_flag(sk, SOCK_DEAD);
824 sock_put(sk);
825 }
826
/* Core close path; dispatches on the current channel state.
 * Must be called with the socket locked. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		/* Drop every not-yet-accepted child first. */
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Orderly shutdown: ask the peer to disconnect
			 * and arm the timer so we do not wait forever. */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Incoming connect still pending: reject it. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
880
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	/* Stop the timer before closing, then run the core close path
	 * under the socket lock. */
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	/* Frees the socket when it is zapped and orphaned. */
	l2cap_sock_kill(sk);
}
890
/* Initialise L2CAP-specific socket state. A child accepted from
 * 'parent' inherits its configuration; otherwise defaults apply. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		/* SOCK_STREAM defaults to ERTM unless it is disabled. */
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
936
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * l2cap_pinfo wrapper around struct sock. */
static struct proto l2cap_proto = {
	.name = "L2CAP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct l2cap_pinfo)
};
942
/* Allocate and minimally initialise a new L2CAP socket, linking it
 * into the global socket list. Returns NULL when sk_alloc() fails. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	/* sk_timer drives connection timeouts (l2cap_sock_timeout). */
	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
967
968 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
969 int kern)
970 {
971 struct sock *sk;
972
973 BT_DBG("sock %p", sock);
974
975 sock->state = SS_UNCONNECTED;
976
977 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
978 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
979 return -ESOCKTNOSUPPORT;
980
981 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
982 return -EPERM;
983
984 sock->ops = &l2cap_sock_ops;
985
986 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
987 if (!sk)
988 return -ENOMEM;
989
990 l2cap_sock_init(sk, NULL);
991 return 0;
992 }
993
994 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
995 {
996 struct sock *sk = sock->sk;
997 struct sockaddr_l2 la;
998 int len, err = 0;
999
1000 BT_DBG("sk %p", sk);
1001
1002 if (!addr || addr->sa_family != AF_BLUETOOTH)
1003 return -EINVAL;
1004
1005 memset(&la, 0, sizeof(la));
1006 len = min_t(unsigned int, sizeof(la), alen);
1007 memcpy(&la, addr, len);
1008
1009 if (la.l2_cid)
1010 return -EINVAL;
1011
1012 lock_sock(sk);
1013
1014 if (sk->sk_state != BT_OPEN) {
1015 err = -EBADFD;
1016 goto done;
1017 }
1018
1019 if (la.l2_psm) {
1020 __u16 psm = __le16_to_cpu(la.l2_psm);
1021
1022 /* PSM must be odd and lsb of upper byte must be 0 */
1023 if ((psm & 0x0101) != 0x0001) {
1024 err = -EINVAL;
1025 goto done;
1026 }
1027
1028 /* Restrict usage of well-known PSMs */
1029 if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
1030 err = -EACCES;
1031 goto done;
1032 }
1033 }
1034
1035 write_lock_bh(&l2cap_sk_list.lock);
1036
1037 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1038 err = -EADDRINUSE;
1039 } else {
1040 /* Save source address */
1041 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1042 l2cap_pi(sk)->psm = la.l2_psm;
1043 l2cap_pi(sk)->sport = la.l2_psm;
1044 sk->sk_state = BT_BOUND;
1045
1046 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1047 __le16_to_cpu(la.l2_psm) == 0x0003)
1048 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1049 }
1050
1051 write_unlock_bh(&l2cap_sk_list.lock);
1052
1053 done:
1054 release_sock(sk);
1055 return err;
1056 }
1057
/* Establish (or join) the ACL link to the destination and attach this
 * channel to it. On success the socket moves to BT_CONNECT (or
 * straight to BT_CONNECTED for connectionless/raw sockets when the
 * link is already up). Returns 0 or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	/* Map socket type and security level to an HCI auth requirement;
	 * mirrors the mapping in l2cap_check_security(). */
	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP: never bond. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Connectionless/raw: ready as soon as link is up. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1148
/* connect(2) backend: validate the destination address, mode and
 * state, start the connection, then wait for BT_CONNECTED (honouring
 * O_NONBLOCK via the send timeout). */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connecting to a fixed CID is not supported. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM. */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming only when not disabled by module parameter. */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
			sk->sk_type != SOCK_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1233
/* Put a bound L2CAP socket into the listening state.
 * If no PSM was bound, allocate a dynamic one (odd values in the
 * 0x1001..0x10ff range) under the socket-list write lock.
 */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	/* Only connection-oriented, bound sockets can listen. */
	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		/* Assume failure; cleared when a free PSM is found. */
		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Dynamic PSMs are odd; step by 2 through the range. */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1292
/* Accept an incoming connection on a listening L2CAP socket.
 * Blocks (up to the receive timeout) on an exclusive wait-queue entry
 * until bt_accept_dequeue() yields a ready child socket.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* Nested lock class: the child socket may be locked while the
	 * listening parent is held elsewhere. */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the socket lock while sleeping so the incoming
		 * connection path can make progress. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* The socket may have been closed while we slept. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1348
1349 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1350 {
1351 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1352 struct sock *sk = sock->sk;
1353
1354 BT_DBG("sock %p, sk %p", sock, sk);
1355
1356 addr->sa_family = AF_BLUETOOTH;
1357 *len = sizeof(struct sockaddr_l2);
1358
1359 if (peer) {
1360 la->l2_psm = l2cap_pi(sk)->psm;
1361 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1362 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1363 } else {
1364 la->l2_psm = l2cap_pi(sk)->sport;
1365 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1366 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1367 }
1368
1369 return 0;
1370 }
1371
/* Block until all transmitted ERTM I-frames have been acknowledged
 * (unacked_frames drops to zero) or the connection goes away.
 * Called with the socket locked; drops the lock while sleeping.
 * Returns 0 on success, or a signal/socket error.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the poll interval after a timeout expiry. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1402
/* ERTM monitor timer (timer/BH context): the peer failed to answer a
 * poll in time.  Give up and disconnect once remote_max_tx retries
 * are exhausted, otherwise re-poll with an RR/RNR carrying the P bit.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1422
/* ERTM retransmission timer (timer/BH context): an I-frame went
 * unacknowledged.  Start the monitor sequence: poll the peer with the
 * P bit set and wait for its F-bit response (L2CAP_CONN_WAIT_F).
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1438
/* Free frames at the head of the TX queue that the peer has
 * acknowledged, i.e. everything before expected_ack_seq.  Stops the
 * retransmission timer once nothing is left outstanding.
 */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* First still-unacknowledged frame reached; stop here. */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1457
1458 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1459 {
1460 struct l2cap_pinfo *pi = l2cap_pi(sk);
1461
1462 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1463
1464 hci_send_acl(pi->conn->hcon, skb, 0);
1465 }
1466
/* Streaming mode transmit: drain the TX queue, stamping each frame
 * with the next TxSeq (modulo 64) and, when enabled, a trailing
 * CRC16 FCS.  Streaming mode never retransmits, so frames are
 * dequeued and sent immediately.
 */
static void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		/* Patch TxSeq into the control field after the L2CAP header. */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything but its own 2 bytes. */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1488
1489 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1490 {
1491 struct l2cap_pinfo *pi = l2cap_pi(sk);
1492 struct sk_buff *skb, *tx_skb;
1493 u16 control, fcs;
1494
1495 skb = skb_peek(TX_QUEUE(sk));
1496 if (!skb)
1497 return;
1498
1499 do {
1500 if (bt_cb(skb)->tx_seq == tx_seq)
1501 break;
1502
1503 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1504 return;
1505
1506 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1507
1508 if (pi->remote_max_tx &&
1509 bt_cb(skb)->retries == pi->remote_max_tx) {
1510 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1511 return;
1512 }
1513
1514 tx_skb = skb_clone(skb, GFP_ATOMIC);
1515 bt_cb(skb)->retries++;
1516 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1517
1518 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1519 control |= L2CAP_CTRL_FINAL;
1520 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1521 }
1522
1523 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1524 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1525
1526 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1527
1528 if (pi->fcs == L2CAP_FCS_CRC16) {
1529 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1530 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1531 }
1532
1533 l2cap_do_send(sk, tx_skb);
1534 }
1535
1536 static int l2cap_ertm_send(struct sock *sk)
1537 {
1538 struct sk_buff *skb, *tx_skb;
1539 struct l2cap_pinfo *pi = l2cap_pi(sk);
1540 u16 control, fcs;
1541 int nsent = 0;
1542
1543 if (sk->sk_state != BT_CONNECTED)
1544 return -ENOTCONN;
1545
1546 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1547
1548 if (pi->remote_max_tx &&
1549 bt_cb(skb)->retries == pi->remote_max_tx) {
1550 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1551 break;
1552 }
1553
1554 tx_skb = skb_clone(skb, GFP_ATOMIC);
1555
1556 bt_cb(skb)->retries++;
1557
1558 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1559 control &= L2CAP_CTRL_SAR;
1560
1561 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1562 control |= L2CAP_CTRL_FINAL;
1563 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1564 }
1565 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1566 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1567 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1568
1569
1570 if (pi->fcs == L2CAP_FCS_CRC16) {
1571 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1572 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1573 }
1574
1575 l2cap_do_send(sk, tx_skb);
1576
1577 __mod_retrans_timer();
1578
1579 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1580 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1581
1582 pi->unacked_frames++;
1583 pi->frames_sent++;
1584
1585 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1586 sk->sk_send_head = NULL;
1587 else
1588 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1589
1590 nsent++;
1591 }
1592
1593 return nsent;
1594 }
1595
1596 static int l2cap_retransmit_frames(struct sock *sk)
1597 {
1598 struct l2cap_pinfo *pi = l2cap_pi(sk);
1599 int ret;
1600
1601 if (!skb_queue_empty(TX_QUEUE(sk)))
1602 sk->sk_send_head = TX_QUEUE(sk)->next;
1603
1604 pi->next_tx_seq = pi->expected_ack_seq;
1605 ret = l2cap_ertm_send(sk);
1606 return ret;
1607 }
1608
/* Acknowledge received frames.  Prefer piggy-backing the ack on
 * pending I-frames (l2cap_ertm_send); otherwise send an explicit
 * RR S-frame, or RNR when the local side is busy.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* Any I-frame sent carries the ack (ReqSeq); done if so. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1629
/* Send an SREJ S-frame with the F bit set for the last (most recent)
 * entry on the pending-SREJ list.
 */
static void l2cap_send_srejtail(struct sock *sk)
{
	struct srej_list *tail;
	u16 control;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	/* prev of the list head is the tail entry. */
	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(l2cap_pi(sk), control);
}
1643
/* Copy user iovec data into an skb: the first `count` bytes go into
 * skb itself, the remainder into a chain of fragment skbs no larger
 * than the connection MTU each.  Returns bytes consumed or a
 * negative error (caller frees skb, which also frees the frag chain).
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1675
/* Build a connectionless (SOCK_DGRAM) PDU: L2CAP header followed by
 * the 2-byte PSM, then the user payload copied from the iovec.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First skb holds at most one MTU; the rest goes in fragments. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1704
/* Build a basic-mode PDU: plain L2CAP header plus the user payload
 * copied from the iovec.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First skb holds at most one MTU; the rest goes in fragments. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1732
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field, optional 2-byte SDU length (only on SDU-start frames), the
 * payload, and room for a 2-byte FCS when CRC16 is enabled (the FCS
 * value itself is filled in at transmit time).
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SDU length field on start frames */

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve the FCS slot; the real value is computed on send. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1777
/* Segment an SDU larger than the remote MPS into a chain of I-frames
 * (SAR start / continue / end) on a temporary queue, then splice the
 * whole chain onto the TX queue atomically so it is never sent
 * half-built.  Returns total bytes queued or a negative error.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* Start frame carries the total SDU length. */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop the partially-built chain on failure. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1823
1824 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1825 {
1826 struct sock *sk = sock->sk;
1827 struct l2cap_pinfo *pi = l2cap_pi(sk);
1828 struct sk_buff *skb;
1829 u16 control;
1830 int err;
1831
1832 BT_DBG("sock %p, sk %p", sock, sk);
1833
1834 err = sock_error(sk);
1835 if (err)
1836 return err;
1837
1838 if (msg->msg_flags & MSG_OOB)
1839 return -EOPNOTSUPP;
1840
1841 lock_sock(sk);
1842
1843 if (sk->sk_state != BT_CONNECTED) {
1844 err = -ENOTCONN;
1845 goto done;
1846 }
1847
1848 /* Connectionless channel */
1849 if (sk->sk_type == SOCK_DGRAM) {
1850 skb = l2cap_create_connless_pdu(sk, msg, len);
1851 if (IS_ERR(skb)) {
1852 err = PTR_ERR(skb);
1853 } else {
1854 l2cap_do_send(sk, skb);
1855 err = len;
1856 }
1857 goto done;
1858 }
1859
1860 switch (pi->mode) {
1861 case L2CAP_MODE_BASIC:
1862 /* Check outgoing MTU */
1863 if (len > pi->omtu) {
1864 err = -EMSGSIZE;
1865 goto done;
1866 }
1867
1868 /* Create a basic PDU */
1869 skb = l2cap_create_basic_pdu(sk, msg, len);
1870 if (IS_ERR(skb)) {
1871 err = PTR_ERR(skb);
1872 goto done;
1873 }
1874
1875 l2cap_do_send(sk, skb);
1876 err = len;
1877 break;
1878
1879 case L2CAP_MODE_ERTM:
1880 case L2CAP_MODE_STREAMING:
1881 /* Entire SDU fits into one PDU */
1882 if (len <= pi->remote_mps) {
1883 control = L2CAP_SDU_UNSEGMENTED;
1884 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1885 if (IS_ERR(skb)) {
1886 err = PTR_ERR(skb);
1887 goto done;
1888 }
1889 __skb_queue_tail(TX_QUEUE(sk), skb);
1890
1891 if (sk->sk_send_head == NULL)
1892 sk->sk_send_head = skb;
1893
1894 } else {
1895 /* Segment SDU into multiples PDUs */
1896 err = l2cap_sar_segment_sdu(sk, msg, len);
1897 if (err < 0)
1898 goto done;
1899 }
1900
1901 if (pi->mode == L2CAP_MODE_STREAMING) {
1902 l2cap_streaming_send(sk);
1903 } else {
1904 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1905 pi->conn_state && L2CAP_CONN_WAIT_F) {
1906 err = len;
1907 break;
1908 }
1909 err = l2cap_ertm_send(sk);
1910 }
1911
1912 if (err >= 0)
1913 err = len;
1914 break;
1915
1916 default:
1917 BT_DBG("bad state %1.1x", pi->mode);
1918 err = -EBADFD;
1919 }
1920
1921 done:
1922 release_sock(sk);
1923 return err;
1924 }
1925
/* recvmsg() for L2CAP sockets.  For a deferred-setup channel still in
 * BT_CONNECT2, the first read completes the connection (sends the
 * connect response and kicks off configuration) instead of returning
 * data.  Otherwise delegates to the generic BT receive helpers.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		/* Accept the pending connection request. */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		/* Config request may already be out; don't send twice. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1967
/* Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (channel
 * parameters) and L2CAP_LM (link-mode / security flags).
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		if (sk->sk_state == BT_CONNECTED) {
			err = -EINVAL;
			break;
		}

		/* Seed with current values so a short copy_from_user
		 * leaves unspecified fields unchanged. */
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		/* NOTE(review): on an invalid mode err is set above but the
		 * remaining options are still applied — looks intentional
		 * (historic behavior); confirm before changing. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs  = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Map legacy link-mode flags onto security levels;
		 * later flags take precedence. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2052
/* setsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default applies if the user buffer is shorter than sec. */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
				sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before/while listening. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2117
/* Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS, L2CAP_LM and
 * L2CAP_CONNINFO (HCI handle + remote device class).
 */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		/* Map the security level back onto legacy LM flags. */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		/* Valid once connected, or in CONNECT2 with deferred
		 * setup (connection exists but is not yet accepted). */
		if (sk->sk_state != BT_CONNECTED &&
				!(sk->sk_state == BT_CONNECT2 &&
					bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2201
/* getsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here.
 */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2256
/* Shut an L2CAP socket down: in ERTM mode first wait for outstanding
 * frames to be acknowledged, then close the channel and optionally
 * linger until it reaches BT_CLOSED.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* Don't discard un-acked data on an ERTM channel. */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* A pending socket error takes precedence over success. */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2287
2288 static int l2cap_sock_release(struct socket *sock)
2289 {
2290 struct sock *sk = sock->sk;
2291 int err;
2292
2293 BT_DBG("sock %p, sk %p", sock, sk);
2294
2295 if (!sk)
2296 return 0;
2297
2298 err = l2cap_sock_shutdown(sock, 2);
2299
2300 sock_orphan(sk);
2301 l2cap_sock_kill(sk);
2302 return err;
2303 }
2304
2305 static void l2cap_chan_ready(struct sock *sk)
2306 {
2307 struct sock *parent = bt_sk(sk)->parent;
2308
2309 BT_DBG("sk %p, parent %p", sk, parent);
2310
2311 l2cap_pi(sk)->conf_state = 0;
2312 l2cap_sock_clear_timer(sk);
2313
2314 if (!parent) {
2315 /* Outgoing channel.
2316 * Wake up socket sleeping on connect.
2317 */
2318 sk->sk_state = BT_CONNECTED;
2319 sk->sk_state_change(sk);
2320 } else {
2321 /* Incoming channel.
2322 * Wake up socket sleeping on accept.
2323 */
2324 parent->sk_data_ready(parent, 0);
2325 }
2326 }
2327
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Clone per socket; skip silently on allocation failure. */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Queue rejected (e.g. rcvbuf full): drop the clone. */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2354
2355 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header + command
 * header + dlen bytes of payload, fragmented to the connection MTU
 * (continuation fragments carry no headers).  Returns NULL on
 * allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment: whatever payload fits after headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far. */
	kfree_skb(skb);
	return NULL;
}
2414
/* Parse one configuration option at *ptr, advancing *ptr past it.
 * 1/2/4-byte values are decoded into *val; for any other length *val
 * holds a pointer to the raw option data.  Returns bytes consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2447
/* Append one configuration option to the buffer at *ptr, advancing
 * *ptr past it.  The inverse of l2cap_get_conf_opt(): 1/2/4-byte
 * values are encoded inline; any other length treats val as a
 * pointer to raw option data.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is a data pointer. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2477
/* Ack timer (timer/BH context): a received I-frame has gone
 * unacknowledged long enough — send the pending ack now.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2486
/* Initialize ERTM per-channel state: zero the sequence counters, set
 * up the retransmission/monitor/ack timers, the SREJ and busy queues,
 * the busy workqueue item, and install the ERTM backlog handler.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	/* Frames arriving while the socket is owned by user context go
	 * through the ERTM-aware backlog receive path. */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
2509
2510 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2511 {
2512 switch (mode) {
2513 case L2CAP_MODE_STREAMING:
2514 case L2CAP_MODE_ERTM:
2515 if (l2cap_mode_supported(mode, remote_feat_mask))
2516 return mode;
2517 /* fall through */
2518 default:
2519 return L2CAP_MODE_BASIC;
2520 }
2521 }
2522
/* Build an outgoing L2CAP configure request for channel @sk into @data.
 *
 * On the first request (no requests or responses exchanged yet) the
 * channel mode may be downgraded via l2cap_select_mode() unless the
 * mode was pinned by the device (L2CAP_CONF_STATE2_DEVICE).  MTU, RFC
 * and FCS options are then appended as appropriate for the final mode.
 *
 * Returns the number of bytes written to @data (header + options).
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection only happens on the very first exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Device-mandated mode: do not renegotiate. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* MTU only needs to be sent when it differs from default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		/* Only include an explicit basic-mode RFC option when the
		 * remote advertises ERTM or streaming support. */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Clamp the PDU size so an S/I-frame (header + FCS overhead
		 * of 10 bytes) still fits in the link MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request no FCS when we (or the peer) opted out of it. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Same MTU clamping as the ERTM case above. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2624
2625 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2626 {
2627 struct l2cap_pinfo *pi = l2cap_pi(sk);
2628 struct l2cap_conf_rsp *rsp = data;
2629 void *ptr = rsp->data;
2630 void *req = pi->conf_req;
2631 int len = pi->conf_len;
2632 int type, hint, olen;
2633 unsigned long val;
2634 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2635 u16 mtu = L2CAP_DEFAULT_MTU;
2636 u16 result = L2CAP_CONF_SUCCESS;
2637
2638 BT_DBG("sk %p", sk);
2639
2640 while (len >= L2CAP_CONF_OPT_SIZE) {
2641 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2642
2643 hint = type & L2CAP_CONF_HINT;
2644 type &= L2CAP_CONF_MASK;
2645
2646 switch (type) {
2647 case L2CAP_CONF_MTU:
2648 mtu = val;
2649 break;
2650
2651 case L2CAP_CONF_FLUSH_TO:
2652 pi->flush_to = val;
2653 break;
2654
2655 case L2CAP_CONF_QOS:
2656 break;
2657
2658 case L2CAP_CONF_RFC:
2659 if (olen == sizeof(rfc))
2660 memcpy(&rfc, (void *) val, olen);
2661 break;
2662
2663 case L2CAP_CONF_FCS:
2664 if (val == L2CAP_FCS_NONE)
2665 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2666
2667 break;
2668
2669 default:
2670 if (hint)
2671 break;
2672
2673 result = L2CAP_CONF_UNKNOWN;
2674 *((u8 *) ptr++) = type;
2675 break;
2676 }
2677 }
2678
2679 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2680 goto done;
2681
2682 switch (pi->mode) {
2683 case L2CAP_MODE_STREAMING:
2684 case L2CAP_MODE_ERTM:
2685 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2686 pi->mode = l2cap_select_mode(rfc.mode,
2687 pi->conn->feat_mask);
2688 break;
2689 }
2690
2691 if (pi->mode != rfc.mode)
2692 return -ECONNREFUSED;
2693
2694 break;
2695 }
2696
2697 done:
2698 if (pi->mode != rfc.mode) {
2699 result = L2CAP_CONF_UNACCEPT;
2700 rfc.mode = pi->mode;
2701
2702 if (pi->num_conf_rsp == 1)
2703 return -ECONNREFUSED;
2704
2705 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2706 sizeof(rfc), (unsigned long) &rfc);
2707 }
2708
2709
2710 if (result == L2CAP_CONF_SUCCESS) {
2711 /* Configure output options and let the other side know
2712 * which ones we don't like. */
2713
2714 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2715 result = L2CAP_CONF_UNACCEPT;
2716 else {
2717 pi->omtu = mtu;
2718 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2719 }
2720 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2721
2722 switch (rfc.mode) {
2723 case L2CAP_MODE_BASIC:
2724 pi->fcs = L2CAP_FCS_NONE;
2725 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2726 break;
2727
2728 case L2CAP_MODE_ERTM:
2729 pi->remote_tx_win = rfc.txwin_size;
2730 pi->remote_max_tx = rfc.max_transmit;
2731
2732 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2733 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2734
2735 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2736
2737 rfc.retrans_timeout =
2738 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2739 rfc.monitor_timeout =
2740 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2741
2742 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2743
2744 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2745 sizeof(rfc), (unsigned long) &rfc);
2746
2747 break;
2748
2749 case L2CAP_MODE_STREAMING:
2750 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2751 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2752
2753 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2754
2755 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2756
2757 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2758 sizeof(rfc), (unsigned long) &rfc);
2759
2760 break;
2761
2762 default:
2763 result = L2CAP_CONF_UNACCEPT;
2764
2765 memset(&rfc, 0, sizeof(rfc));
2766 rfc.mode = pi->mode;
2767 }
2768
2769 if (result == L2CAP_CONF_SUCCESS)
2770 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2771 }
2772 rsp->scid = cpu_to_le16(pi->dcid);
2773 rsp->result = cpu_to_le16(result);
2774 rsp->flags = cpu_to_le16(0x0000);
2775
2776 return ptr - data;
2777 }
2778
2779 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2780 {
2781 struct l2cap_pinfo *pi = l2cap_pi(sk);
2782 struct l2cap_conf_req *req = data;
2783 void *ptr = req->data;
2784 int type, olen;
2785 unsigned long val;
2786 struct l2cap_conf_rfc rfc;
2787
2788 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2789
2790 while (len >= L2CAP_CONF_OPT_SIZE) {
2791 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2792
2793 switch (type) {
2794 case L2CAP_CONF_MTU:
2795 if (val < L2CAP_DEFAULT_MIN_MTU) {
2796 *result = L2CAP_CONF_UNACCEPT;
2797 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2798 } else
2799 pi->imtu = val;
2800 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2801 break;
2802
2803 case L2CAP_CONF_FLUSH_TO:
2804 pi->flush_to = val;
2805 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2806 2, pi->flush_to);
2807 break;
2808
2809 case L2CAP_CONF_RFC:
2810 if (olen == sizeof(rfc))
2811 memcpy(&rfc, (void *)val, olen);
2812
2813 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2814 rfc.mode != pi->mode)
2815 return -ECONNREFUSED;
2816
2817 pi->fcs = 0;
2818
2819 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2820 sizeof(rfc), (unsigned long) &rfc);
2821 break;
2822 }
2823 }
2824
2825 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2826 return -ECONNREFUSED;
2827
2828 pi->mode = rfc.mode;
2829
2830 if (*result == L2CAP_CONF_SUCCESS) {
2831 switch (rfc.mode) {
2832 case L2CAP_MODE_ERTM:
2833 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2834 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2835 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2836 break;
2837 case L2CAP_MODE_STREAMING:
2838 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2839 }
2840 }
2841
2842 req->dcid = cpu_to_le16(pi->dcid);
2843 req->flags = cpu_to_le16(0x0000);
2844
2845 return ptr - data;
2846 }
2847
2848 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2849 {
2850 struct l2cap_conf_rsp *rsp = data;
2851 void *ptr = rsp->data;
2852
2853 BT_DBG("sk %p", sk);
2854
2855 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2856 rsp->result = cpu_to_le16(result);
2857 rsp->flags = cpu_to_le16(flags);
2858
2859 return ptr - data;
2860 }
2861
2862 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2863 {
2864 struct l2cap_pinfo *pi = l2cap_pi(sk);
2865 int type, olen;
2866 unsigned long val;
2867 struct l2cap_conf_rfc rfc;
2868
2869 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2870
2871 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2872 return;
2873
2874 while (len >= L2CAP_CONF_OPT_SIZE) {
2875 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2876
2877 switch (type) {
2878 case L2CAP_CONF_RFC:
2879 if (olen == sizeof(rfc))
2880 memcpy(&rfc, (void *)val, olen);
2881 goto done;
2882 }
2883 }
2884
2885 done:
2886 switch (rfc.mode) {
2887 case L2CAP_MODE_ERTM:
2888 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2889 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2890 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2891 break;
2892 case L2CAP_MODE_STREAMING:
2893 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2894 }
2895 }
2896
2897 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2898 {
2899 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2900
2901 if (rej->reason != 0x0000)
2902 return 0;
2903
2904 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2905 cmd->ident == conn->info_ident) {
2906 del_timer(&conn->info_timer);
2907
2908 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2909 conn->info_ident = 0;
2910
2911 l2cap_conn_start(conn);
2912 }
2913
2914 return 0;
2915 }
2916
/* Handle an incoming L2CAP connection request.
 *
 * Looks up a listening socket for the requested PSM, performs security
 * and backlog checks, allocates and registers a child socket, and
 * replies with a connection response.  If the remote's feature mask is
 * not yet known, the response is "pending" and an information request
 * is sent first.
 *
 * NOTE(review): the child setup runs under list->lock and the listening
 * socket appears to be returned locked by l2cap_get_sock_by_psm (it is
 * released at the "response" label) — confirm against that helper.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Initialize the child socket from the listener and bind it to
	 * this ACL connection; our dcid is the peer's source CID. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm  = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept before we proceed. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still outstanding. */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not done yet — answer "pending". */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off the feature mask exchange now so the pending
		 * connection can complete once the response arrives. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		/* Channel accepted immediately — start configuration. */
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
3042
/* Handle an incoming L2CAP connection response.
 *
 * Finds the local channel either by our source CID (scid field of the
 * response) or, when none is given, by the pending command identifier.
 * On success the channel enters BT_CONFIG and a configure request is
 * sent; on "pending" we just flag the state; on any error the channel
 * is torn down (deferred via timer if userspace holds the socket).
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* NOTE(review): both lookup helpers appear to return the socket
	 * locked (bh_unlock_sock at the end) — confirm against them. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection established — record the peer CID and move
		 * on to parameter configuration. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3104
3105 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3106 {
3107 /* FCS is enabled only in ERTM or streaming mode, if one or both
3108 * sides request it.
3109 */
3110 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3111 pi->fcs = L2CAP_FCS_NONE;
3112 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3113 pi->fcs = L2CAP_FCS_CRC16;
3114 }
3115
/* Handle an incoming configure request.
 *
 * Requests may arrive fragmented (continuation flag 0x0001); fragments
 * are accumulated in pi->conf_req until the final one, then parsed via
 * l2cap_parse_conf_req() and answered.  When both directions of the
 * configuration are done the channel becomes BT_CONNECTED (with ERTM
 * state initialized if needed).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unresolvable mode mismatch — tear down the channel. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: bring the channel up. */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We have not configured our own direction yet. */
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3200
3201 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3202 {
3203 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3204 u16 scid, flags, result;
3205 struct sock *sk;
3206 int len = cmd->len - sizeof(*rsp);
3207
3208 scid = __le16_to_cpu(rsp->scid);
3209 flags = __le16_to_cpu(rsp->flags);
3210 result = __le16_to_cpu(rsp->result);
3211
3212 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3213 scid, flags, result);
3214
3215 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3216 if (!sk)
3217 return 0;
3218
3219 switch (result) {
3220 case L2CAP_CONF_SUCCESS:
3221 l2cap_conf_rfc_get(sk, rsp->data, len);
3222 break;
3223
3224 case L2CAP_CONF_UNACCEPT:
3225 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3226 char req[64];
3227
3228 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3229 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3230 goto done;
3231 }
3232
3233 /* throw out any old stored conf requests */
3234 result = L2CAP_CONF_SUCCESS;
3235 len = l2cap_parse_conf_rsp(sk, rsp->data,
3236 len, req, &result);
3237 if (len < 0) {
3238 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3239 goto done;
3240 }
3241
3242 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3243 L2CAP_CONF_REQ, len, req);
3244 l2cap_pi(sk)->num_conf_req++;
3245 if (result != L2CAP_CONF_SUCCESS)
3246 goto done;
3247 break;
3248 }
3249
3250 default:
3251 sk->sk_err = ECONNRESET;
3252 l2cap_sock_set_timer(sk, HZ * 5);
3253 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3254 goto done;
3255 }
3256
3257 if (flags & 0x01)
3258 goto done;
3259
3260 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3261
3262 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3263 set_default_fcs(l2cap_pi(sk));
3264
3265 sk->sk_state = BT_CONNECTED;
3266 l2cap_pi(sk)->next_tx_seq = 0;
3267 l2cap_pi(sk)->expected_tx_seq = 0;
3268 __skb_queue_head_init(TX_QUEUE(sk));
3269 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3270 l2cap_ertm_init(sk);
3271
3272 l2cap_chan_ready(sk);
3273 }
3274
3275 done:
3276 bh_unlock_sock(sk);
3277 return 0;
3278 }
3279
/* Handle an incoming disconnection request: acknowledge it, shut the
 * socket down and delete the channel.  If userspace currently owns the
 * socket lock, deletion is deferred via a short timer instead.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our local source CID. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3317
/* Handle an incoming disconnection response: the peer confirmed our
 * disconnect, so delete the channel (deferred via timer if userspace
 * currently owns the socket lock).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: normal, peer-confirmed shutdown. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3348
/* Handle an incoming information request.  Supported queries are the
 * extended feature mask and the fixed channel map; anything else gets
 * a "not supported" response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* Header (4 bytes) + 32-bit feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Only advertise ERTM/streaming/FCS when not disabled by
		 * the module parameter. */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Header (4 bytes) + 8-byte fixed channel bitmap. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3388
/* Handle an incoming information response.
 *
 * Stores the remote feature mask and, when the peer supports fixed
 * channels, follows up with a fixed-channel query.  Once the exchange
 * is complete (or failed) the connection is marked done and any
 * channels waiting on the feature mask are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the query — proceed without feature info. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Ask for the fixed channel map before starting. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3436
/* Dispatch all signalling commands contained in @skb (received on the
 * L2CAP signalling channel) to their handlers.  A single frame may
 * carry several commands back-to-back; malformed commands abort the
 * loop, and handler errors are answered with a Command Reject.
 * Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or reserved ident 0 — stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo back the payload unchanged. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3526
3527 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3528 {
3529 u16 our_fcs, rcv_fcs;
3530 int hdr_size = L2CAP_HDR_SIZE + 2;
3531
3532 if (pi->fcs == L2CAP_FCS_CRC16) {
3533 skb_trim(skb, skb->len - 2);
3534 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3535 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3536
3537 if (our_fcs != rcv_fcs)
3538 return -EBADMSG;
3539 }
3540 return 0;
3541 }
3542
/* After an ERTM poll/final exchange, resume transmission: announce
 * local busy with an RNR if applicable, retransmit anything the remote
 * missed, push queued I-frames, and if nothing at all was sent make
 * sure the peer still gets an RR acknowledgement.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Tell the peer to stop sending until we recover. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing went out and we are not busy: send a plain RR so the
	 * peer still receives an acknowledgement. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3569
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq distance from buffer_seq (modulo the 64-frame
 * sequence space).  Returns -EINVAL if a frame with the same tx_seq is
 * already queued (duplicate), 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of the new frame from the reassembly point, wrapped
	 * into [0, 63]. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that is further away — insert here. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Largest offset so far — append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3612
/* Reassemble an ERTM SDU from an incoming I-frame according to its SAR
 * (segmentation and reassembly) bits in @control.
 *
 * Unsegmented frames are queued directly; START allocates pi->sdu and
 * records the expected total length; CONTINUE appends; END completes
 * the SDU and queues it on the socket.  The SAR_RETRY state allows a
 * failed END delivery (e.g. receive buffer full) to be retried without
 * re-copying the payload.
 *
 * Consumes @skb.  Returns 0, or -ENOMEM/queueing errors that signal
 * the caller to enter the local-busy path.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame in the middle of a reassembly is a
		 * protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* Skip the length checks/copy when retrying a previously
		 * failed delivery — the SDU is already complete. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	/* Unrecoverable SAR state — tear the channel down. */
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3720
/* Drain the local-busy queue back into the ERTM reassembly path.
 *
 * Returns 0 when the queue was fully flushed and the local busy state
 * cleared; -EBUSY when reassembly failed again, in which case the
 * frame is put back at the head of the queue for the next retry.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		/* Rebuild the SAR bits saved when the frame was queued */
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: keep the frame for a later retry */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy (RNR); poll it with RR+P so it
	 * resumes transmission, then wait for the F bit */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3760
/* Deferred work run while the channel is in the local busy state.
 *
 * Periodically (every HZ/5) retries l2cap_try_push_rx_skb() until the
 * busy queue drains, the socket reports an error, a signal is pending,
 * or L2CAP_LOCAL_BUSY_TRIES attempts have failed — in which case the
 * channel is disconnected.  The socket lock is released while sleeping
 * so the receive path can still run.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep with the socket lock dropped */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3807
3808 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3809 {
3810 struct l2cap_pinfo *pi = l2cap_pi(sk);
3811 int sctrl, err;
3812
3813 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3814 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3815 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3816 return l2cap_try_push_rx_skb(sk);
3817
3818
3819 }
3820
3821 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3822 if (err >= 0) {
3823 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3824 return err;
3825 }
3826
3827 /* Busy Condition */
3828 BT_DBG("sk %p, Enter local busy", sk);
3829
3830 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3831 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3832 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3833
3834 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3835 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3836 l2cap_send_sframe(pi, sctrl);
3837
3838 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3839
3840 del_timer(&pi->ack_timer);
3841
3842 queue_work(_busy_wq, &pi->busy_work);
3843
3844 return err;
3845 }
3846
3847 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3848 {
3849 struct l2cap_pinfo *pi = l2cap_pi(sk);
3850 struct sk_buff *_skb;
3851 int err = -EINVAL;
3852
3853 /*
3854 * TODO: We have to notify the userland if some data is lost with the
3855 * Streaming Mode.
3856 */
3857
3858 switch (control & L2CAP_CTRL_SAR) {
3859 case L2CAP_SDU_UNSEGMENTED:
3860 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3861 kfree_skb(pi->sdu);
3862 break;
3863 }
3864
3865 err = sock_queue_rcv_skb(sk, skb);
3866 if (!err)
3867 return 0;
3868
3869 break;
3870
3871 case L2CAP_SDU_START:
3872 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3873 kfree_skb(pi->sdu);
3874 break;
3875 }
3876
3877 pi->sdu_len = get_unaligned_le16(skb->data);
3878 skb_pull(skb, 2);
3879
3880 if (pi->sdu_len > pi->imtu) {
3881 err = -EMSGSIZE;
3882 break;
3883 }
3884
3885 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3886 if (!pi->sdu) {
3887 err = -ENOMEM;
3888 break;
3889 }
3890
3891 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3892
3893 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3894 pi->partial_sdu_len = skb->len;
3895 err = 0;
3896 break;
3897
3898 case L2CAP_SDU_CONTINUE:
3899 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3900 break;
3901
3902 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3903
3904 pi->partial_sdu_len += skb->len;
3905 if (pi->partial_sdu_len > pi->sdu_len)
3906 kfree_skb(pi->sdu);
3907 else
3908 err = 0;
3909
3910 break;
3911
3912 case L2CAP_SDU_END:
3913 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3914 break;
3915
3916 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3917
3918 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3919 pi->partial_sdu_len += skb->len;
3920
3921 if (pi->partial_sdu_len > pi->imtu)
3922 goto drop;
3923
3924 if (pi->partial_sdu_len == pi->sdu_len) {
3925 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3926 err = sock_queue_rcv_skb(sk, _skb);
3927 if (err < 0)
3928 kfree_skb(_skb);
3929 }
3930 err = 0;
3931
3932 drop:
3933 kfree_skb(pi->sdu);
3934 break;
3935 }
3936
3937 kfree_skb(skb);
3938 return err;
3939 }
3940
3941 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3942 {
3943 struct sk_buff *skb;
3944 u16 control;
3945
3946 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3947 if (bt_cb(skb)->tx_seq != tx_seq)
3948 break;
3949
3950 skb = skb_dequeue(SREJ_QUEUE(sk));
3951 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3952 l2cap_ertm_reassembly_sdu(sk, skb, control);
3953 l2cap_pi(sk)->buffer_seq_srej =
3954 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3955 tx_seq = (tx_seq + 1) % 64;
3956 }
3957 }
3958
3959 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3960 {
3961 struct l2cap_pinfo *pi = l2cap_pi(sk);
3962 struct srej_list *l, *tmp;
3963 u16 control;
3964
3965 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3966 if (l->tx_seq == tx_seq) {
3967 list_del(&l->list);
3968 kfree(l);
3969 return;
3970 }
3971 control = L2CAP_SUPER_SELECT_REJECT;
3972 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3973 l2cap_send_sframe(pi, control);
3974 list_del(&l->list);
3975 list_add_tail(&l->list, SREJ_LIST(sk));
3976 }
3977 }
3978
3979 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3980 {
3981 struct l2cap_pinfo *pi = l2cap_pi(sk);
3982 struct srej_list *new;
3983 u16 control;
3984
3985 while (tx_seq != pi->expected_tx_seq) {
3986 control = L2CAP_SUPER_SELECT_REJECT;
3987 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3988 l2cap_send_sframe(pi, control);
3989
3990 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3991 new->tx_seq = pi->expected_tx_seq;
3992 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3993 list_add_tail(&new->list, SREJ_LIST(sk));
3994 }
3995 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3996 }
3997
3998 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3999 {
4000 struct l2cap_pinfo *pi = l2cap_pi(sk);
4001 u8 tx_seq = __get_txseq(rx_control);
4002 u8 req_seq = __get_reqseq(rx_control);
4003 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
4004 int tx_seq_offset, expected_tx_seq_offset;
4005 int num_to_ack = (pi->tx_win/6) + 1;
4006 int err = 0;
4007
4008 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
4009 rx_control);
4010
4011 if (L2CAP_CTRL_FINAL & rx_control &&
4012 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4013 del_timer(&pi->monitor_timer);
4014 if (pi->unacked_frames > 0)
4015 __mod_retrans_timer();
4016 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
4017 }
4018
4019 pi->expected_ack_seq = req_seq;
4020 l2cap_drop_acked_frames(sk);
4021
4022 if (tx_seq == pi->expected_tx_seq)
4023 goto expected;
4024
4025 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
4026 if (tx_seq_offset < 0)
4027 tx_seq_offset += 64;
4028
4029 /* invalid tx_seq */
4030 if (tx_seq_offset >= pi->tx_win) {
4031 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4032 goto drop;
4033 }
4034
4035 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
4036 goto drop;
4037
4038 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4039 struct srej_list *first;
4040
4041 first = list_first_entry(SREJ_LIST(sk),
4042 struct srej_list, list);
4043 if (tx_seq == first->tx_seq) {
4044 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4045 l2cap_check_srej_gap(sk, tx_seq);
4046
4047 list_del(&first->list);
4048 kfree(first);
4049
4050 if (list_empty(SREJ_LIST(sk))) {
4051 pi->buffer_seq = pi->buffer_seq_srej;
4052 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
4053 l2cap_send_ack(pi);
4054 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4055 }
4056 } else {
4057 struct srej_list *l;
4058
4059 /* duplicated tx_seq */
4060 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4061 goto drop;
4062
4063 list_for_each_entry(l, SREJ_LIST(sk), list) {
4064 if (l->tx_seq == tx_seq) {
4065 l2cap_resend_srejframe(sk, tx_seq);
4066 return 0;
4067 }
4068 }
4069 l2cap_send_srejframe(sk, tx_seq);
4070 }
4071 } else {
4072 expected_tx_seq_offset =
4073 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4074 if (expected_tx_seq_offset < 0)
4075 expected_tx_seq_offset += 64;
4076
4077 /* duplicated tx_seq */
4078 if (tx_seq_offset < expected_tx_seq_offset)
4079 goto drop;
4080
4081 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4082
4083 BT_DBG("sk %p, Enter SREJ", sk);
4084
4085 INIT_LIST_HEAD(SREJ_LIST(sk));
4086 pi->buffer_seq_srej = pi->buffer_seq;
4087
4088 __skb_queue_head_init(SREJ_QUEUE(sk));
4089 __skb_queue_head_init(BUSY_QUEUE(sk));
4090 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4091
4092 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4093
4094 l2cap_send_srejframe(sk, tx_seq);
4095
4096 del_timer(&pi->ack_timer);
4097 }
4098 return 0;
4099
4100 expected:
4101 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4102
4103 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4104 bt_cb(skb)->tx_seq = tx_seq;
4105 bt_cb(skb)->sar = sar;
4106 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4107 return 0;
4108 }
4109
4110 err = l2cap_push_rx_skb(sk, skb, rx_control);
4111 if (err < 0)
4112 return 0;
4113
4114 if (rx_control & L2CAP_CTRL_FINAL) {
4115 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4116 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4117 else
4118 l2cap_retransmit_frames(sk);
4119 }
4120
4121 __mod_ack_timer();
4122
4123 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4124 if (pi->num_acked == num_to_ack - 1)
4125 l2cap_send_ack(pi);
4126
4127 return 0;
4128
4129 drop:
4130 kfree_skb(skb);
4131 return 0;
4132 }
4133
/* Handle a Receiver Ready (RR) S-frame.
 *
 * RR acknowledges frames up to req_seq.  Then, depending on P/F bits:
 *  - P set: the peer polls us; answer with an F-bit response (SREJ
 *    tail if selective-reject recovery is in progress).
 *  - F set: answer to our poll; retransmit unless the REJ we sent
 *    was already acted upon.
 *  - neither: plain acknowledgement; resume sending.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			/* Peer left its busy state while we have frames
			 * pending: restart the retransmission timer */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* Skip the retransmit if our earlier REJ already
		 * triggered one */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			l2cap_ertm_send(sk);
		}
	}
}
4178
4179 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4180 {
4181 struct l2cap_pinfo *pi = l2cap_pi(sk);
4182 u8 tx_seq = __get_reqseq(rx_control);
4183
4184 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4185
4186 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4187
4188 pi->expected_ack_seq = tx_seq;
4189 l2cap_drop_acked_frames(sk);
4190
4191 if (rx_control & L2CAP_CTRL_FINAL) {
4192 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4193 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4194 else
4195 l2cap_retransmit_frames(sk);
4196 } else {
4197 l2cap_retransmit_frames(sk);
4198
4199 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4200 pi->conn_state |= L2CAP_CONN_REJ_ACT;
4201 }
4202 }
/* Handle a Selective Reject (SREJ) S-frame: the peer requests
 * retransmission of the single frame req_seq.
 *
 *  - P set: req_seq also acknowledges; answer with F-bit frames and
 *    remember which SREJ we answered (srej_save_reqseq).
 *  - F set: answer to our poll; skip the retransmit when this SREJ
 *    was already acted upon.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			/* Note the seq we answered while polled */
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4239
4240 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4241 {
4242 struct l2cap_pinfo *pi = l2cap_pi(sk);
4243 u8 tx_seq = __get_reqseq(rx_control);
4244
4245 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4246
4247 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4248 pi->expected_ack_seq = tx_seq;
4249 l2cap_drop_acked_frames(sk);
4250
4251 if (rx_control & L2CAP_CTRL_POLL)
4252 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4253
4254 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4255 del_timer(&pi->retrans_timer);
4256 if (rx_control & L2CAP_CTRL_POLL)
4257 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4258 return;
4259 }
4260
4261 if (rx_control & L2CAP_CTRL_POLL)
4262 l2cap_send_srejtail(sk);
4263 else
4264 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
4265 }
4266
4267 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4268 {
4269 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4270
4271 if (L2CAP_CTRL_FINAL & rx_control &&
4272 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4273 del_timer(&l2cap_pi(sk)->monitor_timer);
4274 if (l2cap_pi(sk)->unacked_frames > 0)
4275 __mod_retrans_timer();
4276 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4277 }
4278
4279 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4280 case L2CAP_SUPER_RCV_READY:
4281 l2cap_data_channel_rrframe(sk, rx_control);
4282 break;
4283
4284 case L2CAP_SUPER_REJECT:
4285 l2cap_data_channel_rejframe(sk, rx_control);
4286 break;
4287
4288 case L2CAP_SUPER_SELECT_REJECT:
4289 l2cap_data_channel_srejframe(sk, rx_control);
4290 break;
4291
4292 case L2CAP_SUPER_RCV_NOT_READY:
4293 l2cap_data_channel_rnrframe(sk, rx_control);
4294 break;
4295 }
4296
4297 kfree_skb(skb);
4298 return 0;
4299 }
4300
/* Validate and dispatch one received ERTM frame (I- or S-frame).
 *
 * Performs the FCS check, payload length validation against MPS, and
 * req_seq window validation.  Corrupted frames are silently dropped;
 * protocol violations disconnect the channel.  Always returns 0 and
 * consumes @skb (directly or through the frame handlers).
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* A SAR-start I-frame carries a 2 byte SDU length field */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* The FCS trailer, when enabled, is not payload either */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	/* Offsets are computed in signed arithmetic and then
	 * normalized into [0, 63] */
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames must carry no payload at all */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4370
/* Deliver an L2CAP data frame to the channel identified by @cid,
 * dispatching on the channel mode (basic / ERTM / streaming).
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * socket bh-locked; the lock is released at 'done'.  @skb is consumed
 * on every path.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process directly unless userspace holds the socket
		 * lock, in which case defer via the backlog */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* SAR-start frames carry a 2 byte SDU length field */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not used in streaming mode */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming never retransmits: on a gap just
		 * resynchronize the expected sequence number */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4458
4459 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4460 {
4461 struct sock *sk;
4462
4463 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4464 if (!sk)
4465 goto drop;
4466
4467 BT_DBG("sk %p, len %d", sk, skb->len);
4468
4469 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4470 goto drop;
4471
4472 if (l2cap_pi(sk)->imtu < skb->len)
4473 goto drop;
4474
4475 if (!sock_queue_rcv_skb(sk, skb))
4476 goto done;
4477
4478 drop:
4479 kfree_skb(skb);
4480
4481 done:
4482 if (sk)
4483 bh_unlock_sock(sk);
4484 return 0;
4485 }
4486
4487 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4488 {
4489 struct l2cap_hdr *lh = (void *) skb->data;
4490 u16 cid, len;
4491 __le16 psm;
4492
4493 skb_pull(skb, L2CAP_HDR_SIZE);
4494 cid = __le16_to_cpu(lh->cid);
4495 len = __le16_to_cpu(lh->len);
4496
4497 if (len != skb->len) {
4498 kfree_skb(skb);
4499 return;
4500 }
4501
4502 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4503
4504 switch (cid) {
4505 case L2CAP_CID_SIGNALING:
4506 l2cap_sig_channel(conn, skb);
4507 break;
4508
4509 case L2CAP_CID_CONN_LESS:
4510 psm = get_unaligned_le16(skb->data);
4511 skb_pull(skb, 2);
4512 l2cap_conless_channel(conn, psm, skb);
4513 break;
4514
4515 default:
4516 l2cap_data_channel(conn, cid, skb);
4517 break;
4518 }
4519 }
4520
4521 /* ---- L2CAP interface with lower layer (HCI) ---- */
4522
4523 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4524 {
4525 int exact = 0, lm1 = 0, lm2 = 0;
4526 register struct sock *sk;
4527 struct hlist_node *node;
4528
4529 if (type != ACL_LINK)
4530 return -EINVAL;
4531
4532 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4533
4534 /* Find listening sockets and check their link_mode */
4535 read_lock(&l2cap_sk_list.lock);
4536 sk_for_each(sk, node, &l2cap_sk_list.head) {
4537 if (sk->sk_state != BT_LISTEN)
4538 continue;
4539
4540 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4541 lm1 |= HCI_LM_ACCEPT;
4542 if (l2cap_pi(sk)->role_switch)
4543 lm1 |= HCI_LM_MASTER;
4544 exact++;
4545 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4546 lm2 |= HCI_LM_ACCEPT;
4547 if (l2cap_pi(sk)->role_switch)
4548 lm2 |= HCI_LM_MASTER;
4549 }
4550 }
4551 read_unlock(&l2cap_sk_list.lock);
4552
4553 return exact ? lm1 : lm2;
4554 }
4555
4556 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4557 {
4558 struct l2cap_conn *conn;
4559
4560 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4561
4562 if (hcon->type != ACL_LINK)
4563 return -EINVAL;
4564
4565 if (!status) {
4566 conn = l2cap_conn_add(hcon, status);
4567 if (conn)
4568 l2cap_conn_ready(conn);
4569 } else
4570 l2cap_conn_del(hcon, bt_err(status));
4571
4572 return 0;
4573 }
4574
/* HCI callback: reason code to use when the ACL link is disconnected.
 * 0x13 is the HCI "Remote User Terminated Connection" reason, used
 * when no L2CAP state exists for the link. */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
4586
/* HCI callback: the ACL link went down; destroy all L2CAP state on it. */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return -EINVAL;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
4598
4599 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4600 {
4601 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4602 return;
4603
4604 if (encrypt == 0x00) {
4605 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4606 l2cap_sock_clear_timer(sk);
4607 l2cap_sock_set_timer(sk, HZ * 5);
4608 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4609 __l2cap_sock_close(sk, ECONNREFUSED);
4610 } else {
4611 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4612 l2cap_sock_clear_timer(sk);
4613 }
4614 }
4615
/* HCI callback: an authentication/encryption procedure completed.
 *
 * Walks every channel on the connection and advances its state
 * machine: connected channels get their encryption policy enforced,
 * channels waiting on security either proceed with the connection
 * request/response or are failed.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Skip channels whose connect request is still pending */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security OK: send the deferred
				 * connection request */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: close shortly */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4689
4690 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4691 {
4692 struct l2cap_conn *conn = hcon->l2cap_data;
4693
4694 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4695 goto drop;
4696
4697 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4698
4699 if (flags & ACL_START) {
4700 struct l2cap_hdr *hdr;
4701 struct sock *sk;
4702 u16 cid;
4703 int len;
4704
4705 if (conn->rx_len) {
4706 BT_ERR("Unexpected start frame (len %d)", skb->len);
4707 kfree_skb(conn->rx_skb);
4708 conn->rx_skb = NULL;
4709 conn->rx_len = 0;
4710 l2cap_conn_unreliable(conn, ECOMM);
4711 }
4712
4713 /* Start fragment always begin with Basic L2CAP header */
4714 if (skb->len < L2CAP_HDR_SIZE) {
4715 BT_ERR("Frame is too short (len %d)", skb->len);
4716 l2cap_conn_unreliable(conn, ECOMM);
4717 goto drop;
4718 }
4719
4720 hdr = (struct l2cap_hdr *) skb->data;
4721 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4722 cid = __le16_to_cpu(hdr->cid);
4723
4724 if (len == skb->len) {
4725 /* Complete frame received */
4726 l2cap_recv_frame(conn, skb);
4727 return 0;
4728 }
4729
4730 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4731
4732 if (skb->len > len) {
4733 BT_ERR("Frame is too long (len %d, expected len %d)",
4734 skb->len, len);
4735 l2cap_conn_unreliable(conn, ECOMM);
4736 goto drop;
4737 }
4738
4739 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4740
4741 if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
4742 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
4743 len, l2cap_pi(sk)->imtu);
4744 bh_unlock_sock(sk);
4745 l2cap_conn_unreliable(conn, ECOMM);
4746 goto drop;
4747 }
4748
4749 if (sk)
4750 bh_unlock_sock(sk);
4751
4752 /* Allocate skb for the complete frame (with header) */
4753 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4754 if (!conn->rx_skb)
4755 goto drop;
4756
4757 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4758 skb->len);
4759 conn->rx_len = len - skb->len;
4760 } else {
4761 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4762
4763 if (!conn->rx_len) {
4764 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4765 l2cap_conn_unreliable(conn, ECOMM);
4766 goto drop;
4767 }
4768
4769 if (skb->len > conn->rx_len) {
4770 BT_ERR("Fragment is too long (len %d, expected %d)",
4771 skb->len, conn->rx_len);
4772 kfree_skb(conn->rx_skb);
4773 conn->rx_skb = NULL;
4774 conn->rx_len = 0;
4775 l2cap_conn_unreliable(conn, ECOMM);
4776 goto drop;
4777 }
4778
4779 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4780 skb->len);
4781 conn->rx_len -= skb->len;
4782
4783 if (!conn->rx_len) {
4784 /* Complete frame received */
4785 l2cap_recv_frame(conn, conn->rx_skb);
4786 conn->rx_skb = NULL;
4787 }
4788 }
4789
4790 drop:
4791 kfree_skb(skb);
4792 return 0;
4793 }
4794
/* debugfs seq_file callback: one line per L2CAP socket with addresses,
 * state, PSM, CIDs, MTUs and security level. */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4817
/* debugfs open: bind the seq_file to l2cap_debugfs_show() */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4822
/* File operations for the "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Handle of the "l2cap" debugfs file; removed on module exit */
static struct dentry *l2cap_debugfs;
4831
/* Socket-level operations for PF_BLUETOOTH / BTPROTO_L2CAP sockets */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
4851
/* Registered with the Bluetooth socket layer to create L2CAP sockets */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
4857
/* Hooks through which the HCI core delivers link events and ACL data */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4868
4869 static int __init l2cap_init(void)
4870 {
4871 int err;
4872
4873 err = proto_register(&l2cap_proto, 0);
4874 if (err < 0)
4875 return err;
4876
4877 _busy_wq = create_singlethread_workqueue("l2cap");
4878 if (!_busy_wq)
4879 goto error;
4880
4881 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4882 if (err < 0) {
4883 BT_ERR("L2CAP socket registration failed");
4884 goto error;
4885 }
4886
4887 err = hci_register_proto(&l2cap_hci_proto);
4888 if (err < 0) {
4889 BT_ERR("L2CAP protocol registration failed");
4890 bt_sock_unregister(BTPROTO_L2CAP);
4891 goto error;
4892 }
4893
4894 if (bt_debugfs) {
4895 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4896 bt_debugfs, NULL, &l2cap_debugfs_fops);
4897 if (!l2cap_debugfs)
4898 BT_ERR("Failed to create L2CAP debug file");
4899 }
4900
4901 BT_INFO("L2CAP ver %s", VERSION);
4902 BT_INFO("L2CAP socket layer initialized");
4903
4904 return 0;
4905
4906 error:
4907 proto_unregister(&l2cap_proto);
4908 return err;
4909 }
4910
/* Module exit: tear everything down in reverse registration order.
 * The busy workqueue is flushed before destruction so no deferred
 * busy work runs after the sockets are gone. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4926
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it.  Intentionally empty. */
}
EXPORT_SYMBOL(l2cap_load);
4934
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Allow ERTM to be disabled at load time and toggled via sysfs (0644) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");
This page took 0.163849 seconds and 6 git commands to generate.