Merge branch 'omap-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[deliverable/linux.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core and sockets. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
#define VERSION "2.15"

/* When set, ERTM and streaming modes are not offered
 * (see l2cap_mode_supported / l2cap_sock_init). */
static int disable_ertm;

/* Local feature mask; ERTM/streaming bits are OR-ed in at runtime
 * by l2cap_mode_supported() when ERTM is enabled. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Workqueue for deferred work; presumably drained by l2cap_busy_work()
 * — that code is outside this chunk. */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets, guarded by its own rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void l2cap_busy_work(struct work_struct *work);

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static int l2cap_build_conf_req(struct sock *sk, void *data);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
/* (Re)arm the socket timer to fire @timeout jiffies from now. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
91
/* Cancel a pending socket timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
97
/* Socket timer callback (timer/BH context): map the expired timer to a
 * connection error and tear the channel down. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		sock_put(sk);
		return;
	}

	/* Choose the error the user sees: timeouts while connected,
	 * configuring, or connecting above SDP security level read as a
	 * refused connection; everything else as a plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must run on the unlocked socket */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
130
131 /* ---- L2CAP channels ---- */
132 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
133 {
134 struct sock *s;
135 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
136 if (l2cap_pi(s)->dcid == cid)
137 break;
138 }
139 return s;
140 }
141
142 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
143 {
144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->scid == cid)
147 break;
148 }
149 return s;
150 }
151
152 /* Find channel with given SCID.
153 * Returns locked socket */
154 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
155 {
156 struct sock *s;
157 read_lock(&l->lock);
158 s = __l2cap_get_chan_by_scid(l, cid);
159 if (s)
160 bh_lock_sock(s);
161 read_unlock(&l->lock);
162 return s;
163 }
164
165 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 {
167 struct sock *s;
168 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
169 if (l2cap_pi(s)->ident == ident)
170 break;
171 }
172 return s;
173 }
174
175 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
176 {
177 struct sock *s;
178 read_lock(&l->lock);
179 s = __l2cap_get_chan_by_ident(l, ident);
180 if (s)
181 bh_lock_sock(s);
182 read_unlock(&l->lock);
183 return s;
184 }
185
186 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
187 {
188 u16 cid = L2CAP_CID_DYN_START;
189
190 for (; cid < L2CAP_CID_DYN_END; cid++) {
191 if (!__l2cap_get_chan_by_scid(l, cid))
192 return cid;
193 }
194
195 return 0;
196 }
197
198 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
199 {
200 sock_hold(sk);
201
202 if (l->head)
203 l2cap_pi(l->head)->prev_c = sk;
204
205 l2cap_pi(sk)->next_c = l->head;
206 l2cap_pi(sk)->prev_c = NULL;
207 l->head = sk;
208 }
209
210 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
211 {
212 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
213
214 write_lock_bh(&l->lock);
215 if (sk == l->head)
216 l->head = next;
217
218 if (next)
219 l2cap_pi(next)->prev_c = prev;
220 if (prev)
221 l2cap_pi(prev)->next_c = next;
222 write_unlock_bh(&l->lock);
223
224 __sock_put(sk);
225 }
226
/* Attach channel @sk to @conn, assigning CIDs based on socket type.
 * Caller must hold conn->chan_list.lock for writing (see l2cap_chan_add). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13 = remote user terminated connection (default disc reason) */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	/* Incoming channels are queued on the listening parent for accept() */
	if (parent)
		bt_accept_enqueue(parent, sk);
}
258
/* Delete channel.
 * Must be called on the locked socket.
 * Detaches @sk from its connection, marks it closed/zapped, notifies the
 * owner (or the listening parent), and purges all ERTM state. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Drop the ACL reference taken when the channel was added */
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted: remove from the parent's accept queue
		 * and wake the listener */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		/* Stop all ERTM timers before freeing their state */
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
307
308 static inline u8 l2cap_get_auth_type(struct sock *sk)
309 {
310 if (sk->sk_type == SOCK_RAW) {
311 switch (l2cap_pi(sk)->sec_level) {
312 case BT_SECURITY_HIGH:
313 return HCI_AT_DEDICATED_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 return HCI_AT_DEDICATED_BONDING;
316 default:
317 return HCI_AT_NO_BONDING;
318 }
319 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
320 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
321 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
322
323 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
324 return HCI_AT_NO_BONDING_MITM;
325 else
326 return HCI_AT_NO_BONDING;
327 } else {
328 switch (l2cap_pi(sk)->sec_level) {
329 case BT_SECURITY_HIGH:
330 return HCI_AT_GENERAL_BONDING_MITM;
331 case BT_SECURITY_MEDIUM:
332 return HCI_AT_GENERAL_BONDING;
333 default:
334 return HCI_AT_NO_BONDING;
335 }
336 }
337 }
338
339 /* Service level security */
340 static inline int l2cap_check_security(struct sock *sk)
341 {
342 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
343 __u8 auth_type;
344
345 auth_type = l2cap_get_auth_type(sk);
346
347 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
348 auth_type);
349 }
350
351 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
352 {
353 u8 id;
354
355 /* Get next available identificator.
356 * 1 - 128 are used by kernel.
357 * 129 - 199 are reserved.
358 * 200 - 254 are used by utilities like l2ping, etc.
359 */
360
361 spin_lock_bh(&conn->lock);
362
363 if (++conn->tx_ident > 128)
364 conn->tx_ident = 1;
365
366 id = conn->tx_ident;
367
368 spin_unlock_bh(&conn->lock);
369
370 return id;
371 }
372
373 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
374 {
375 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
376
377 BT_DBG("code 0x%2.2x", code);
378
379 if (!skb)
380 return;
381
382 hci_send_acl(conn->hcon, skb, 0);
383 }
384
385 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
386 {
387 struct sk_buff *skb;
388 struct l2cap_hdr *lh;
389 struct l2cap_conn *conn = pi->conn;
390 struct sock *sk = (struct sock *)pi;
391 int count, hlen = L2CAP_HDR_SIZE + 2;
392
393 if (sk->sk_state != BT_CONNECTED)
394 return;
395
396 if (pi->fcs == L2CAP_FCS_CRC16)
397 hlen += 2;
398
399 BT_DBG("pi %p, control 0x%2.2x", pi, control);
400
401 count = min_t(unsigned int, conn->mtu, hlen);
402 control |= L2CAP_CTRL_FRAME_TYPE;
403
404 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
405 control |= L2CAP_CTRL_FINAL;
406 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
407 }
408
409 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
410 control |= L2CAP_CTRL_POLL;
411 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
412 }
413
414 skb = bt_skb_alloc(count, GFP_ATOMIC);
415 if (!skb)
416 return;
417
418 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
419 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
420 lh->cid = cpu_to_le16(pi->dcid);
421 put_unaligned_le16(control, skb_put(skb, 2));
422
423 if (pi->fcs == L2CAP_FCS_CRC16) {
424 u16 fcs = crc16(0, (u8 *)lh, count - 2);
425 put_unaligned_le16(fcs, skb_put(skb, 2));
426 }
427
428 hci_send_acl(pi->conn->hcon, skb, 0);
429 }
430
431 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
432 {
433 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
434 control |= L2CAP_SUPER_RCV_NOT_READY;
435 pi->conn_state |= L2CAP_CONN_RNR_SENT;
436 } else
437 control |= L2CAP_SUPER_RCV_READY;
438
439 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
440
441 l2cap_send_sframe(pi, control);
442 }
443
/* True when no L2CAP Connect Request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct sock *sk)
{
	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
}
448
/* Advance channel setup on an established ACL link: send a Connect
 * Request once the remote feature mask is known (and security passes),
 * otherwise kick off the feature-mask information exchange first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait until the feature exchange has actually finished */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		/* First channel on this link: query the remote feature mask
		 * and arm a timeout in case the peer never answers. */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
482
483 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
484 {
485 u32 local_feat_mask = l2cap_feat_mask;
486 if (!disable_ertm)
487 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
488
489 switch (mode) {
490 case L2CAP_MODE_ERTM:
491 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
492 case L2CAP_MODE_STREAMING:
493 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
494 default:
495 return 0x00;
496 }
497 }
498
/* Start a graceful disconnect: flush pending TX, stop ERTM timers, send
 * a Disconnect Request and move the socket to BT_DISCONN with @err. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
522
523 /* ---- L2CAP connections ---- */
524 static void l2cap_conn_start(struct l2cap_conn *conn)
525 {
526 struct l2cap_chan_list *l = &conn->chan_list;
527 struct sock_del_list del, *tmp1, *tmp2;
528 struct sock *sk;
529
530 BT_DBG("conn %p", conn);
531
532 INIT_LIST_HEAD(&del.list);
533
534 read_lock(&l->lock);
535
536 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
537 bh_lock_sock(sk);
538
539 if (sk->sk_type != SOCK_SEQPACKET &&
540 sk->sk_type != SOCK_STREAM) {
541 bh_unlock_sock(sk);
542 continue;
543 }
544
545 if (sk->sk_state == BT_CONNECT) {
546 struct l2cap_conn_req req;
547
548 if (!l2cap_check_security(sk) ||
549 !__l2cap_no_conn_pending(sk)) {
550 bh_unlock_sock(sk);
551 continue;
552 }
553
554 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
555 conn->feat_mask)
556 && l2cap_pi(sk)->conf_state &
557 L2CAP_CONF_STATE2_DEVICE) {
558 tmp1 = kzalloc(sizeof(struct sock_del_list),
559 GFP_ATOMIC);
560 tmp1->sk = sk;
561 list_add_tail(&tmp1->list, &del.list);
562 bh_unlock_sock(sk);
563 continue;
564 }
565
566 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
567 req.psm = l2cap_pi(sk)->psm;
568
569 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
570 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
571
572 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
573 L2CAP_CONN_REQ, sizeof(req), &req);
574
575 } else if (sk->sk_state == BT_CONNECT2) {
576 struct l2cap_conn_rsp rsp;
577 char buf[128];
578 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
579 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
580
581 if (l2cap_check_security(sk)) {
582 if (bt_sk(sk)->defer_setup) {
583 struct sock *parent = bt_sk(sk)->parent;
584 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
585 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
586 parent->sk_data_ready(parent, 0);
587
588 } else {
589 sk->sk_state = BT_CONFIG;
590 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
591 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
592 }
593 } else {
594 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
595 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
596 }
597
598 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
599 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
600
601 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
602 rsp.result != L2CAP_CR_SUCCESS) {
603 bh_unlock_sock(sk);
604 continue;
605 }
606
607 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
608 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
609 l2cap_build_conf_req(sk, buf), buf);
610 l2cap_pi(sk)->num_conf_req++;
611 }
612
613 bh_unlock_sock(sk);
614 }
615
616 read_unlock(&l->lock);
617
618 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
619 bh_lock_sock(tmp1->sk);
620 __l2cap_sock_close(tmp1->sk, ECONNRESET);
621 bh_unlock_sock(tmp1->sk);
622 list_del(&tmp1->list);
623 kfree(tmp1);
624 }
625 }
626
627 static void l2cap_conn_ready(struct l2cap_conn *conn)
628 {
629 struct l2cap_chan_list *l = &conn->chan_list;
630 struct sock *sk;
631
632 BT_DBG("conn %p", conn);
633
634 read_lock(&l->lock);
635
636 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
637 bh_lock_sock(sk);
638
639 if (sk->sk_type != SOCK_SEQPACKET &&
640 sk->sk_type != SOCK_STREAM) {
641 l2cap_sock_clear_timer(sk);
642 sk->sk_state = BT_CONNECTED;
643 sk->sk_state_change(sk);
644 } else if (sk->sk_state == BT_CONNECT)
645 l2cap_do_start(sk);
646
647 bh_unlock_sock(sk);
648 }
649
650 read_unlock(&l->lock);
651 }
652
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	/* Only channels that asked for reliability get the error */
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
670
671 static void l2cap_info_timeout(unsigned long arg)
672 {
673 struct l2cap_conn *conn = (void *) arg;
674
675 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
676 conn->info_ident = 0;
677
678 l2cap_conn_start(conn);
679 }
680
/* Create (or return the existing) L2CAP connection object for @hcon.
 * Returns NULL on allocation failure, or the existing conn unchanged if
 * one is already attached (also when @status is non-zero). */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13 = remote user terminated connection (default disc reason) */
	conn->disc_reason = 0x13;

	return conn;
}
713
/* Tear down the L2CAP connection attached to @hcon: kill every channel
 * with error @err, stop the info timer and free the conn object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		/* kill must run on the unlocked socket */
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
740
741 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
742 {
743 struct l2cap_chan_list *l = &conn->chan_list;
744 write_lock_bh(&l->lock);
745 __l2cap_chan_add(conn, sk, parent);
746 write_unlock_bh(&l->lock);
747 }
748
749 /* ---- Socket interface ---- */
750 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
751 {
752 struct sock *sk;
753 struct hlist_node *node;
754 sk_for_each(sk, node, &l2cap_sk_list.head)
755 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
756 goto found;
757 sk = NULL;
758 found:
759 return sk;
760 }
761
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins, otherwise
 * the last BDADDR_ANY listener seen.
 */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only when the loop broke out on an exact match */
	return node ? sk : sk1;
}
791
/* Socket destructor: drop any skbs still queued for RX or TX. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
799
800 static void l2cap_sock_cleanup_listen(struct sock *parent)
801 {
802 struct sock *sk;
803
804 BT_DBG("parent %p", parent);
805
806 /* Close not yet accepted channels */
807 while ((sk = bt_accept_dequeue(parent, NULL)))
808 l2cap_sock_close(sk);
809
810 parent->sk_state = BT_CLOSED;
811 sock_set_flag(parent, SOCK_ZAPPED);
812 }
813
814 /* Kill socket (only if zapped and orphan)
815 * Must be called on unlocked socket.
816 */
817 static void l2cap_sock_kill(struct sock *sk)
818 {
819 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
820 return;
821
822 BT_DBG("sk %p state %d", sk, sk->sk_state);
823
824 /* Kill poor orphan */
825 bt_sock_unlink(&l2cap_sk_list, sk);
826 sock_set_flag(sk, SOCK_DEAD);
827 sock_put(sk);
828 }
829
/* Close @sk according to its current state with error @reason.
 * Caller must hold the socket lock. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Graceful disconnect, bounded by the send timeout */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Incoming connection still pending: reject it */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
883
/* Close and release a socket.
 * Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	/* kill runs on the unlocked socket */
	l2cap_sock_kill(sk);
}
893
/* Initialize the L2CAP-specific part of a new socket: inherit settings
 * from @parent (accepted connection) or apply defaults, then set up the
 * TX/SREJ/busy queues. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		/* Child of a listening socket: copy the parent's options */
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		/* SOCK_STREAM defaults to ERTM (no renegotiation) when
		 * ERTM is not disabled */
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
939
/* Protocol descriptor used by sk_alloc() for L2CAP sockets. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
945
/* Allocate and minimally initialize an L2CAP socket; links it into the
 * global socket list.  Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
970
971 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
972 int kern)
973 {
974 struct sock *sk;
975
976 BT_DBG("sock %p", sock);
977
978 sock->state = SS_UNCONNECTED;
979
980 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
981 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
982 return -ESOCKTNOSUPPORT;
983
984 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
985 return -EPERM;
986
987 sock->ops = &l2cap_sock_ops;
988
989 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
990 if (!sk)
991 return -ENOMEM;
992
993 l2cap_sock_init(sk, NULL);
994 return 0;
995 }
996
/* bind(2): attach a source bdaddr and (optionally) a PSM to the socket. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy only what the caller supplied; the rest stays zeroed */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Binding to a fixed CID is not supported */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm) {
		__u16 psm = __le16_to_cpu(la.l2_psm);

		/* PSM must be odd and lsb of upper byte must be 0 */
		if ((psm & 0x0101) != 0x0001) {
			err = -EINVAL;
			goto done;
		}

		/* Restrict usage of well-known PSMs */
		if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
			err = -EACCES;
			goto done;
		}
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) start at SDP security */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
1060
/* Establish (or reuse) the ACL link to the destination and attach this
 * channel to it, then kick off channel setup if the link is already up.
 * Returns 0 on success or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	/* Pick the local adapter that routes to dst */
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	auth_type = l2cap_get_auth_type(sk);

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		/* Drop the reference taken by hci_connect() */
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: raw/dgram sockets are connected as soon
		 * as security passes; others start channel setup */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1120
/* connect(2): validate the destination and channel mode, start the
 * connection, and (unless non-blocking) wait for it to complete. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy only what the caller supplied; the rest stays zeroed */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connecting to a fixed CID is not supported */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
			sk->sk_type != SOCK_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1205
/* listen(2): move a bound connection-oriented socket to BT_LISTEN,
 * auto-assigning a free dynamic PSM if none was bound. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Scan the dynamic PSM range (odd values only) for a free
		 * slot on this source address */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1264
/*
 * accept() handler for L2CAP listening sockets.
 *
 * Blocks (unless non-blocking) until bt_accept_dequeue() hands back a
 * connected child socket.  The waitqueue entry is exclusive so only one
 * accepting task is woken per incoming connection, and the socket lock
 * is dropped while sleeping so the child can actually be delivered.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* The socket may have left BT_LISTEN while we slept */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1320
1321 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1322 {
1323 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1324 struct sock *sk = sock->sk;
1325
1326 BT_DBG("sock %p, sk %p", sock, sk);
1327
1328 addr->sa_family = AF_BLUETOOTH;
1329 *len = sizeof(struct sockaddr_l2);
1330
1331 if (peer) {
1332 la->l2_psm = l2cap_pi(sk)->psm;
1333 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1334 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1335 } else {
1336 la->l2_psm = l2cap_pi(sk)->sport;
1337 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1338 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1339 }
1340
1341 return 0;
1342 }
1343
/*
 * Sleep until every outstanding ERTM I-frame has been acknowledged by
 * the peer, or the connection disappears.  Called with the socket
 * locked (from shutdown); the lock is dropped while sleeping.  Sleeps
 * in HZ/5 slices so a vanished peer cannot block shutdown forever.
 * Returns 0 on success, a pending socket error, or a signal errno.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the slice if the previous one fully elapsed */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1374
/*
 * ERTM monitor timer callback (timer context, hence bh_lock_sock).
 * Fires while we are waiting for the peer to answer a poll.  After
 * remote_max_tx unanswered polls the channel is torn down; otherwise
 * the poll (RR with the P bit) is repeated and the timer re-armed.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1394
/*
 * ERTM retransmission timer callback (timer context).  An I-frame went
 * unacknowledged: start the poll sequence by sending an RR/RNR with
 * the P bit, switch to the monitor timer and flag that we now wait for
 * an F-bit response (L2CAP_CONN_WAIT_F).
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1410
/*
 * Free acknowledged I-frames from the head of the TX queue.  Frames are
 * dropped until the one with tx_seq == expected_ack_seq (the first
 * still-unacknowledged frame) is reached.  Once nothing is outstanding
 * the retransmission timer is stopped.
 */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1429
1430 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1431 {
1432 struct l2cap_pinfo *pi = l2cap_pi(sk);
1433
1434 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1435
1436 hci_send_acl(pi->conn->hcon, skb, 0);
1437 }
1438
/*
 * Transmit every queued PDU in streaming mode: stamp the next TX
 * sequence number into the control field, recompute the FCS over the
 * updated frame when CRC16 is enabled, and send.  Streaming mode does
 * not retransmit, so frames are dequeued and given away permanently.
 */
static void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything except its own two bytes */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		/* TxSeq is modulo-64 per the ERTM/streaming control field */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1460
1461 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1462 {
1463 struct l2cap_pinfo *pi = l2cap_pi(sk);
1464 struct sk_buff *skb, *tx_skb;
1465 u16 control, fcs;
1466
1467 skb = skb_peek(TX_QUEUE(sk));
1468 if (!skb)
1469 return;
1470
1471 do {
1472 if (bt_cb(skb)->tx_seq == tx_seq)
1473 break;
1474
1475 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1476 return;
1477
1478 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1479
1480 if (pi->remote_max_tx &&
1481 bt_cb(skb)->retries == pi->remote_max_tx) {
1482 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1483 return;
1484 }
1485
1486 tx_skb = skb_clone(skb, GFP_ATOMIC);
1487 bt_cb(skb)->retries++;
1488 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1489
1490 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1491 control |= L2CAP_CTRL_FINAL;
1492 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1493 }
1494
1495 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1496 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1497
1498 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1499
1500 if (pi->fcs == L2CAP_FCS_CRC16) {
1501 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1502 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1503 }
1504
1505 l2cap_do_send(sk, tx_skb);
1506 }
1507
1508 static int l2cap_ertm_send(struct sock *sk)
1509 {
1510 struct sk_buff *skb, *tx_skb;
1511 struct l2cap_pinfo *pi = l2cap_pi(sk);
1512 u16 control, fcs;
1513 int nsent = 0;
1514
1515 if (sk->sk_state != BT_CONNECTED)
1516 return -ENOTCONN;
1517
1518 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1519
1520 if (pi->remote_max_tx &&
1521 bt_cb(skb)->retries == pi->remote_max_tx) {
1522 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1523 break;
1524 }
1525
1526 tx_skb = skb_clone(skb, GFP_ATOMIC);
1527
1528 bt_cb(skb)->retries++;
1529
1530 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1531 control &= L2CAP_CTRL_SAR;
1532
1533 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1534 control |= L2CAP_CTRL_FINAL;
1535 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1536 }
1537 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1538 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1539 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1540
1541
1542 if (pi->fcs == L2CAP_FCS_CRC16) {
1543 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1544 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1545 }
1546
1547 l2cap_do_send(sk, tx_skb);
1548
1549 __mod_retrans_timer();
1550
1551 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1552 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1553
1554 pi->unacked_frames++;
1555 pi->frames_sent++;
1556
1557 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1558 sk->sk_send_head = NULL;
1559 else
1560 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1561
1562 nsent++;
1563 }
1564
1565 return nsent;
1566 }
1567
1568 static int l2cap_retransmit_frames(struct sock *sk)
1569 {
1570 struct l2cap_pinfo *pi = l2cap_pi(sk);
1571 int ret;
1572
1573 if (!skb_queue_empty(TX_QUEUE(sk)))
1574 sk->sk_send_head = TX_QUEUE(sk)->next;
1575
1576 pi->next_tx_seq = pi->expected_ack_seq;
1577 ret = l2cap_ertm_send(sk);
1578 return ret;
1579 }
1580
/*
 * Acknowledge received I-frames.  When locally busy, an RNR is sent and
 * remembered via L2CAP_CONN_RNR_SENT.  Otherwise pending I-frames are
 * flushed first so the ack can piggy-back on them; only if none could
 * be sent is an explicit RR S-frame emitted.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* I-frames carry ReqSeq, so sending any acts as the ack */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1601
1602 static void l2cap_send_srejtail(struct sock *sk)
1603 {
1604 struct srej_list *tail;
1605 u16 control;
1606
1607 control = L2CAP_SUPER_SELECT_REJECT;
1608 control |= L2CAP_CTRL_FINAL;
1609
1610 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1611 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1612
1613 l2cap_send_sframe(l2cap_pi(sk), control);
1614 }
1615
/*
 * Copy 'len' bytes of user data from 'msg' into 'skb': the first
 * 'count' bytes go into skb itself, the remainder into continuation
 * fragments (no L2CAP header) chained on frag_list, each at most
 * conn->mtu bytes.  Returns bytes consumed or a negative errno.  On
 * error the caller frees 'skb', which also releases any fragments
 * already linked into its frag_list.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1647
/*
 * Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header followed
 * by the 2-byte PSM, then the user payload, fragmenting anything beyond
 * conn->mtu via l2cap_skbuff_fromiovec.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the PSM field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1676
/*
 * Build a basic-mode B-frame: plain L2CAP header plus the user payload,
 * fragmenting anything beyond conn->mtu via l2cap_skbuff_fromiovec.
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1704
/*
 * Build an ERTM/streaming I-frame: L2CAP header, 2-byte control field
 * (sequence numbers are filled in at transmit time), an optional 2-byte
 * SDU length for SAR start frames, the payload, and two zero bytes
 * reserved for the FCS when CRC16 is in use (also computed at transmit
 * time).  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SAR start frames carry the total SDU length */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the trailing FCS */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1749
/*
 * Segment an SDU larger than the remote MPS into a START frame
 * (carrying the total SDU length), CONTINUE frames, and a final END
 * frame.  The PDUs are built on a private queue first, so a mid-stream
 * allocation failure can purge them without touching the socket's TX
 * queue; on success they are spliced in and sk_send_head is set if
 * transmission was idle.  Returns total bytes queued or a negative
 * errno.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1795
/*
 * sendmsg() handler.  Dispatches on socket type / channel mode:
 * SOCK_DGRAM builds a single connectionless PDU; basic mode builds one
 * B-frame bounded by the outgoing MTU; ERTM and streaming modes queue
 * the SDU as one I-frame (or SAR-segment it when it exceeds the remote
 * MPS) and then kick transmission.  Returns bytes accepted or a
 * negative errno.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(sk);
		} else {
			/* While waiting for an F-bit response from a busy
			 * peer the data stays queued; report success since
			 * it will be sent once the peer recovers. */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
				err = len;
				break;
			}
			err = l2cap_ertm_send(sk);
		}

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}
1897
/*
 * recvmsg() handler.  For a socket sitting in BT_CONNECT2 with deferred
 * setup enabled, the first read acts as the "accept" trigger: the
 * pending connect response is sent, followed by our configuration
 * request, and 0 is returned without delivering data.  Otherwise
 * reception is delegated to the generic bt_sock stream/datagram
 * helpers.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		/* Only send one config request per channel */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1939
1940 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1941 {
1942 struct sock *sk = sock->sk;
1943 struct l2cap_options opts;
1944 int len, err = 0;
1945 u32 opt;
1946
1947 BT_DBG("sk %p", sk);
1948
1949 lock_sock(sk);
1950
1951 switch (optname) {
1952 case L2CAP_OPTIONS:
1953 if (sk->sk_state == BT_CONNECTED) {
1954 err = -EINVAL;
1955 break;
1956 }
1957
1958 opts.imtu = l2cap_pi(sk)->imtu;
1959 opts.omtu = l2cap_pi(sk)->omtu;
1960 opts.flush_to = l2cap_pi(sk)->flush_to;
1961 opts.mode = l2cap_pi(sk)->mode;
1962 opts.fcs = l2cap_pi(sk)->fcs;
1963 opts.max_tx = l2cap_pi(sk)->max_tx;
1964 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1965
1966 len = min_t(unsigned int, sizeof(opts), optlen);
1967 if (copy_from_user((char *) &opts, optval, len)) {
1968 err = -EFAULT;
1969 break;
1970 }
1971
1972 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1973 err = -EINVAL;
1974 break;
1975 }
1976
1977 l2cap_pi(sk)->mode = opts.mode;
1978 switch (l2cap_pi(sk)->mode) {
1979 case L2CAP_MODE_BASIC:
1980 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1981 break;
1982 case L2CAP_MODE_ERTM:
1983 case L2CAP_MODE_STREAMING:
1984 if (!disable_ertm)
1985 break;
1986 /* fall through */
1987 default:
1988 err = -EINVAL;
1989 break;
1990 }
1991
1992 l2cap_pi(sk)->imtu = opts.imtu;
1993 l2cap_pi(sk)->omtu = opts.omtu;
1994 l2cap_pi(sk)->fcs = opts.fcs;
1995 l2cap_pi(sk)->max_tx = opts.max_tx;
1996 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1997 break;
1998
1999 case L2CAP_LM:
2000 if (get_user(opt, (u32 __user *) optval)) {
2001 err = -EFAULT;
2002 break;
2003 }
2004
2005 if (opt & L2CAP_LM_AUTH)
2006 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2007 if (opt & L2CAP_LM_ENCRYPT)
2008 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2009 if (opt & L2CAP_LM_SECURE)
2010 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2011
2012 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2013 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
2014 break;
2015
2016 default:
2017 err = -ENOPROTOOPT;
2018 break;
2019 }
2020
2021 release_sock(sk);
2022 return err;
2023 }
2024
/*
 * setsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * only SOL_BLUETOOTH options (BT_SECURITY, BT_DEFER_SETUP) are handled
 * here.  Returns 0 or a negative errno.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		/* Security levels only apply to connection-oriented and
		 * raw sockets */
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Deferred setup can only be toggled before connecting */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2089
2090 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2091 {
2092 struct sock *sk = sock->sk;
2093 struct l2cap_options opts;
2094 struct l2cap_conninfo cinfo;
2095 int len, err = 0;
2096 u32 opt;
2097
2098 BT_DBG("sk %p", sk);
2099
2100 if (get_user(len, optlen))
2101 return -EFAULT;
2102
2103 lock_sock(sk);
2104
2105 switch (optname) {
2106 case L2CAP_OPTIONS:
2107 opts.imtu = l2cap_pi(sk)->imtu;
2108 opts.omtu = l2cap_pi(sk)->omtu;
2109 opts.flush_to = l2cap_pi(sk)->flush_to;
2110 opts.mode = l2cap_pi(sk)->mode;
2111 opts.fcs = l2cap_pi(sk)->fcs;
2112 opts.max_tx = l2cap_pi(sk)->max_tx;
2113 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2114
2115 len = min_t(unsigned int, len, sizeof(opts));
2116 if (copy_to_user(optval, (char *) &opts, len))
2117 err = -EFAULT;
2118
2119 break;
2120
2121 case L2CAP_LM:
2122 switch (l2cap_pi(sk)->sec_level) {
2123 case BT_SECURITY_LOW:
2124 opt = L2CAP_LM_AUTH;
2125 break;
2126 case BT_SECURITY_MEDIUM:
2127 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2128 break;
2129 case BT_SECURITY_HIGH:
2130 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2131 L2CAP_LM_SECURE;
2132 break;
2133 default:
2134 opt = 0;
2135 break;
2136 }
2137
2138 if (l2cap_pi(sk)->role_switch)
2139 opt |= L2CAP_LM_MASTER;
2140
2141 if (l2cap_pi(sk)->force_reliable)
2142 opt |= L2CAP_LM_RELIABLE;
2143
2144 if (put_user(opt, (u32 __user *) optval))
2145 err = -EFAULT;
2146 break;
2147
2148 case L2CAP_CONNINFO:
2149 if (sk->sk_state != BT_CONNECTED &&
2150 !(sk->sk_state == BT_CONNECT2 &&
2151 bt_sk(sk)->defer_setup)) {
2152 err = -ENOTCONN;
2153 break;
2154 }
2155
2156 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2157 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2158
2159 len = min_t(unsigned int, len, sizeof(cinfo));
2160 if (copy_to_user(optval, (char *) &cinfo, len))
2161 err = -EFAULT;
2162
2163 break;
2164
2165 default:
2166 err = -ENOPROTOOPT;
2167 break;
2168 }
2169
2170 release_sock(sk);
2171 return err;
2172 }
2173
2174 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2175 {
2176 struct sock *sk = sock->sk;
2177 struct bt_security sec;
2178 int len, err = 0;
2179
2180 BT_DBG("sk %p", sk);
2181
2182 if (level == SOL_L2CAP)
2183 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2184
2185 if (level != SOL_BLUETOOTH)
2186 return -ENOPROTOOPT;
2187
2188 if (get_user(len, optlen))
2189 return -EFAULT;
2190
2191 lock_sock(sk);
2192
2193 switch (optname) {
2194 case BT_SECURITY:
2195 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2196 && sk->sk_type != SOCK_RAW) {
2197 err = -EINVAL;
2198 break;
2199 }
2200
2201 sec.level = l2cap_pi(sk)->sec_level;
2202
2203 len = min_t(unsigned int, len, sizeof(sec));
2204 if (copy_to_user(optval, (char *) &sec, len))
2205 err = -EFAULT;
2206
2207 break;
2208
2209 case BT_DEFER_SETUP:
2210 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2211 err = -EINVAL;
2212 break;
2213 }
2214
2215 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2216 err = -EFAULT;
2217
2218 break;
2219
2220 default:
2221 err = -ENOPROTOOPT;
2222 break;
2223 }
2224
2225 release_sock(sk);
2226 return err;
2227 }
2228
/*
 * shutdown() handler.  For ERTM channels all unacknowledged frames are
 * waited for first (best effort), then the channel is closed; if
 * SO_LINGER is set the caller additionally waits for BT_CLOSED up to
 * the linger time.  Returns 0 or a negative errno / pending sk_err.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2259
/*
 * release() handler: shut the channel down (full duplex), detach the
 * sock from the socket and let l2cap_sock_kill() free it when it is no
 * longer referenced.
 */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
2276
/*
 * Called when channel configuration completes.  Clears the config
 * state and wakes whoever is waiting: the connect()er for an outgoing
 * channel, or the listening parent's accept() for an incoming one.
 */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
2299
2300 /* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Best effort: a socket whose clone cannot be allocated
		 * or whose receive queue is full simply misses the frame */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2326
2327 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-channel command skb: L2CAP header (CID 0x0001),
 * command header, then 'dlen' bytes of payload.  Payload beyond the
 * connection MTU is carried in continuation fragments chained on
 * frag_list.  Returns the skb or NULL on allocation failure (any
 * partial chain is freed).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far, frag_list included */
	kfree_skb(skb);
	return NULL;
}
2386
/*
 * Parse one configuration option at *ptr, returning its type, length
 * and value (1/2/4-byte values are fetched; anything else is returned
 * as a pointer into the buffer).  *ptr is advanced past the option.
 * Returns the total size consumed.
 *
 * NOTE(review): opt->len comes off the wire and is not validated here
 * against the remaining buffer length — the caller's iteration bound
 * must guarantee this cannot run past the packet; confirm at call
 * sites.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2419
/*
 * Append one configuration option to the buffer at *ptr.  Values of
 * length 1/2/4 are stored inline (little-endian); any other length
 * treats 'val' as a pointer to the data and memcpy's it.  *ptr is
 * advanced past the option written.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2449
/*
 * ERTM ack timer callback (timer context): acknowledge frames that have
 * been received but not yet acked, so the peer's window keeps moving
 * even without reverse traffic.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2458
/*
 * Initialise the ERTM state machine for a freshly configured channel:
 * reset sequence counters, arm the retransmission/monitor/ack timers,
 * set up the SREJ and busy queues, and route backlog processing through
 * the ERTM receive path.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
2481
2482 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2483 {
2484 switch (mode) {
2485 case L2CAP_MODE_STREAMING:
2486 case L2CAP_MODE_ERTM:
2487 if (l2cap_mode_supported(mode, remote_feat_mask))
2488 return mode;
2489 /* fall through */
2490 default:
2491 return L2CAP_MODE_BASIC;
2492 }
2493 }
2494
/* Build an outgoing Configure Request for this channel into @data.
 *
 * On the very first request/response exchange the desired mode may still
 * be downgraded based on the remote feature mask; afterwards the mode is
 * fixed and only the matching options (MTU, RFC, FCS) are emitted.
 * Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection happens only before the first config exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE means the mode is mandated locally and must
		 * not be renegotiated. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* MTU is only sent when it differs from the default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		/* Peers that know neither ERTM nor streaming do not need an
		 * explicit basic-mode RFC option. */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		/* Timeouts are left zero in the request; the responder fills
		 * them in (see l2cap_parse_conf_req). */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Keep the PDU within the link MTU minus 10 bytes of
		 * per-frame overhead — presumably header/control/FCS. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Propose running without FCS if we or the peer asked so. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2596
2597 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2598 {
2599 struct l2cap_pinfo *pi = l2cap_pi(sk);
2600 struct l2cap_conf_rsp *rsp = data;
2601 void *ptr = rsp->data;
2602 void *req = pi->conf_req;
2603 int len = pi->conf_len;
2604 int type, hint, olen;
2605 unsigned long val;
2606 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2607 u16 mtu = L2CAP_DEFAULT_MTU;
2608 u16 result = L2CAP_CONF_SUCCESS;
2609
2610 BT_DBG("sk %p", sk);
2611
2612 while (len >= L2CAP_CONF_OPT_SIZE) {
2613 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2614
2615 hint = type & L2CAP_CONF_HINT;
2616 type &= L2CAP_CONF_MASK;
2617
2618 switch (type) {
2619 case L2CAP_CONF_MTU:
2620 mtu = val;
2621 break;
2622
2623 case L2CAP_CONF_FLUSH_TO:
2624 pi->flush_to = val;
2625 break;
2626
2627 case L2CAP_CONF_QOS:
2628 break;
2629
2630 case L2CAP_CONF_RFC:
2631 if (olen == sizeof(rfc))
2632 memcpy(&rfc, (void *) val, olen);
2633 break;
2634
2635 case L2CAP_CONF_FCS:
2636 if (val == L2CAP_FCS_NONE)
2637 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2638
2639 break;
2640
2641 default:
2642 if (hint)
2643 break;
2644
2645 result = L2CAP_CONF_UNKNOWN;
2646 *((u8 *) ptr++) = type;
2647 break;
2648 }
2649 }
2650
2651 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2652 goto done;
2653
2654 switch (pi->mode) {
2655 case L2CAP_MODE_STREAMING:
2656 case L2CAP_MODE_ERTM:
2657 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2658 pi->mode = l2cap_select_mode(rfc.mode,
2659 pi->conn->feat_mask);
2660 break;
2661 }
2662
2663 if (pi->mode != rfc.mode)
2664 return -ECONNREFUSED;
2665
2666 break;
2667 }
2668
2669 done:
2670 if (pi->mode != rfc.mode) {
2671 result = L2CAP_CONF_UNACCEPT;
2672 rfc.mode = pi->mode;
2673
2674 if (pi->num_conf_rsp == 1)
2675 return -ECONNREFUSED;
2676
2677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2678 sizeof(rfc), (unsigned long) &rfc);
2679 }
2680
2681
2682 if (result == L2CAP_CONF_SUCCESS) {
2683 /* Configure output options and let the other side know
2684 * which ones we don't like. */
2685
2686 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2687 result = L2CAP_CONF_UNACCEPT;
2688 else {
2689 pi->omtu = mtu;
2690 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2691 }
2692 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2693
2694 switch (rfc.mode) {
2695 case L2CAP_MODE_BASIC:
2696 pi->fcs = L2CAP_FCS_NONE;
2697 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2698 break;
2699
2700 case L2CAP_MODE_ERTM:
2701 pi->remote_tx_win = rfc.txwin_size;
2702 pi->remote_max_tx = rfc.max_transmit;
2703
2704 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2705 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2706
2707 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2708
2709 rfc.retrans_timeout =
2710 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2711 rfc.monitor_timeout =
2712 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2713
2714 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2715
2716 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2717 sizeof(rfc), (unsigned long) &rfc);
2718
2719 break;
2720
2721 case L2CAP_MODE_STREAMING:
2722 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2723 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2724
2725 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2726
2727 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2728
2729 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2730 sizeof(rfc), (unsigned long) &rfc);
2731
2732 break;
2733
2734 default:
2735 result = L2CAP_CONF_UNACCEPT;
2736
2737 memset(&rfc, 0, sizeof(rfc));
2738 rfc.mode = pi->mode;
2739 }
2740
2741 if (result == L2CAP_CONF_SUCCESS)
2742 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2743 }
2744 rsp->scid = cpu_to_le16(pi->dcid);
2745 rsp->result = cpu_to_le16(result);
2746 rsp->flags = cpu_to_le16(0x0000);
2747
2748 return ptr - data;
2749 }
2750
2751 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2752 {
2753 struct l2cap_pinfo *pi = l2cap_pi(sk);
2754 struct l2cap_conf_req *req = data;
2755 void *ptr = req->data;
2756 int type, olen;
2757 unsigned long val;
2758 struct l2cap_conf_rfc rfc;
2759
2760 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2761
2762 while (len >= L2CAP_CONF_OPT_SIZE) {
2763 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2764
2765 switch (type) {
2766 case L2CAP_CONF_MTU:
2767 if (val < L2CAP_DEFAULT_MIN_MTU) {
2768 *result = L2CAP_CONF_UNACCEPT;
2769 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2770 } else
2771 pi->imtu = val;
2772 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2773 break;
2774
2775 case L2CAP_CONF_FLUSH_TO:
2776 pi->flush_to = val;
2777 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2778 2, pi->flush_to);
2779 break;
2780
2781 case L2CAP_CONF_RFC:
2782 if (olen == sizeof(rfc))
2783 memcpy(&rfc, (void *)val, olen);
2784
2785 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2786 rfc.mode != pi->mode)
2787 return -ECONNREFUSED;
2788
2789 pi->fcs = 0;
2790
2791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2792 sizeof(rfc), (unsigned long) &rfc);
2793 break;
2794 }
2795 }
2796
2797 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2798 return -ECONNREFUSED;
2799
2800 pi->mode = rfc.mode;
2801
2802 if (*result == L2CAP_CONF_SUCCESS) {
2803 switch (rfc.mode) {
2804 case L2CAP_MODE_ERTM:
2805 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2806 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2807 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2808 break;
2809 case L2CAP_MODE_STREAMING:
2810 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2811 }
2812 }
2813
2814 req->dcid = cpu_to_le16(pi->dcid);
2815 req->flags = cpu_to_le16(0x0000);
2816
2817 return ptr - data;
2818 }
2819
2820 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2821 {
2822 struct l2cap_conf_rsp *rsp = data;
2823 void *ptr = rsp->data;
2824
2825 BT_DBG("sk %p", sk);
2826
2827 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2828 rsp->result = cpu_to_le16(result);
2829 rsp->flags = cpu_to_le16(flags);
2830
2831 return ptr - data;
2832 }
2833
2834 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2835 {
2836 struct l2cap_pinfo *pi = l2cap_pi(sk);
2837 int type, olen;
2838 unsigned long val;
2839 struct l2cap_conf_rfc rfc;
2840
2841 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2842
2843 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2844 return;
2845
2846 while (len >= L2CAP_CONF_OPT_SIZE) {
2847 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2848
2849 switch (type) {
2850 case L2CAP_CONF_RFC:
2851 if (olen == sizeof(rfc))
2852 memcpy(&rfc, (void *)val, olen);
2853 goto done;
2854 }
2855 }
2856
2857 done:
2858 switch (rfc.mode) {
2859 case L2CAP_MODE_ERTM:
2860 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2861 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2862 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2863 break;
2864 case L2CAP_MODE_STREAMING:
2865 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2866 }
2867 }
2868
2869 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2870 {
2871 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2872
2873 if (rej->reason != 0x0000)
2874 return 0;
2875
2876 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2877 cmd->ident == conn->info_ident) {
2878 del_timer(&conn->info_timer);
2879
2880 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2881 conn->info_ident = 0;
2882
2883 l2cap_conn_start(conn);
2884 }
2885
2886 return 0;
2887 }
2888
/* Handle an incoming Connection Request.
 *
 * Looks up a listening socket for the requested PSM, applies the security
 * check, allocates and initializes a child socket, and replies with a
 * Connection Response (success, pending, or an error result).  When the
 * remote feature mask is still unknown, an Information Request is also
 * sent before configuration can proceed.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP).
	 * PSM 0x0001 is SDP, which is exempt from the link-mode check. */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Child socket inherits settings from the listening parent. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept() before setup
				 * continues; report authorization pending. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange not finished yet; answer "pending". */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Kick off configuration right away when the connect succeeded. */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
3016
/* Handle a Connection Response to our earlier Connection Request.
 *
 * On success moves the channel to BT_CONFIG and sends the first
 * Configure Request; on "pending" just records the state; on any error
 * result tears the channel down.  The channel lookup helpers return the
 * socket locked, so every exit path after a successful lookup unlocks.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A zero scid means the peer could not allocate a channel yet;
	 * fall back to matching by command identifier. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Don't send a second config request if one is in flight. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3078
3079 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3080 {
3081 /* FCS is enabled only in ERTM or streaming mode, if one or both
3082 * sides request it.
3083 */
3084 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3085 pi->fcs = L2CAP_FCS_NONE;
3086 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3087 pi->fcs = L2CAP_FCS_CRC16;
3088 }
3089
/* Handle an incoming Configure Request.
 *
 * Requests may arrive fragmented (continuation flag set); fragments are
 * accumulated into pi->conf_req until the final one arrives, then the
 * whole request is parsed and answered.  When both directions are done
 * configuring, the channel transitions to BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	/* Config on a channel that is not in BT_CONFIG is a protocol
	 * violation: answer with Command Reject (invalid CID, 0x0002). */
	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: bring the channel up. */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our own config request has not gone out yet; send it now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3180
3181 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3182 {
3183 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3184 u16 scid, flags, result;
3185 struct sock *sk;
3186 int len = cmd->len - sizeof(*rsp);
3187
3188 scid = __le16_to_cpu(rsp->scid);
3189 flags = __le16_to_cpu(rsp->flags);
3190 result = __le16_to_cpu(rsp->result);
3191
3192 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3193 scid, flags, result);
3194
3195 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3196 if (!sk)
3197 return 0;
3198
3199 switch (result) {
3200 case L2CAP_CONF_SUCCESS:
3201 l2cap_conf_rfc_get(sk, rsp->data, len);
3202 break;
3203
3204 case L2CAP_CONF_UNACCEPT:
3205 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3206 char req[64];
3207
3208 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3209 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3210 goto done;
3211 }
3212
3213 /* throw out any old stored conf requests */
3214 result = L2CAP_CONF_SUCCESS;
3215 len = l2cap_parse_conf_rsp(sk, rsp->data,
3216 len, req, &result);
3217 if (len < 0) {
3218 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3219 goto done;
3220 }
3221
3222 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3223 L2CAP_CONF_REQ, len, req);
3224 l2cap_pi(sk)->num_conf_req++;
3225 if (result != L2CAP_CONF_SUCCESS)
3226 goto done;
3227 break;
3228 }
3229
3230 default:
3231 sk->sk_err = ECONNRESET;
3232 l2cap_sock_set_timer(sk, HZ * 5);
3233 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3234 goto done;
3235 }
3236
3237 if (flags & 0x01)
3238 goto done;
3239
3240 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3241
3242 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3243 set_default_fcs(l2cap_pi(sk));
3244
3245 sk->sk_state = BT_CONNECTED;
3246 l2cap_pi(sk)->next_tx_seq = 0;
3247 l2cap_pi(sk)->expected_tx_seq = 0;
3248 __skb_queue_head_init(TX_QUEUE(sk));
3249 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3250 l2cap_ertm_init(sk);
3251
3252 l2cap_chan_ready(sk);
3253 }
3254
3255 done:
3256 bh_unlock_sock(sk);
3257 return 0;
3258 }
3259
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the channel down.  When the socket is
 * currently owned by userspace the teardown is deferred via a short
 * timer instead of being done here. */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; lookup returns the sock locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3297
/* Handle a Disconnection Response to our earlier request: finish tearing
 * down the channel.  As in l2cap_disconnect_req, a user-owned socket is
 * handled later via a short timer rather than deleted here. */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup returns the sock locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3328
3329 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3330 {
3331 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3332 u16 type;
3333
3334 type = __le16_to_cpu(req->type);
3335
3336 BT_DBG("type 0x%4.4x", type);
3337
3338 if (type == L2CAP_IT_FEAT_MASK) {
3339 u8 buf[8];
3340 u32 feat_mask = l2cap_feat_mask;
3341 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3342 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3343 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3344 if (!disable_ertm)
3345 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3346 | L2CAP_FEAT_FCS;
3347 put_unaligned_le32(feat_mask, rsp->data);
3348 l2cap_send_cmd(conn, cmd->ident,
3349 L2CAP_INFO_RSP, sizeof(buf), buf);
3350 } else if (type == L2CAP_IT_FIXED_CHAN) {
3351 u8 buf[12];
3352 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3353 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3354 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3355 memcpy(buf + 4, l2cap_fixed_chan, 8);
3356 l2cap_send_cmd(conn, cmd->ident,
3357 L2CAP_INFO_RSP, sizeof(buf), buf);
3358 } else {
3359 struct l2cap_info_rsp rsp;
3360 rsp.type = cpu_to_le16(type);
3361 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3362 l2cap_send_cmd(conn, cmd->ident,
3363 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3364 }
3365
3366 return 0;
3367 }
3368
/* Handle an Information Response from the peer.
 *
 * A feature-mask reply may trigger a follow-up fixed-channel query;
 * otherwise (or on failure) the exchange is marked done and pending
 * channel setups are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	/* Peer could not answer: treat the exchange as finished anyway. */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* When the peer supports fixed channels, query the map
		 * before declaring the exchange complete. */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3416
/* Demultiplex the L2CAP signaling channel: walk every command in @skb,
 * dispatch it to its handler, and answer failures with Command Reject.
 * The skb may carry several back-to-back commands; it is consumed here.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A length beyond the buffer or a zero ident means a
		 * malformed packet; stop parsing the remainder. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3506
3507 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3508 {
3509 u16 our_fcs, rcv_fcs;
3510 int hdr_size = L2CAP_HDR_SIZE + 2;
3511
3512 if (pi->fcs == L2CAP_FCS_CRC16) {
3513 skb_trim(skb, skb->len - 2);
3514 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3515 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3516
3517 if (our_fcs != rcv_fcs)
3518 return -EBADMSG;
3519 }
3520 return 0;
3521 }
3522
/* After the poll/final exchange, resume transmission: announce local busy
 * with RNR if applicable, retransmit if the peer was busy, push pending
 * I-frames, and fall back to an RR when nothing else carried the ack. */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* If no I-frame went out (and we are not busy), the peer still
	 * needs an explicit RR so it sees our ReqSeq. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3549
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq relative to buffer_seq (sequence numbers are
 * modulo 64).  Returns -EINVAL for a duplicate tx_seq, 0 otherwise. */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of the new frame from buffer_seq in modulo-64 space. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		/* Duplicate frames are rejected, not replaced. */
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* Found the first queued frame beyond ours: insert here. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3592
/* Reassemble an SDU from ERTM I-frames according to their SAR bits.
 *
 * Unsegmented frames are delivered directly; START allocates pi->sdu and
 * begins accumulation, CONTINUE appends, END delivers the completed SDU.
 * Returns 0 on success (the input skb is consumed), -ENOMEM when memory
 * is exhausted (caller retries later — see L2CAP_CONN_SAR_RETRY), and
 * sends a disconnect on protocol violations.  Note: the "drop" path
 * deliberately falls through into "disconnect".
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame in the middle of a segmented SDU is
		 * a protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry after -ENOMEM the payload was already copied;
		 * only append it the first time through. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3700
/* Flush frames parked on BUSY_QUEUE (during Local Busy) back into the
 * ERTM reassembly path.  Returns 0 when the queue was fully drained and
 * the local-busy condition is cleared, or -EBUSY when reassembly failed
 * again and the remaining frames were left queued for a later retry.
 * Called with the socket locked.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: requeue at the head so frame order
			 * is preserved, and tell the caller to back off. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We previously sent RNR: poll the peer with RR + P-bit so it
	 * resumes transmission, and wait for the F-bit reply under the
	 * monitor timer (WAIT_F). */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3740
/* Deferred Local Busy recovery, run from the _busy_wq workqueue.
 * Repeatedly tries to push the buffered frames back into reassembly,
 * sleeping up to HZ/5 between attempts, until the busy queue empties,
 * L2CAP_LOCAL_BUSY_TRIES attempts have failed (the channel is then
 * disconnected), a signal is pending, or the socket reports an error.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the receive path
		 * (e.g. userspace draining the socket buffer) can make the
		 * progress we are waiting for. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3787
/* Deliver an in-sequence ERTM I-frame to reassembly, entering the Local
 * Busy state when delivery fails (typically a full socket receive
 * buffer).  While busy, frames are parked on BUSY_QUEUE and an RNR
 * S-frame throttles the peer; the busy worker retries delivery later.
 * Returns the reassembly result (negative errno on the transition into
 * local busy, or the -EBUSY/0 result of draining while already busy).
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already busy: park the frame in order and try to drain. */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the peer we are not ready to receive (RNR). */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	/* No point in a pending acknowledgement while we cannot accept
	 * more data. */
	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3826
3827 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3828 {
3829 struct l2cap_pinfo *pi = l2cap_pi(sk);
3830 struct sk_buff *_skb;
3831 int err = -EINVAL;
3832
3833 /*
3834 * TODO: We have to notify the userland if some data is lost with the
3835 * Streaming Mode.
3836 */
3837
3838 switch (control & L2CAP_CTRL_SAR) {
3839 case L2CAP_SDU_UNSEGMENTED:
3840 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3841 kfree_skb(pi->sdu);
3842 break;
3843 }
3844
3845 err = sock_queue_rcv_skb(sk, skb);
3846 if (!err)
3847 return 0;
3848
3849 break;
3850
3851 case L2CAP_SDU_START:
3852 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3853 kfree_skb(pi->sdu);
3854 break;
3855 }
3856
3857 pi->sdu_len = get_unaligned_le16(skb->data);
3858 skb_pull(skb, 2);
3859
3860 if (pi->sdu_len > pi->imtu) {
3861 err = -EMSGSIZE;
3862 break;
3863 }
3864
3865 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3866 if (!pi->sdu) {
3867 err = -ENOMEM;
3868 break;
3869 }
3870
3871 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3872
3873 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3874 pi->partial_sdu_len = skb->len;
3875 err = 0;
3876 break;
3877
3878 case L2CAP_SDU_CONTINUE:
3879 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3880 break;
3881
3882 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3883
3884 pi->partial_sdu_len += skb->len;
3885 if (pi->partial_sdu_len > pi->sdu_len)
3886 kfree_skb(pi->sdu);
3887 else
3888 err = 0;
3889
3890 break;
3891
3892 case L2CAP_SDU_END:
3893 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3894 break;
3895
3896 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3897
3898 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3899 pi->partial_sdu_len += skb->len;
3900
3901 if (pi->partial_sdu_len > pi->imtu)
3902 goto drop;
3903
3904 if (pi->partial_sdu_len == pi->sdu_len) {
3905 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3906 err = sock_queue_rcv_skb(sk, _skb);
3907 if (err < 0)
3908 kfree_skb(_skb);
3909 }
3910 err = 0;
3911
3912 drop:
3913 kfree_skb(pi->sdu);
3914 break;
3915 }
3916
3917 kfree_skb(skb);
3918 return err;
3919 }
3920
3921 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3922 {
3923 struct sk_buff *skb;
3924 u16 control;
3925
3926 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3927 if (bt_cb(skb)->tx_seq != tx_seq)
3928 break;
3929
3930 skb = skb_dequeue(SREJ_QUEUE(sk));
3931 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3932 l2cap_ertm_reassembly_sdu(sk, skb, control);
3933 l2cap_pi(sk)->buffer_seq_srej =
3934 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3935 tx_seq = (tx_seq + 1) % 64;
3936 }
3937 }
3938
/* Handle a duplicate I-frame for a sequence number we already SREJ'ed.
 * Walks the SREJ list from the head: each entry before the matching one
 * has its SREJ S-frame re-sent and is rotated to the tail of the list;
 * the matching entry itself is removed, freed, and the walk stops.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		/* Entry still outstanding: re-request it and move it to
		 * the end so list order keeps matching arrival order. */
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3958
3959 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3960 {
3961 struct l2cap_pinfo *pi = l2cap_pi(sk);
3962 struct srej_list *new;
3963 u16 control;
3964
3965 while (tx_seq != pi->expected_tx_seq) {
3966 control = L2CAP_SUPER_SELECT_REJECT;
3967 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3968 l2cap_send_sframe(pi, control);
3969
3970 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3971 new->tx_seq = pi->expected_tx_seq;
3972 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3973 list_add_tail(&new->list, SREJ_LIST(sk));
3974 }
3975 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3976 }
3977
3978 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3979 {
3980 struct l2cap_pinfo *pi = l2cap_pi(sk);
3981 u8 tx_seq = __get_txseq(rx_control);
3982 u8 req_seq = __get_reqseq(rx_control);
3983 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3984 int tx_seq_offset, expected_tx_seq_offset;
3985 int num_to_ack = (pi->tx_win/6) + 1;
3986 int err = 0;
3987
3988 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3989 rx_control);
3990
3991 if (L2CAP_CTRL_FINAL & rx_control &&
3992 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3993 del_timer(&pi->monitor_timer);
3994 if (pi->unacked_frames > 0)
3995 __mod_retrans_timer();
3996 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3997 }
3998
3999 pi->expected_ack_seq = req_seq;
4000 l2cap_drop_acked_frames(sk);
4001
4002 if (tx_seq == pi->expected_tx_seq)
4003 goto expected;
4004
4005 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
4006 if (tx_seq_offset < 0)
4007 tx_seq_offset += 64;
4008
4009 /* invalid tx_seq */
4010 if (tx_seq_offset >= pi->tx_win) {
4011 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4012 goto drop;
4013 }
4014
4015 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
4016 goto drop;
4017
4018 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4019 struct srej_list *first;
4020
4021 first = list_first_entry(SREJ_LIST(sk),
4022 struct srej_list, list);
4023 if (tx_seq == first->tx_seq) {
4024 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4025 l2cap_check_srej_gap(sk, tx_seq);
4026
4027 list_del(&first->list);
4028 kfree(first);
4029
4030 if (list_empty(SREJ_LIST(sk))) {
4031 pi->buffer_seq = pi->buffer_seq_srej;
4032 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
4033 l2cap_send_ack(pi);
4034 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4035 }
4036 } else {
4037 struct srej_list *l;
4038
4039 /* duplicated tx_seq */
4040 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4041 goto drop;
4042
4043 list_for_each_entry(l, SREJ_LIST(sk), list) {
4044 if (l->tx_seq == tx_seq) {
4045 l2cap_resend_srejframe(sk, tx_seq);
4046 return 0;
4047 }
4048 }
4049 l2cap_send_srejframe(sk, tx_seq);
4050 }
4051 } else {
4052 expected_tx_seq_offset =
4053 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4054 if (expected_tx_seq_offset < 0)
4055 expected_tx_seq_offset += 64;
4056
4057 /* duplicated tx_seq */
4058 if (tx_seq_offset < expected_tx_seq_offset)
4059 goto drop;
4060
4061 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4062
4063 BT_DBG("sk %p, Enter SREJ", sk);
4064
4065 INIT_LIST_HEAD(SREJ_LIST(sk));
4066 pi->buffer_seq_srej = pi->buffer_seq;
4067
4068 __skb_queue_head_init(SREJ_QUEUE(sk));
4069 __skb_queue_head_init(BUSY_QUEUE(sk));
4070 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4071
4072 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4073
4074 l2cap_send_srejframe(sk, tx_seq);
4075
4076 del_timer(&pi->ack_timer);
4077 }
4078 return 0;
4079
4080 expected:
4081 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4082
4083 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4084 bt_cb(skb)->tx_seq = tx_seq;
4085 bt_cb(skb)->sar = sar;
4086 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4087 return 0;
4088 }
4089
4090 err = l2cap_push_rx_skb(sk, skb, rx_control);
4091 if (err < 0)
4092 return 0;
4093
4094 if (rx_control & L2CAP_CTRL_FINAL) {
4095 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4096 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4097 else
4098 l2cap_retransmit_frames(sk);
4099 }
4100
4101 __mod_ack_timer();
4102
4103 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4104 if (pi->num_acked == num_to_ack - 1)
4105 l2cap_send_ack(pi);
4106
4107 return 0;
4108
4109 drop:
4110 kfree_skb(skb);
4111 return 0;
4112 }
4113
/* Handle a Receiver Ready (RR) S-frame: acknowledge frames up to the
 * peer's ReqSeq, then react to the P/F bits — a poll demands an
 * F-bit reply (SREJ tail or RR/RNR), a final bit may trigger pending
 * retransmissions, otherwise resume normal transmission.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: we must answer with the F-bit set. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Reply to our poll: retransmit unless a REJ already
		 * triggered the retransmission (REJ_ACT). */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
4157
/* Handle a Reject (REJ) S-frame: the peer asks for retransmission from
 * its ReqSeq.  Acknowledge up to that point, then retransmit — unless
 * the F-bit indicates this answers a poll already covered by a prior
 * REJ action (REJ_ACT), in which case retransmission already happened.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember we acted on a REJ while a poll is in flight so
		 * the eventual F-bit reply does not retransmit again. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) S-frame: retransmit the single frame
 * the peer asks for.  P-bit additionally acknowledges up to ReqSeq and
 * demands an F-bit reply; an F-bit reply matching a saved SREJ poll
 * (SREJ_ACT) means the frame was already retransmitted.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4218
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, stop
 * the retransmission timer (outside SREJ recovery), and answer a poll
 * with an F-bit RR/RNR or the SREJ tail as appropriate.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer cannot receive: no point retransmitting for now. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4245
4246 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4247 {
4248 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4249
4250 if (L2CAP_CTRL_FINAL & rx_control &&
4251 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4252 del_timer(&l2cap_pi(sk)->monitor_timer);
4253 if (l2cap_pi(sk)->unacked_frames > 0)
4254 __mod_retrans_timer();
4255 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4256 }
4257
4258 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4259 case L2CAP_SUPER_RCV_READY:
4260 l2cap_data_channel_rrframe(sk, rx_control);
4261 break;
4262
4263 case L2CAP_SUPER_REJECT:
4264 l2cap_data_channel_rejframe(sk, rx_control);
4265 break;
4266
4267 case L2CAP_SUPER_SELECT_REJECT:
4268 l2cap_data_channel_srejframe(sk, rx_control);
4269 break;
4270
4271 case L2CAP_SUPER_RCV_NOT_READY:
4272 l2cap_data_channel_rnrframe(sk, rx_control);
4273 break;
4274 }
4275
4276 kfree_skb(skb);
4277 return 0;
4278 }
4279
/* Entry point for a PDU received on an ERTM channel.  Strips the
 * control field, validates FCS, payload length against MPS, and the
 * ReqSeq window, then dispatches to the I-frame or S-frame handler.
 * Consumes skb on every path; always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* SAR start frames carry a 2-byte SDU length header. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* FCS trailer, when enabled, is not payload. */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames must carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4349
/* Route a data PDU to the channel identified by its CID and process it
 * according to the channel mode (Basic / ERTM / Streaming).  Consumes
 * skb on every path.
 *
 * NOTE(review): the socket is unlocked at "done" but never explicitly
 * locked here — l2cap_get_chan_by_scid presumably returns with the
 * socket bh-locked; confirm against its definition.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process directly when the socket is not owned by
		 * userspace, otherwise defer via the socket backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode carries I-frames only, bounded by MPS. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* No retransmission in streaming mode: skip over any
		 * lost frames by resynchronising on the received TxSeq. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4437
4438 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4439 {
4440 struct sock *sk;
4441
4442 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4443 if (!sk)
4444 goto drop;
4445
4446 bh_lock_sock(sk);
4447
4448 BT_DBG("sk %p, len %d", sk, skb->len);
4449
4450 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4451 goto drop;
4452
4453 if (l2cap_pi(sk)->imtu < skb->len)
4454 goto drop;
4455
4456 if (!sock_queue_rcv_skb(sk, skb))
4457 goto done;
4458
4459 drop:
4460 kfree_skb(skb);
4461
4462 done:
4463 if (sk)
4464 bh_unlock_sock(sk);
4465 return 0;
4466 }
4467
4468 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4469 {
4470 struct l2cap_hdr *lh = (void *) skb->data;
4471 u16 cid, len;
4472 __le16 psm;
4473
4474 skb_pull(skb, L2CAP_HDR_SIZE);
4475 cid = __le16_to_cpu(lh->cid);
4476 len = __le16_to_cpu(lh->len);
4477
4478 if (len != skb->len) {
4479 kfree_skb(skb);
4480 return;
4481 }
4482
4483 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4484
4485 switch (cid) {
4486 case L2CAP_CID_SIGNALING:
4487 l2cap_sig_channel(conn, skb);
4488 break;
4489
4490 case L2CAP_CID_CONN_LESS:
4491 psm = get_unaligned_le16(skb->data);
4492 skb_pull(skb, 2);
4493 l2cap_conless_channel(conn, psm, skb);
4494 break;
4495
4496 default:
4497 l2cap_data_channel(conn, cid, skb);
4498 break;
4499 }
4500 }
4501
4502 /* ---- L2CAP interface with lower layer (HCI) ---- */
4503
4504 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4505 {
4506 int exact = 0, lm1 = 0, lm2 = 0;
4507 register struct sock *sk;
4508 struct hlist_node *node;
4509
4510 if (type != ACL_LINK)
4511 return -EINVAL;
4512
4513 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4514
4515 /* Find listening sockets and check their link_mode */
4516 read_lock(&l2cap_sk_list.lock);
4517 sk_for_each(sk, node, &l2cap_sk_list.head) {
4518 if (sk->sk_state != BT_LISTEN)
4519 continue;
4520
4521 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4522 lm1 |= HCI_LM_ACCEPT;
4523 if (l2cap_pi(sk)->role_switch)
4524 lm1 |= HCI_LM_MASTER;
4525 exact++;
4526 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4527 lm2 |= HCI_LM_ACCEPT;
4528 if (l2cap_pi(sk)->role_switch)
4529 lm2 |= HCI_LM_MASTER;
4530 }
4531 }
4532 read_unlock(&l2cap_sk_list.lock);
4533
4534 return exact ? lm1 : lm2;
4535 }
4536
4537 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4538 {
4539 struct l2cap_conn *conn;
4540
4541 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4542
4543 if (hcon->type != ACL_LINK)
4544 return -EINVAL;
4545
4546 if (!status) {
4547 conn = l2cap_conn_add(hcon, status);
4548 if (conn)
4549 l2cap_conn_ready(conn);
4550 } else
4551 l2cap_conn_del(hcon, bt_err(status));
4552
4553 return 0;
4554 }
4555
4556 static int l2cap_disconn_ind(struct hci_conn *hcon)
4557 {
4558 struct l2cap_conn *conn = hcon->l2cap_data;
4559
4560 BT_DBG("hcon %p", hcon);
4561
4562 if (hcon->type != ACL_LINK || !conn)
4563 return 0x13;
4564
4565 return conn->disc_reason;
4566 }
4567
/* HCI callback when the ACL link has been disconnected: tear down the
 * L2CAP connection, translating the HCI reason to an errno for sockets.
 */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return -EINVAL;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
4579
4580 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4581 {
4582 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4583 return;
4584
4585 if (encrypt == 0x00) {
4586 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4587 l2cap_sock_clear_timer(sk);
4588 l2cap_sock_set_timer(sk, HZ * 5);
4589 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4590 __l2cap_sock_close(sk, ECONNREFUSED);
4591 } else {
4592 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4593 l2cap_sock_clear_timer(sk);
4594 }
4595 }
4596
/* HCI callback when authentication/encryption completes on a link.
 * Walks every channel on the connection: established channels get the
 * encryption-change handling; channels waiting on security (BT_CONNECT)
 * either send their deferred Connection Request or are scheduled for
 * close; incoming channels (BT_CONNECT2) get their deferred Connection
 * Response with success or a security-block result.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channel still mid-connect at the L2CAP level: its own
		 * state machine will pick up the result. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security satisfied: send the Connection
				 * Request we were holding back. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: give the channel a short
				 * timer so it is closed from timer context. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4670
/* HCI callback for incoming ACL data.  Reassembles L2CAP frames that
 * HCI split across multiple ACL packets: an ACL_START fragment carries
 * the Basic L2CAP header and determines the total frame length; any
 * continuation fragments are appended to conn->rx_skb until the frame
 * is complete, then it is handed to l2cap_recv_frame().  Malformed
 * fragment sequences mark the connection unreliable and are dropped.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		if (conn->rx_len) {
			/* A previous frame never completed: discard it. */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject early if the frame cannot fit the channel's MTU.
		 * NOTE(review): l2cap_get_chan_by_scid appears to return
		 * with the socket bh-locked — hence the unlocks below. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4775
/* seq_file show callback for the "l2cap" debugfs entry: dumps one line
 * per L2CAP socket (addresses, state, PSM, CIDs, MTUs, security level).
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4798
/* debugfs open callback: bind the seq_file single-shot show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4803
/* File operations for the "l2cap" debugfs entry (seq_file based). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal at module exit. */
static struct dentry *l2cap_debugfs;
4812
/* Socket operations exposed to userspace for SOCK_* L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

/* Registration record for the BTPROTO_L2CAP socket family. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

/* Callbacks L2CAP registers with the HCI core. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4849
/* Module init: register the protocol and socket family, create the
 * local-busy workqueue, register with HCI, and (optionally) create the
 * debugfs entry.  Unwinds in reverse order on failure.
 */
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	/* Single-threaded: busy-work items for one channel must not run
	 * concurrently with each other. */
	_busy_wq = create_singlethread_workqueue("l2cap");
	if (!_busy_wq) {
		proto_unregister(&l2cap_proto);
		return -ENOMEM;
	}

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	/* debugfs entry is best-effort: failure is only logged. */
	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	destroy_workqueue(_busy_wq);
	proto_unregister(&l2cap_proto);
	return err;
}
4894
/* Module exit: tear down in reverse of l2cap_init — debugfs, the busy
 * workqueue (flushed first so no work runs after destruction), socket
 * family, HCI protocol, and finally the proto itself.
 */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4910
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4918
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Allows disabling ERTM at load time for interoperability testing. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");
This page took 0.236149 seconds and 5 git commands to generate.