Bluetooth: Fix/implement Three-wire reliable packet sending
drivers/bluetooth/hci_h5.c
/*
 *
 *  Bluetooth HCI Three-wire UART driver
 *
 *  Copyright (C) 2012 Intel Corporation
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"

#define HCI_3WIRE_ACK_PKT	0
#define HCI_3WIRE_LINK_PKT	15

#define H5_TXWINSIZE		4

#define H5_ACK_TIMEOUT		msecs_to_jiffies(250)

/*
 * Maximum Three-wire packet:
 *     4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

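/*
 * Layout of the 4-byte packet header (before SLIP encoding):
 *
 *   byte 0: bits 0-2  sequence number (reliable packets only)
 *           bits 3-5  acknowledgement number
 *           bit  6    data integrity check (CRC) present
 *           bit  7    reliable packet flag
 *   byte 1: bits 0-3  packet type
 *           bits 4-7  payload length (low nibble)
 *   byte 2:           payload length (high byte)
 *   byte 3:           header checksum; all four header bytes sum to 0xff
 */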
/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

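/*
 * SLIP framing: packets are delimited by 0xc0. Any 0xc0 or 0xdb byte
 * inside a packet is escaped as 0xdb 0xdc or 0xdb 0xdd respectively.
 */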
#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd

struct h5 {
	struct sk_buff_head	unack;		/* Unack'ed packets queue */
	struct sk_buff_head	rel;		/* Reliable packets queue */
	struct sk_buff_head	unrel;		/* Unreliable packets queue */

	struct sk_buff		*rx_skb;	/* Receive buffer */
	size_t			rx_pending;	/* Expecting more bytes */
	bool			rx_esc;		/* SLIP escape mode */
	u8			rx_ack;		/* Last ack number received */
	u8			rx_seq;		/* Next seq number expected (our ack) */

	int			(*rx_func) (struct hci_uart *hu, u8 c);

	struct timer_list	timer;		/* Retransmission timer */

	bool			tx_ack_req;	/* Pending ack to send */
	u8			tx_seq;		/* Next seq number to send */
};

static void h5_reset_rx(struct h5 *h5);

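/*
 * Retransmission timer: if no ack arrives within H5_ACK_TIMEOUT, move the
 * unacked packets back onto the reliable queue (newest first, so pushing
 * them onto the head preserves their order), rewind tx_seq accordingly
 * and kick the transmitter.
 */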
static void h5_timed_event(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *) arg;
	struct h5 *h5 = hu->priv;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

	hci_uart_tx_wakeup(hu);
}

static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;

	BT_DBG("hu %p", hu);

	h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	hu->priv = h5;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	init_timer(&h5->timer);
	h5->timer.function = h5_timed_event;
	h5->timer.data = (unsigned long) hu;

	return 0;
}

static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	del_timer_sync(&h5->timer);

	kfree(h5);

	return 0;
}

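/*
 * Remove packets from the unack queue that are covered by the ack number
 * we just received: the distance from rx_ack back to tx_seq gives the
 * number of packets still outstanding, everything before that is freed
 * from the head (oldest entries first) of the queue.
 */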
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);

	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) % 8;
	}

	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

	spin_unlock_irqrestore(&h5->unack.lock, flags);
}

static void h5_handle_internal_rx(struct hci_uart *hu)
{
	BT_DBG("%s", hu->hdev->name);
}

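/*
 * A complete, unslipped packet has been received. For reliable packets,
 * advance the expected sequence number and request that an ack be sent
 * back. The peer's ack number is then used to cull our own unacked
 * packets, and HCI event/ACL/SCO payloads are passed up to the core.
 * Other packet types (link control and pure acks) are handed to
 * h5_handle_internal_rx(), which is still a stub.
 */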
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_RELIABLE(hdr)) {
		h5->rx_seq = (h5->rx_seq + 1) % 8;
		h5->tx_ack_req = true;
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		hci_recv_frame(h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}

static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	BT_DBG("%s 0x%02hhx", hu->hdev->name, c);

	h5_complete_rx_pkt(hu);
	h5_reset_rx(h5);

	return 0;
}

static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s 0x%02hhx", hu->hdev->name, c);

	if (H5_HDR_CRC(hdr)) {
		h5->rx_func = h5_rx_crc;
		h5->rx_pending = 2;
	} else {
		h5_complete_rx_pkt(hu);
		h5_reset_rx(h5);
	}

	return 0;
}

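/*
 * All four header bytes have arrived: verify the header checksum (the
 * bytes must sum to 0xff) and, for reliable packets, that the sequence
 * number is the one we expect next, then switch to receiving the payload.
 */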
static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s 0x%02hhx", hu->hdev->name, c);

	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		BT_ERR("Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->rx_seq) {
		BT_ERR("Out-of-order packet arrived (%u != %u)",
		       H5_HDR_SEQ(hdr), h5->rx_seq);
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}

static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	BT_DBG("%s 0x%02hhx", hu->hdev->name, c);

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		BT_ERR("Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *) hu->hdev;

	return 0;
}

static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	BT_DBG("%s 0x%02hhx", hu->hdev->name, c);

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}

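/*
 * Undo SLIP escaping for a single received byte and append the result to
 * the receive buffer. An unescaped SLIP_ESC only switches to escape mode;
 * an unknown escape sequence aborts the whole packet.
 */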
static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

	if (!h5->rx_esc && c == SLIP_ESC) {
		h5->rx_esc = true;
		return;
	}

	if (h5->rx_esc) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}

		h5->rx_esc = false;
	}

	memcpy(skb_put(h5->rx_skb, 1), byte, 1);
	h5->rx_pending--;

	BT_DBG("unslipped 0x%02hhx", *byte);
}

static void h5_reset_rx(struct h5 *h5)
{
	if (h5->rx_skb) {
		kfree_skb(h5->rx_skb);
		h5->rx_skb = NULL;
	}

	h5->rx_func = h5_rx_delimiter;
	h5->rx_pending = 0;
	h5->rx_esc = false;
}

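/*
 * Main receive entry point: bytes are fed one at a time either to the
 * SLIP decoder (while a packet body is still pending) or to the current
 * rx_func state handler, which may consume the byte and advance the
 * parser state.
 */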
static int h5_recv(struct hci_uart *hu, void *data, int count)
{
	struct h5 *h5 = hu->priv;
	unsigned char *ptr = data;

	BT_DBG("%s count %d", hu->hdev->name, count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}

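/*
 * Queue a packet for transmission: ACL data and commands go on the
 * reliable queue, SCO data on the unreliable one. Anything larger than
 * the 12-bit length field allows is dropped.
 */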
static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	if (skb->len > 0xfff) {
		BT_ERR("Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	switch (bt_cb(skb)->pkt_type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type);
		kfree_skb(skb);
		break;
	}

	return 0;
}

static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	memcpy(skb_put(skb, 1), &delim, 1);
}

static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
	const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
	const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

	switch (c) {
	case SLIP_DELIMITER:
		memcpy(skb_put(skb, 2), &esc_delim, 2);
		break;
	case SLIP_ESC:
		memcpy(skb_put(skb, 2), &esc_esc, 2);
		break;
	default:
		memcpy(skb_put(skb, 1), &c, 1);
	}
}

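/*
 * Build a SLIP-framed Three-wire packet: opening delimiter, 4-byte header
 * (ack field taken from rx_seq; seq field assigned and advanced only for
 * reliable packets), SLIP-escaped header and payload bytes, closing
 * delimiter. Building any packet also satisfies a pending ack request.
 */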
static struct sk_buff *h5_build_pkt(struct h5 *h5, bool rel, u8 pkt_type,
				    const u8 *data, size_t len)
{
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	bt_cb(nskb)->pkt_type = pkt_type;

	h5_slip_delim(nskb);

	hdr[0] = h5->rx_seq << 3;
	h5->tx_ack_req = false;

	if (rel) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}

static struct sk_buff *h5_prepare_pkt(struct h5 *h5, u8 pkt_type,
				      const u8 *data, size_t len)
{
	bool rel;

	switch (pkt_type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		rel = true;
		break;
	case HCI_SCODATA_PKT:
	case HCI_3WIRE_LINK_PKT:
	case HCI_3WIRE_ACK_PKT:
		rel = false;
		break;
	default:
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	return h5_build_pkt(h5, rel, pkt_type, data, len);
}

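/*
 * Build a pure ack packet (header only, no payload). The pending ack
 * request flag is cleared by h5_build_pkt(), so a failed allocation
 * leaves it set and the ack is retried on the next transmit wakeup.
 */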
static struct sk_buff *h5_prepare_ack(struct h5 *h5)
{
	return h5_prepare_pkt(h5, HCI_3WIRE_ACK_PKT, NULL, 0);
}

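/*
 * Pick the next packet to send: unreliable packets first, then reliable
 * ones as long as fewer than H5_TXWINSIZE packets are awaiting an ack
 * (each reliable packet is moved to the unack queue and the
 * retransmission timer is armed), and finally a pure ack if one is due.
 */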
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
		nskb = h5_prepare_pkt(h5, bt_cb(skb)->pkt_type,
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	if (h5->unack.qlen >= H5_TXWINSIZE)
		goto unlock;

	if ((skb = skb_dequeue(&h5->rel)) != NULL) {
		nskb = h5_prepare_pkt(h5, bt_cb(skb)->pkt_type,
				      skb->data, skb->len);
		if (nskb) {
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (h5->tx_ack_req)
		return h5_prepare_ack(h5);

	return NULL;
}

static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}

static struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.open		= h5_open,
	.close		= h5_close,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};

int __init h5_init(void)
{
	int err = hci_uart_register_proto(&h5p);

	if (!err)
		BT_INFO("HCI Three-wire UART (H5) protocol initialized");
	else
		BT_ERR("HCI Three-wire UART (H5) protocol init failed");

	return err;
}

int __exit h5_deinit(void)
{
	return hci_uart_unregister_proto(&h5p);
}