766a02b1dfa148fb521cc120f07623c00ac88e60
[deliverable/linux.git] / net / nfc / nci / core.c
1 /*
2 * The NFC Controller Interface is the communication protocol between an
3 * NFC Controller (NFCC) and a Device Host (DH).
4 *
5 * Copyright (C) 2011 Texas Instruments, Inc.
6 *
7 * Written by Ilan Elias <ilane@ti.com>
8 *
9 * Acknowledgements:
10 * This file is based on hci_core.c, which was written
11 * by Maxim Krasnyansky.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */
27
28 #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
29
30 #include <linux/types.h>
31 #include <linux/workqueue.h>
32 #include <linux/completion.h>
33 #include <linux/export.h>
34 #include <linux/sched.h>
35 #include <linux/bitops.h>
36 #include <linux/skbuff.h>
37
38 #include "../nfc.h"
39 #include <net/nfc/nci.h>
40 #include <net/nfc/nci_core.h>
41 #include <linux/nfc.h>
42
43 static void nci_cmd_work(struct work_struct *work);
44 static void nci_rx_work(struct work_struct *work);
45 static void nci_tx_work(struct work_struct *work);
46
47 /* ---- NCI requests ---- */
48
/* Complete a pending synchronous request: record the NCI result code,
 * mark the request done and wake the waiter in __nci_request().
 * Harmless no-op when no request is pending, so response handlers may
 * call it unconditionally.
 */
void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		/* result must be stored before status/complete() so the
		 * woken waiter sees a consistent pair. */
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}
57
/* Abort a pending synchronous request with a (positive) errno value;
 * __nci_request() negates req_result for the NCI_REQ_CANCELED case.
 * No-op when nothing is pending.
 */
static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}
66
/* Execute request and wait for completion.
 *
 * Issues @req (which typically queues an NCI command) and blocks,
 * interruptibly, until nci_req_complete()/nci_req_cancel() fires or
 * @timeout (in jiffies) elapses.  Caller must hold req_lock (see
 * nci_request()) or otherwise guarantee exclusive access to the
 * req_* fields.  Returns 0 on success or a negative errno.
 */
static int __nci_request(struct nci_dev *ndev,
	void (*req)(struct nci_dev *ndev, unsigned long opt),
	unsigned long opt, __u32 timeout)
{
	int rc = 0;
	long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	init_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc =
		wait_for_completion_interruptible_timeout(&ndev->req_completion,
							  timeout);

	pr_debug("wait_for_completion return %ld\n", completion_rc);

	if (completion_rc > 0) {
		/* Woken by complete(); decode how the request ended. */
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			/* Map the NCI status byte onto a kernel errno. */
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			/* req_result holds a positive errno from
			 * nci_req_cancel(); negate it for the caller. */
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
		       completion_rc);

		/* 0 means timeout; negative means interrupted by a signal
		 * (e.g. -ERESTARTSYS), which is already a valid errno. */
		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	/* Reset request bookkeeping for the next caller. */
	ndev->req_status = ndev->req_result = 0;

	return rc;
}
110
/* Public-facing wrapper around __nci_request(): refuses requests while
 * the device is down and serializes all requests via req_lock so only
 * one synchronous exchange is in flight at a time.
 */
static inline int nci_request(struct nci_dev *ndev,
			      void (*req)(struct nci_dev *ndev,
					  unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int rc;

	if (!test_bit(NCI_UP, &ndev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	rc = __nci_request(ndev, req, opt, timeout);
	mutex_unlock(&ndev->req_lock);

	return rc;
}
128
129 static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
130 {
131 struct nci_core_reset_cmd cmd;
132
133 cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
134 nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
135 }
136
/* Request callback: queue a CORE_INIT command (no payload). */
static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}
141
/* Request callback run after CORE_INIT: build and queue an
 * RF_DISCOVER_MAP command mapping the ISO-DEP and NFC-DEP protocols
 * onto their matching RF interfaces, based on the interface list the
 * NFCC reported in its INIT response.
 */
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_map_cmd cmd;
	struct disc_map_config *cfg = cmd.mapping_configs;
	__u8 *num = &cmd.num_mapping_configs;
	int i;

	/* set rf mapping configurations */
	*num = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
		    NCI_RF_INTERFACE_ISO_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
			(*num)++;
		} else if (ndev->supported_rf_interfaces[i] ==
			   NCI_RF_INTERFACE_NFC_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
			(*num)++;
		}

		/* cmd.mapping_configs can hold at most this many entries */
		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	/* payload = 1 count byte + the populated config entries */
	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		     (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
}
177
/* Request callback: build and queue an RF_DISCOVER command.  @opt
 * carries the NFC_PROTO_* protocol bitmask requested by the caller;
 * each supported protocol family adds one passive-poll RF technology
 * (NFC-A/B/F) to the discovery configuration list.
 */
static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_cmd cmd;
	__u32 protocols = opt;

	cmd.num_disc_configs = 0;

	/* NFC-A covers Jewel/T1T, Mifare/T2T, ISO14443-A and NFC-DEP */
	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_JEWEL_MASK
	     || protocols & NFC_PROTO_MIFARE_MASK
	     || protocols & NFC_PROTO_ISO14443_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_A_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	/* NFC-B covers ISO14443-B */
	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_ISO14443_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_B_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	/* NFC-F covers FeliCa/T3T and NFC-DEP */
	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_FELICA_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_F_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	/* payload = 1 count byte + the populated config entries */
	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		     (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
		     &cmd);
}
217
/* Parameter bundle passed (cast to unsigned long) through nci_request()
 * into nci_rf_discover_select_req(). */
struct nci_rf_discover_select_param {
	__u8	rf_discovery_id;	/* logical index reported by the NFCC */
	__u8	rf_protocol;		/* NCI_RF_PROTOCOL_* to activate */
};
222
223 static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
224 {
225 struct nci_rf_discover_select_param *param =
226 (struct nci_rf_discover_select_param *)opt;
227 struct nci_rf_discover_select_cmd cmd;
228
229 cmd.rf_discovery_id = param->rf_discovery_id;
230 cmd.rf_protocol = param->rf_protocol;
231
232 switch (cmd.rf_protocol) {
233 case NCI_RF_PROTOCOL_ISO_DEP:
234 cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
235 break;
236
237 case NCI_RF_PROTOCOL_NFC_DEP:
238 cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
239 break;
240
241 default:
242 cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
243 break;
244 }
245
246 nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
247 sizeof(struct nci_rf_discover_select_cmd), &cmd);
248 }
249
250 static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
251 {
252 struct nci_rf_deactivate_cmd cmd;
253
254 cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
255
256 nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
257 sizeof(struct nci_rf_deactivate_cmd), &cmd);
258 }
259
/* Bring the device up: open the driver transport, then run the NCI
 * initialization sequence (CORE_RESET, CORE_INIT, RF_DISCOVER_MAP)
 * synchronously.  On success NCI_UP is set and the device is idle; on
 * failure all queues are purged and the transport closed again.
 * Returns 0, -EALREADY if already up, -EIO if the driver open fails,
 * or the errno from a failed init request.
 */
static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	/* one command credit: the NFCC accepts a single outstanding cmd */
	atomic_set(&ndev->cmd_cnt, 1);

	/* NCI_INIT lets nci_recv_frame() accept frames before NCI_UP */
	set_bit(NCI_INIT, &ndev->flags);

	/* req_lock is already held, so call __nci_request() directly */
	rc = __nci_request(ndev, nci_reset_req, 0,
			   msecs_to_jiffies(NCI_RESET_TIMEOUT));

	if (!rc) {
		rc = __nci_request(ndev, nci_init_req, 0,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	if (!rc) {
		rc = __nci_request(ndev, nci_init_complete_req, 0,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	clear_bit(NCI_INIT, &ndev->flags);

	if (!rc) {
		set_bit(NCI_UP, &ndev->flags);
		nci_clear_target_list(ndev);
		atomic_set(&ndev->state, NCI_IDLE);
	} else {
		/* Init failed, cleanup */
		skb_queue_purge(&ndev->cmd_q);
		skb_queue_purge(&ndev->rx_q);
		skb_queue_purge(&ndev->tx_q);

		ndev->ops->close(ndev);
		ndev->flags = 0;
	}

done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}
313
/* Take the device down: cancel any in-flight request, drain and flush
 * all queues/workqueues, send a best-effort CORE_RESET, then close the
 * driver transport.  Idempotent - returns 0 even if already down.
 * The ordering (purge queues -> flush workqueues -> reset -> flush cmd
 * wq -> close) guarantees no work remains scheduled when ops->close()
 * runs.
 */
static int nci_close_device(struct nci_dev *ndev)
{
	/* wake any waiter in __nci_request() before taking req_lock */
	nci_req_cancel(ndev, ENODEV);
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		/* already down; still make sure the timers are stopped */
		del_timer_sync(&ndev->cmd_timer);
		del_timer_sync(&ndev->data_timer);
		mutex_unlock(&ndev->req_lock);
		return 0;
	}

	/* Drop RX and TX queues */
	skb_queue_purge(&ndev->rx_q);
	skb_queue_purge(&ndev->tx_q);

	/* Flush RX and TX wq */
	flush_workqueue(ndev->rx_wq);
	flush_workqueue(ndev->tx_wq);

	/* Reset device */
	skb_queue_purge(&ndev->cmd_q);
	atomic_set(&ndev->cmd_cnt, 1);

	/* NCI_INIT allows the reset exchange while NCI_UP is cleared */
	set_bit(NCI_INIT, &ndev->flags);
	__nci_request(ndev, nci_reset_req, 0,
		      msecs_to_jiffies(NCI_RESET_TIMEOUT));
	clear_bit(NCI_INIT, &ndev->flags);

	/* Flush cmd wq */
	flush_workqueue(ndev->cmd_wq);

	/* After this point our queues are empty
	 * and no works are scheduled. */
	ndev->ops->close(ndev);

	/* Clear flags */
	ndev->flags = 0;

	mutex_unlock(&ndev->req_lock);

	return 0;
}
357
/* NCI command timer function.
 * Fires when a command got no response within NCI_CMD_TIMEOUT: restore
 * the command credit and kick the cmd worker so queued commands are not
 * stalled forever.
 */
static void nci_cmd_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	atomic_set(&ndev->cmd_cnt, 1);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);
}
366
/* NCI data exchange timer function.
 * Fires when transmitted data got no reply within NCI_DATA_TIMEOUT:
 * flag the timeout and let the rx worker complete the data exchange
 * with -ETIMEDOUT (see nci_rx_work()).
 */
static void nci_data_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	queue_work(ndev->rx_wq, &ndev->rx_work);
}
375
/* nfc_ops.dev_up hook: bring the underlying NCI device up. */
static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	return nci_open_device(nfc_get_drvdata(nfc_dev));
}
382
/* nfc_ops.dev_down hook: take the underlying NCI device down. */
static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	return nci_close_device(nfc_get_drvdata(nfc_dev));
}
389
/* nfc_ops.start_poll hook: start RF discovery for the initiator-mode
 * protocols in @im_protocols (@tm_protocols is unused here).  Refuses
 * to start while polling is already active or a target is activated;
 * implicitly deactivates a selected/active target first if needed.
 * Returns 0 on success or a negative errno.
 */
static int nci_start_poll(struct nfc_dev *nfc_dev,
			  __u32 im_protocols, __u32 tm_protocols)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
	    (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
		pr_err("unable to start poll, since poll is already active\n");
		return -EBUSY;
	}

	if (ndev->target_active_prot) {
		pr_err("there is an active target\n");
		return -EBUSY;
	}

	if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
	    (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
		pr_debug("target active or w4 select, implicitly deactivate\n");

		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
				 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		if (rc)
			return -EBUSY;
	}

	rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
			 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

	/* remember what we are polling for (used by notification path) */
	if (!rc)
		ndev->poll_prots = im_protocols;

	return rc;
}
425
/* nfc_ops.stop_poll hook: stop an active RF discovery by deactivating
 * back to idle mode.  Silently logs and returns if polling is not
 * currently active.
 */
static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
	    (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
		pr_err("unable to stop poll, since poll is not active\n");
		return;
	}

	nci_request(ndev, nci_rf_deactivate_req, 0,
		    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
439
/* nfc_ops.activate_target hook: activate @target for @protocol.
 * Valid only in NCI_W4_HOST_SELECT or NCI_POLL_ACTIVE state and with
 * no target already active.  In W4_HOST_SELECT an explicit
 * RF_DISCOVER_SELECT is issued; in POLL_ACTIVE the target is already
 * activated by the NFCC and only bookkeeping is needed.  Returns 0 on
 * success or a negative errno.
 */
static int nci_activate_target(struct nfc_dev *nfc_dev,
			       struct nfc_target *target, __u32 protocol)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	struct nci_rf_discover_select_param param;
	struct nfc_target *nci_target = NULL;
	int i;
	int rc = 0;

	pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);

	if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
	    (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
		pr_err("there is no available target to activate\n");
		return -EINVAL;
	}

	if (ndev->target_active_prot) {
		pr_err("there is already an active target\n");
		return -EBUSY;
	}

	/* look the requested target up in the discovered-targets list */
	for (i = 0; i < ndev->n_targets; i++) {
		if (ndev->targets[i].idx == target->idx) {
			nci_target = &ndev->targets[i];
			break;
		}
	}

	if (!nci_target) {
		pr_err("unable to find the selected target\n");
		return -EINVAL;
	}

	if (!(nci_target->supported_protocols & (1 << protocol))) {
		pr_err("target does not support the requested protocol 0x%x\n",
		       protocol);
		return -EINVAL;
	}

	if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
		param.rf_discovery_id = nci_target->logical_idx;

		/* map the NFC protocol onto the NCI RF protocol;
		 * anything unrecognized falls back to NFC-DEP */
		if (protocol == NFC_PROTO_JEWEL)
			param.rf_protocol = NCI_RF_PROTOCOL_T1T;
		else if (protocol == NFC_PROTO_MIFARE)
			param.rf_protocol = NCI_RF_PROTOCOL_T2T;
		else if (protocol == NFC_PROTO_FELICA)
			param.rf_protocol = NCI_RF_PROTOCOL_T3T;
		else if (protocol == NFC_PROTO_ISO14443)
			param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
		else
			param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;

		rc = nci_request(ndev, nci_rf_discover_select_req,
				 (unsigned long)&param,
				 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
	}

	if (!rc)
		ndev->target_active_prot = protocol;

	return rc;
}
504
/* nfc_ops.deactivate_target hook: clear the active-target bookkeeping
 * and, when the target is RF-activated (NCI_POLL_ACTIVE), send an
 * RF_DEACTIVATE to return the NFCC to idle.  No-op (with an error log)
 * when no target is active.
 */
static void nci_deactivate_target(struct nfc_dev *nfc_dev,
				  struct nfc_target *target)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	pr_debug("target_idx %d\n", target->idx);

	if (!ndev->target_active_prot) {
		pr_err("unable to deactivate target, no active target\n");
		return;
	}

	ndev->target_active_prot = 0;

	if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
		nci_request(ndev, nci_rf_deactivate_req, 0,
			    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
	}
}
524
/* nfc_ops.im_transceive hook: send @skb to the active target and
 * arrange for @cb(@cb_context, ...) to be invoked when the response
 * (or a timeout) arrives on the rx path.  Only one exchange may be in
 * flight at a time, enforced by the NCI_DATA_EXCHANGE flag.  Returns
 * 0 if the data was queued, or a negative errno.
 */
static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
			  struct sk_buff *skb,
			  data_exchange_cb_t cb, void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	pr_debug("target_idx %d, len %d\n", target->idx, skb->len);

	if (!ndev->target_active_prot) {
		pr_err("unable to exchange data, no active target\n");
		return -EINVAL;
	}

	/* atomically claim the single data-exchange slot */
	if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		return -EBUSY;

	/* store cb and context to be used on receiving data */
	ndev->data_exchange_cb = cb;
	ndev->data_exchange_cb_context = cb_context;

	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
	if (rc)
		/* send failed: release the slot so later calls can retry */
		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

	return rc;
}
552
/* NFC core operations implemented by the NCI layer; handed to
 * nfc_allocate_device() in nci_allocate_device(). */
static struct nfc_ops nci_nfc_ops = {
	.dev_up = nci_dev_up,
	.dev_down = nci_dev_down,
	.start_poll = nci_start_poll,
	.stop_poll = nci_stop_poll,
	.activate_target = nci_activate_target,
	.deactivate_target = nci_deactivate_target,
	.im_transceive = nci_transceive,
};
562
563 /* ---- Interface to NCI drivers ---- */
564
/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations; open, close and send are mandatory
 * @supported_protocols: NFC protocols supported by the device
 * @tx_headroom: driver headroom to reserve in tx skbs
 * @tx_tailroom: driver tailroom to reserve in tx skbs
 *
 * Also allocates the backing nfc_dev and cross-links the two via
 * drvdata.  Returns the new device, or NULL on invalid arguments or
 * allocation failure.  Free with nci_free_device().
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
				    __u32 supported_protocols,
				    int tx_headroom, int tx_tailroom)
{
	struct nci_dev *ndev;

	pr_debug("supported_protocols 0x%x\n", supported_protocols);

	if (!ops->open || !ops->close || !ops->send)
		return NULL;

	if (!supported_protocols)
		return NULL;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		return NULL;

	ndev->ops = ops;
	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;

	/* the NFC core needs extra headroom for the NCI data header */
	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
					    supported_protocols,
					    tx_headroom + NCI_DATA_HDR_SIZE,
					    tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_exit;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	return ndev;

free_exit:
	kfree(ndev);
	return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);
609
/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 *
 * Releases both the backing nfc_dev and the nci_dev itself.  The
 * device must already be unregistered (see nci_unregister_device()).
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_free_device(ndev->nfc_dev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);
621
/**
 * nci_register_device - register a nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 *
 * Registers the backing nfc_dev, creates the cmd/rx/tx single-threaded
 * workqueues, and initializes queues, timers and locks.  On any
 * failure everything created so far is torn down via the goto chain.
 * Returns 0 on success or a negative errno.
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto exit;

	ndev->flags = 0;

	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto unreg_exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
		    (unsigned long) ndev);
	setup_timer(&ndev->data_timer, nci_data_timer,
		    (unsigned long) ndev);

	mutex_init(&ndev->req_lock);

	goto exit;

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

unreg_exit:
	nfc_unregister_device(ndev->nfc_dev);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);
689
/**
 * nci_unregister_device - unregister a nci device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 *
 * Closes the device first so the workqueues are idle before they are
 * destroyed, then unregisters the backing nfc_dev.
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);
706
/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @skb: The sk_buff to receive; skb->dev carries the nci_dev pointer
 *	 (set by the driver / nci_send_cmd convention)
 *
 * Queues the frame for the rx worker.  Frames are dropped with -ENXIO
 * unless the device is up (NCI_UP) or initializing (NCI_INIT).  Takes
 * ownership of @skb in all cases.
 */
int nci_recv_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
		      && !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);
731
/* Hand a fully-built frame to the driver via ops->send.  skb->dev
 * carries the nci_dev pointer (same convention as nci_recv_frame).
 * Takes ownership of @skb; returns the driver's result or -ENODEV.
 */
static int nci_send_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(skb);
}
748
/* Send NCI command.
 * Builds a control packet (header + optional @plen-byte @payload) for
 * @opcode, queues it on cmd_q and kicks the cmd worker, which enforces
 * the one-outstanding-command credit.  Returns 0 or -ENOMEM.
 */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	pr_debug("opcode 0x%x, plen %d\n", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		pr_err("no memory for command\n");
		return -ENOMEM;
	}

	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	/* mark as a command packet, not segmented (PBF = last) */
	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(skb, plen), payload, plen);

	/* stash the nci_dev for nci_send_frame() */
	skb->dev = (void *) ndev;

	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}
781
/* ---- NCI TX Data worker thread ---- */

/* Drain tx_q while the NFCC has credits available, decrementing one
 * credit per frame unless flow control is disabled, and (re)arm the
 * data-exchange timeout after each send.
 */
static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct sk_buff *skb;

	pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&ndev->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;

		/* Check if data flow control is used */
		if (atomic_read(&ndev->credits_cnt) !=
		    NCI_DATA_FLOW_CONTROL_NOT_USED)
			atomic_dec(&ndev->credits_cnt);

		pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_conn_id(skb->data),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		/* expect a response within NCI_DATA_TIMEOUT, else
		 * nci_data_timer() flags a timeout */
		mod_timer(&ndev->data_timer,
			  jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
	}
}
813
/* ----- NCI RX worker thread (data & control) ----- */

/* Drain rx_q, dispatching each frame by message type (response,
 * notification or data), then handle a pending data-exchange timeout
 * flagged by nci_data_timer().
 */
static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ndev->rx_q))) {
		/* Process frame */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}

	/* check if a data exchange timout has occurred */
	if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
		/* complete the data exchange transaction, if exists */
		if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
			nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);

		clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	}
}
852
/* ----- NCI TX CMD worker thread ----- */

/* Send at most one queued command per invocation, consuming the single
 * command credit; the credit is restored (and this work requeued) by
 * the response handler or by nci_cmd_timer() on timeout.
 */
static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		atomic_dec(&ndev->cmd_cnt);

		pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_opcode_gid(nci_opcode(skb->data)),
			 nci_opcode_oid(nci_opcode(skb->data)),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		/* expect a response within NCI_CMD_TIMEOUT, else
		 * nci_cmd_timer() restores the credit */
		mod_timer(&ndev->cmd_timer,
			  jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}
This page took 0.054315 seconds and 5 git commands to generate.