/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
28 #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
30 #include <linux/types.h>
31 #include <linux/workqueue.h>
32 #include <linux/completion.h>
33 #include <linux/export.h>
34 #include <linux/sched.h>
35 #include <linux/bitops.h>
36 #include <linux/skbuff.h>
39 #include <net/nfc/nci.h>
40 #include <net/nfc/nci_core.h>
41 #include <linux/nfc.h>
/* Worker routines, defined at the bottom of this file. */
static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);
47 /* ---- NCI requests ---- */
49 void nci_req_complete(struct nci_dev
*ndev
, int result
)
51 if (ndev
->req_status
== NCI_REQ_PEND
) {
52 ndev
->req_result
= result
;
53 ndev
->req_status
= NCI_REQ_DONE
;
54 complete(&ndev
->req_completion
);
58 static void nci_req_cancel(struct nci_dev
*ndev
, int err
)
60 if (ndev
->req_status
== NCI_REQ_PEND
) {
61 ndev
->req_result
= err
;
62 ndev
->req_status
= NCI_REQ_CANCELED
;
63 complete(&ndev
->req_completion
);
67 /* Execute request and wait for completion. */
68 static int __nci_request(struct nci_dev
*ndev
,
69 void (*req
)(struct nci_dev
*ndev
, unsigned long opt
),
70 unsigned long opt
, __u32 timeout
)
75 ndev
->req_status
= NCI_REQ_PEND
;
77 init_completion(&ndev
->req_completion
);
80 wait_for_completion_interruptible_timeout(&ndev
->req_completion
,
83 pr_debug("wait_for_completion return %ld\n", completion_rc
);
85 if (completion_rc
> 0) {
86 switch (ndev
->req_status
) {
88 rc
= nci_to_errno(ndev
->req_result
);
91 case NCI_REQ_CANCELED
:
92 rc
= -ndev
->req_result
;
100 pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
103 rc
= ((completion_rc
== 0) ? (-ETIMEDOUT
) : (completion_rc
));
106 ndev
->req_status
= ndev
->req_result
= 0;
111 static inline int nci_request(struct nci_dev
*ndev
,
112 void (*req
)(struct nci_dev
*ndev
,
114 unsigned long opt
, __u32 timeout
)
118 if (!test_bit(NCI_UP
, &ndev
->flags
))
121 /* Serialize all requests */
122 mutex_lock(&ndev
->req_lock
);
123 rc
= __nci_request(ndev
, req
, opt
, timeout
);
124 mutex_unlock(&ndev
->req_lock
);
129 static void nci_reset_req(struct nci_dev
*ndev
, unsigned long opt
)
131 struct nci_core_reset_cmd cmd
;
133 cmd
.reset_type
= NCI_RESET_TYPE_RESET_CONFIG
;
134 nci_send_cmd(ndev
, NCI_OP_CORE_RESET_CMD
, 1, &cmd
);
137 static void nci_init_req(struct nci_dev
*ndev
, unsigned long opt
)
139 nci_send_cmd(ndev
, NCI_OP_CORE_INIT_CMD
, 0, NULL
);
142 static void nci_init_complete_req(struct nci_dev
*ndev
, unsigned long opt
)
144 struct nci_rf_disc_map_cmd cmd
;
145 struct disc_map_config
*cfg
= cmd
.mapping_configs
;
146 __u8
*num
= &cmd
.num_mapping_configs
;
149 /* set rf mapping configurations */
152 /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
153 for (i
= 0; i
< ndev
->num_supported_rf_interfaces
; i
++) {
154 if (ndev
->supported_rf_interfaces
[i
] ==
155 NCI_RF_INTERFACE_ISO_DEP
) {
156 cfg
[*num
].rf_protocol
= NCI_RF_PROTOCOL_ISO_DEP
;
157 cfg
[*num
].mode
= NCI_DISC_MAP_MODE_POLL
|
158 NCI_DISC_MAP_MODE_LISTEN
;
159 cfg
[*num
].rf_interface
= NCI_RF_INTERFACE_ISO_DEP
;
161 } else if (ndev
->supported_rf_interfaces
[i
] ==
162 NCI_RF_INTERFACE_NFC_DEP
) {
163 cfg
[*num
].rf_protocol
= NCI_RF_PROTOCOL_NFC_DEP
;
164 cfg
[*num
].mode
= NCI_DISC_MAP_MODE_POLL
|
165 NCI_DISC_MAP_MODE_LISTEN
;
166 cfg
[*num
].rf_interface
= NCI_RF_INTERFACE_NFC_DEP
;
170 if (*num
== NCI_MAX_NUM_MAPPING_CONFIGS
)
174 nci_send_cmd(ndev
, NCI_OP_RF_DISCOVER_MAP_CMD
,
175 (1 + ((*num
) * sizeof(struct disc_map_config
))), &cmd
);
178 static void nci_rf_discover_req(struct nci_dev
*ndev
, unsigned long opt
)
180 struct nci_rf_disc_cmd cmd
;
181 __u32 protocols
= opt
;
183 cmd
.num_disc_configs
= 0;
185 if ((cmd
.num_disc_configs
< NCI_MAX_NUM_RF_CONFIGS
) &&
186 (protocols
& NFC_PROTO_JEWEL_MASK
187 || protocols
& NFC_PROTO_MIFARE_MASK
188 || protocols
& NFC_PROTO_ISO14443_MASK
189 || protocols
& NFC_PROTO_NFC_DEP_MASK
)) {
190 cmd
.disc_configs
[cmd
.num_disc_configs
].rf_tech_and_mode
=
191 NCI_NFC_A_PASSIVE_POLL_MODE
;
192 cmd
.disc_configs
[cmd
.num_disc_configs
].frequency
= 1;
193 cmd
.num_disc_configs
++;
196 if ((cmd
.num_disc_configs
< NCI_MAX_NUM_RF_CONFIGS
) &&
197 (protocols
& NFC_PROTO_ISO14443_MASK
)) {
198 cmd
.disc_configs
[cmd
.num_disc_configs
].rf_tech_and_mode
=
199 NCI_NFC_B_PASSIVE_POLL_MODE
;
200 cmd
.disc_configs
[cmd
.num_disc_configs
].frequency
= 1;
201 cmd
.num_disc_configs
++;
204 if ((cmd
.num_disc_configs
< NCI_MAX_NUM_RF_CONFIGS
) &&
205 (protocols
& NFC_PROTO_FELICA_MASK
206 || protocols
& NFC_PROTO_NFC_DEP_MASK
)) {
207 cmd
.disc_configs
[cmd
.num_disc_configs
].rf_tech_and_mode
=
208 NCI_NFC_F_PASSIVE_POLL_MODE
;
209 cmd
.disc_configs
[cmd
.num_disc_configs
].frequency
= 1;
210 cmd
.num_disc_configs
++;
213 nci_send_cmd(ndev
, NCI_OP_RF_DISCOVER_CMD
,
214 (1 + (cmd
.num_disc_configs
* sizeof(struct disc_config
))),
218 struct nci_rf_discover_select_param
{
219 __u8 rf_discovery_id
;
223 static void nci_rf_discover_select_req(struct nci_dev
*ndev
, unsigned long opt
)
225 struct nci_rf_discover_select_param
*param
=
226 (struct nci_rf_discover_select_param
*)opt
;
227 struct nci_rf_discover_select_cmd cmd
;
229 cmd
.rf_discovery_id
= param
->rf_discovery_id
;
230 cmd
.rf_protocol
= param
->rf_protocol
;
232 switch (cmd
.rf_protocol
) {
233 case NCI_RF_PROTOCOL_ISO_DEP
:
234 cmd
.rf_interface
= NCI_RF_INTERFACE_ISO_DEP
;
237 case NCI_RF_PROTOCOL_NFC_DEP
:
238 cmd
.rf_interface
= NCI_RF_INTERFACE_NFC_DEP
;
242 cmd
.rf_interface
= NCI_RF_INTERFACE_FRAME
;
246 nci_send_cmd(ndev
, NCI_OP_RF_DISCOVER_SELECT_CMD
,
247 sizeof(struct nci_rf_discover_select_cmd
), &cmd
);
250 static void nci_rf_deactivate_req(struct nci_dev
*ndev
, unsigned long opt
)
252 struct nci_rf_deactivate_cmd cmd
;
254 cmd
.type
= NCI_DEACTIVATE_TYPE_IDLE_MODE
;
256 nci_send_cmd(ndev
, NCI_OP_RF_DEACTIVATE_CMD
,
257 sizeof(struct nci_rf_deactivate_cmd
), &cmd
);
260 static int nci_open_device(struct nci_dev
*ndev
)
264 mutex_lock(&ndev
->req_lock
);
266 if (test_bit(NCI_UP
, &ndev
->flags
)) {
271 if (ndev
->ops
->open(ndev
)) {
276 atomic_set(&ndev
->cmd_cnt
, 1);
278 set_bit(NCI_INIT
, &ndev
->flags
);
280 rc
= __nci_request(ndev
, nci_reset_req
, 0,
281 msecs_to_jiffies(NCI_RESET_TIMEOUT
));
284 rc
= __nci_request(ndev
, nci_init_req
, 0,
285 msecs_to_jiffies(NCI_INIT_TIMEOUT
));
289 rc
= __nci_request(ndev
, nci_init_complete_req
, 0,
290 msecs_to_jiffies(NCI_INIT_TIMEOUT
));
293 clear_bit(NCI_INIT
, &ndev
->flags
);
296 set_bit(NCI_UP
, &ndev
->flags
);
297 nci_clear_target_list(ndev
);
298 atomic_set(&ndev
->state
, NCI_IDLE
);
300 /* Init failed, cleanup */
301 skb_queue_purge(&ndev
->cmd_q
);
302 skb_queue_purge(&ndev
->rx_q
);
303 skb_queue_purge(&ndev
->tx_q
);
305 ndev
->ops
->close(ndev
);
310 mutex_unlock(&ndev
->req_lock
);
314 static int nci_close_device(struct nci_dev
*ndev
)
316 nci_req_cancel(ndev
, ENODEV
);
317 mutex_lock(&ndev
->req_lock
);
319 if (!test_and_clear_bit(NCI_UP
, &ndev
->flags
)) {
320 del_timer_sync(&ndev
->cmd_timer
);
321 del_timer_sync(&ndev
->data_timer
);
322 mutex_unlock(&ndev
->req_lock
);
326 /* Drop RX and TX queues */
327 skb_queue_purge(&ndev
->rx_q
);
328 skb_queue_purge(&ndev
->tx_q
);
330 /* Flush RX and TX wq */
331 flush_workqueue(ndev
->rx_wq
);
332 flush_workqueue(ndev
->tx_wq
);
335 skb_queue_purge(&ndev
->cmd_q
);
336 atomic_set(&ndev
->cmd_cnt
, 1);
338 set_bit(NCI_INIT
, &ndev
->flags
);
339 __nci_request(ndev
, nci_reset_req
, 0,
340 msecs_to_jiffies(NCI_RESET_TIMEOUT
));
341 clear_bit(NCI_INIT
, &ndev
->flags
);
344 flush_workqueue(ndev
->cmd_wq
);
346 /* After this point our queues are empty
347 * and no works are scheduled. */
348 ndev
->ops
->close(ndev
);
353 mutex_unlock(&ndev
->req_lock
);
358 /* NCI command timer function */
359 static void nci_cmd_timer(unsigned long arg
)
361 struct nci_dev
*ndev
= (void *) arg
;
363 atomic_set(&ndev
->cmd_cnt
, 1);
364 queue_work(ndev
->cmd_wq
, &ndev
->cmd_work
);
367 /* NCI data exchange timer function */
368 static void nci_data_timer(unsigned long arg
)
370 struct nci_dev
*ndev
= (void *) arg
;
372 set_bit(NCI_DATA_EXCHANGE_TO
, &ndev
->flags
);
373 queue_work(ndev
->rx_wq
, &ndev
->rx_work
);
/* nfc_ops hook: bring the underlying NCI device up. */
static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_open_device(ndev);
}
/* nfc_ops hook: take the underlying NCI device down. */
static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_close_device(ndev);
}
390 static int nci_start_poll(struct nfc_dev
*nfc_dev
,
391 __u32 im_protocols
, __u32 tm_protocols
)
393 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
396 if ((atomic_read(&ndev
->state
) == NCI_DISCOVERY
) ||
397 (atomic_read(&ndev
->state
) == NCI_W4_ALL_DISCOVERIES
)) {
398 pr_err("unable to start poll, since poll is already active\n");
402 if (ndev
->target_active_prot
) {
403 pr_err("there is an active target\n");
407 if ((atomic_read(&ndev
->state
) == NCI_W4_HOST_SELECT
) ||
408 (atomic_read(&ndev
->state
) == NCI_POLL_ACTIVE
)) {
409 pr_debug("target active or w4 select, implicitly deactivate\n");
411 rc
= nci_request(ndev
, nci_rf_deactivate_req
, 0,
412 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT
));
417 rc
= nci_request(ndev
, nci_rf_discover_req
, im_protocols
,
418 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT
));
421 ndev
->poll_prots
= im_protocols
;
426 static void nci_stop_poll(struct nfc_dev
*nfc_dev
)
428 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
430 if ((atomic_read(&ndev
->state
) != NCI_DISCOVERY
) &&
431 (atomic_read(&ndev
->state
) != NCI_W4_ALL_DISCOVERIES
)) {
432 pr_err("unable to stop poll, since poll is not active\n");
436 nci_request(ndev
, nci_rf_deactivate_req
, 0,
437 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT
));
440 static int nci_activate_target(struct nfc_dev
*nfc_dev
,
441 struct nfc_target
*target
, __u32 protocol
)
443 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
444 struct nci_rf_discover_select_param param
;
445 struct nfc_target
*nci_target
= NULL
;
449 pr_debug("target_idx %d, protocol 0x%x\n", target
->idx
, protocol
);
451 if ((atomic_read(&ndev
->state
) != NCI_W4_HOST_SELECT
) &&
452 (atomic_read(&ndev
->state
) != NCI_POLL_ACTIVE
)) {
453 pr_err("there is no available target to activate\n");
457 if (ndev
->target_active_prot
) {
458 pr_err("there is already an active target\n");
462 for (i
= 0; i
< ndev
->n_targets
; i
++) {
463 if (ndev
->targets
[i
].idx
== target
->idx
) {
464 nci_target
= &ndev
->targets
[i
];
470 pr_err("unable to find the selected target\n");
474 if (!(nci_target
->supported_protocols
& (1 << protocol
))) {
475 pr_err("target does not support the requested protocol 0x%x\n",
480 if (atomic_read(&ndev
->state
) == NCI_W4_HOST_SELECT
) {
481 param
.rf_discovery_id
= nci_target
->logical_idx
;
483 if (protocol
== NFC_PROTO_JEWEL
)
484 param
.rf_protocol
= NCI_RF_PROTOCOL_T1T
;
485 else if (protocol
== NFC_PROTO_MIFARE
)
486 param
.rf_protocol
= NCI_RF_PROTOCOL_T2T
;
487 else if (protocol
== NFC_PROTO_FELICA
)
488 param
.rf_protocol
= NCI_RF_PROTOCOL_T3T
;
489 else if (protocol
== NFC_PROTO_ISO14443
)
490 param
.rf_protocol
= NCI_RF_PROTOCOL_ISO_DEP
;
492 param
.rf_protocol
= NCI_RF_PROTOCOL_NFC_DEP
;
494 rc
= nci_request(ndev
, nci_rf_discover_select_req
,
495 (unsigned long)¶m
,
496 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT
));
500 ndev
->target_active_prot
= protocol
;
505 static void nci_deactivate_target(struct nfc_dev
*nfc_dev
,
506 struct nfc_target
*target
)
508 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
510 pr_debug("target_idx %d\n", target
->idx
);
512 if (!ndev
->target_active_prot
) {
513 pr_err("unable to deactivate target, no active target\n");
517 ndev
->target_active_prot
= 0;
519 if (atomic_read(&ndev
->state
) == NCI_POLL_ACTIVE
) {
520 nci_request(ndev
, nci_rf_deactivate_req
, 0,
521 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT
));
525 static int nci_data_exchange(struct nfc_dev
*nfc_dev
, struct nfc_target
*target
,
527 data_exchange_cb_t cb
, void *cb_context
)
529 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
532 pr_debug("target_idx %d, len %d\n", target
->idx
, skb
->len
);
534 if (!ndev
->target_active_prot
) {
535 pr_err("unable to exchange data, no active target\n");
539 if (test_and_set_bit(NCI_DATA_EXCHANGE
, &ndev
->flags
))
542 /* store cb and context to be used on receiving data */
543 ndev
->data_exchange_cb
= cb
;
544 ndev
->data_exchange_cb_context
= cb_context
;
546 rc
= nci_send_data(ndev
, NCI_STATIC_RF_CONN_ID
, skb
);
548 clear_bit(NCI_DATA_EXCHANGE
, &ndev
->flags
);
553 static struct nfc_ops nci_nfc_ops
= {
554 .dev_up
= nci_dev_up
,
555 .dev_down
= nci_dev_down
,
556 .start_poll
= nci_start_poll
,
557 .stop_poll
= nci_stop_poll
,
558 .activate_target
= nci_activate_target
,
559 .deactivate_target
= nci_deactivate_target
,
560 .data_exchange
= nci_data_exchange
,
563 /* ---- Interface to NCI drivers ---- */
566 * nci_allocate_device - allocate a new nci device
568 * @ops: device operations
569 * @supported_protocols: NFC protocols supported by the device
571 struct nci_dev
*nci_allocate_device(struct nci_ops
*ops
,
572 __u32 supported_protocols
,
573 int tx_headroom
, int tx_tailroom
)
575 struct nci_dev
*ndev
;
577 pr_debug("supported_protocols 0x%x\n", supported_protocols
);
579 if (!ops
->open
|| !ops
->close
|| !ops
->send
)
582 if (!supported_protocols
)
585 ndev
= kzalloc(sizeof(struct nci_dev
), GFP_KERNEL
);
590 ndev
->tx_headroom
= tx_headroom
;
591 ndev
->tx_tailroom
= tx_tailroom
;
593 ndev
->nfc_dev
= nfc_allocate_device(&nci_nfc_ops
,
595 tx_headroom
+ NCI_DATA_HDR_SIZE
,
600 nfc_set_drvdata(ndev
->nfc_dev
, ndev
);
608 EXPORT_SYMBOL(nci_allocate_device
);
611 * nci_free_device - deallocate nci device
613 * @ndev: The nci device to deallocate
615 void nci_free_device(struct nci_dev
*ndev
)
617 nfc_free_device(ndev
->nfc_dev
);
620 EXPORT_SYMBOL(nci_free_device
);
623 * nci_register_device - register a nci device in the nfc subsystem
625 * @dev: The nci device to register
627 int nci_register_device(struct nci_dev
*ndev
)
630 struct device
*dev
= &ndev
->nfc_dev
->dev
;
633 rc
= nfc_register_device(ndev
->nfc_dev
);
639 INIT_WORK(&ndev
->cmd_work
, nci_cmd_work
);
640 snprintf(name
, sizeof(name
), "%s_nci_cmd_wq", dev_name(dev
));
641 ndev
->cmd_wq
= create_singlethread_workqueue(name
);
647 INIT_WORK(&ndev
->rx_work
, nci_rx_work
);
648 snprintf(name
, sizeof(name
), "%s_nci_rx_wq", dev_name(dev
));
649 ndev
->rx_wq
= create_singlethread_workqueue(name
);
652 goto destroy_cmd_wq_exit
;
655 INIT_WORK(&ndev
->tx_work
, nci_tx_work
);
656 snprintf(name
, sizeof(name
), "%s_nci_tx_wq", dev_name(dev
));
657 ndev
->tx_wq
= create_singlethread_workqueue(name
);
660 goto destroy_rx_wq_exit
;
663 skb_queue_head_init(&ndev
->cmd_q
);
664 skb_queue_head_init(&ndev
->rx_q
);
665 skb_queue_head_init(&ndev
->tx_q
);
667 setup_timer(&ndev
->cmd_timer
, nci_cmd_timer
,
668 (unsigned long) ndev
);
669 setup_timer(&ndev
->data_timer
, nci_data_timer
,
670 (unsigned long) ndev
);
672 mutex_init(&ndev
->req_lock
);
677 destroy_workqueue(ndev
->rx_wq
);
680 destroy_workqueue(ndev
->cmd_wq
);
683 nfc_unregister_device(ndev
->nfc_dev
);
688 EXPORT_SYMBOL(nci_register_device
);
691 * nci_unregister_device - unregister a nci device in the nfc subsystem
693 * @dev: The nci device to unregister
695 void nci_unregister_device(struct nci_dev
*ndev
)
697 nci_close_device(ndev
);
699 destroy_workqueue(ndev
->cmd_wq
);
700 destroy_workqueue(ndev
->rx_wq
);
701 destroy_workqueue(ndev
->tx_wq
);
703 nfc_unregister_device(ndev
->nfc_dev
);
705 EXPORT_SYMBOL(nci_unregister_device
);
708 * nci_recv_frame - receive frame from NCI drivers
710 * @skb: The sk_buff to receive
712 int nci_recv_frame(struct sk_buff
*skb
)
714 struct nci_dev
*ndev
= (struct nci_dev
*) skb
->dev
;
716 pr_debug("len %d\n", skb
->len
);
718 if (!ndev
|| (!test_bit(NCI_UP
, &ndev
->flags
)
719 && !test_bit(NCI_INIT
, &ndev
->flags
))) {
724 /* Queue frame for rx worker thread */
725 skb_queue_tail(&ndev
->rx_q
, skb
);
726 queue_work(ndev
->rx_wq
, &ndev
->rx_work
);
730 EXPORT_SYMBOL(nci_recv_frame
);
732 static int nci_send_frame(struct sk_buff
*skb
)
734 struct nci_dev
*ndev
= (struct nci_dev
*) skb
->dev
;
736 pr_debug("len %d\n", skb
->len
);
743 /* Get rid of skb owner, prior to sending to the driver. */
746 return ndev
->ops
->send(skb
);
749 /* Send NCI command */
750 int nci_send_cmd(struct nci_dev
*ndev
, __u16 opcode
, __u8 plen
, void *payload
)
752 struct nci_ctrl_hdr
*hdr
;
755 pr_debug("opcode 0x%x, plen %d\n", opcode
, plen
);
757 skb
= nci_skb_alloc(ndev
, (NCI_CTRL_HDR_SIZE
+ plen
), GFP_KERNEL
);
759 pr_err("no memory for command\n");
763 hdr
= (struct nci_ctrl_hdr
*) skb_put(skb
, NCI_CTRL_HDR_SIZE
);
764 hdr
->gid
= nci_opcode_gid(opcode
);
765 hdr
->oid
= nci_opcode_oid(opcode
);
768 nci_mt_set((__u8
*)hdr
, NCI_MT_CMD_PKT
);
769 nci_pbf_set((__u8
*)hdr
, NCI_PBF_LAST
);
772 memcpy(skb_put(skb
, plen
), payload
, plen
);
774 skb
->dev
= (void *) ndev
;
776 skb_queue_tail(&ndev
->cmd_q
, skb
);
777 queue_work(ndev
->cmd_wq
, &ndev
->cmd_work
);
782 /* ---- NCI TX Data worker thread ---- */
784 static void nci_tx_work(struct work_struct
*work
)
786 struct nci_dev
*ndev
= container_of(work
, struct nci_dev
, tx_work
);
789 pr_debug("credits_cnt %d\n", atomic_read(&ndev
->credits_cnt
));
791 /* Send queued tx data */
792 while (atomic_read(&ndev
->credits_cnt
)) {
793 skb
= skb_dequeue(&ndev
->tx_q
);
797 /* Check if data flow control is used */
798 if (atomic_read(&ndev
->credits_cnt
) !=
799 NCI_DATA_FLOW_CONTROL_NOT_USED
)
800 atomic_dec(&ndev
->credits_cnt
);
802 pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
804 nci_conn_id(skb
->data
),
805 nci_plen(skb
->data
));
809 mod_timer(&ndev
->data_timer
,
810 jiffies
+ msecs_to_jiffies(NCI_DATA_TIMEOUT
));
814 /* ----- NCI RX worker thread (data & control) ----- */
816 static void nci_rx_work(struct work_struct
*work
)
818 struct nci_dev
*ndev
= container_of(work
, struct nci_dev
, rx_work
);
821 while ((skb
= skb_dequeue(&ndev
->rx_q
))) {
823 switch (nci_mt(skb
->data
)) {
825 nci_rsp_packet(ndev
, skb
);
829 nci_ntf_packet(ndev
, skb
);
832 case NCI_MT_DATA_PKT
:
833 nci_rx_data_packet(ndev
, skb
);
837 pr_err("unknown MT 0x%x\n", nci_mt(skb
->data
));
843 /* check if a data exchange timout has occurred */
844 if (test_bit(NCI_DATA_EXCHANGE_TO
, &ndev
->flags
)) {
845 /* complete the data exchange transaction, if exists */
846 if (test_bit(NCI_DATA_EXCHANGE
, &ndev
->flags
))
847 nci_data_exchange_complete(ndev
, NULL
, -ETIMEDOUT
);
849 clear_bit(NCI_DATA_EXCHANGE_TO
, &ndev
->flags
);
853 /* ----- NCI TX CMD worker thread ----- */
855 static void nci_cmd_work(struct work_struct
*work
)
857 struct nci_dev
*ndev
= container_of(work
, struct nci_dev
, cmd_work
);
860 pr_debug("cmd_cnt %d\n", atomic_read(&ndev
->cmd_cnt
));
862 /* Send queued command */
863 if (atomic_read(&ndev
->cmd_cnt
)) {
864 skb
= skb_dequeue(&ndev
->cmd_q
);
868 atomic_dec(&ndev
->cmd_cnt
);
870 pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
872 nci_opcode_gid(nci_opcode(skb
->data
)),
873 nci_opcode_oid(nci_opcode(skb
->data
)),
874 nci_plen(skb
->data
));
878 mod_timer(&ndev
->cmd_timer
,
879 jiffies
+ msecs_to_jiffies(NCI_CMD_TIMEOUT
));