/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
28 #include <linux/types.h>
29 #include <linux/workqueue.h>
30 #include <linux/completion.h>
31 #include <linux/export.h>
32 #include <linux/sched.h>
33 #include <linux/bitops.h>
34 #include <linux/skbuff.h>
37 #include <net/nfc/nci.h>
38 #include <net/nfc/nci_core.h>
39 #include <linux/nfc.h>
/* Worker routines defined below; declared up front so that the
 * device setup code can reference them before their definitions. */
static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);
/* ---- NCI requests ---- */
47 void nci_req_complete(struct nci_dev
*ndev
, int result
)
49 if (ndev
->req_status
== NCI_REQ_PEND
) {
50 ndev
->req_result
= result
;
51 ndev
->req_status
= NCI_REQ_DONE
;
52 complete(&ndev
->req_completion
);
56 static void nci_req_cancel(struct nci_dev
*ndev
, int err
)
58 if (ndev
->req_status
== NCI_REQ_PEND
) {
59 ndev
->req_result
= err
;
60 ndev
->req_status
= NCI_REQ_CANCELED
;
61 complete(&ndev
->req_completion
);
65 /* Execute request and wait for completion. */
66 static int __nci_request(struct nci_dev
*ndev
,
67 void (*req
)(struct nci_dev
*ndev
, unsigned long opt
),
72 unsigned long completion_rc
;
74 ndev
->req_status
= NCI_REQ_PEND
;
76 init_completion(&ndev
->req_completion
);
78 completion_rc
= wait_for_completion_interruptible_timeout(
79 &ndev
->req_completion
,
82 nfc_dbg("wait_for_completion return %ld", completion_rc
);
84 if (completion_rc
> 0) {
85 switch (ndev
->req_status
) {
87 rc
= nci_to_errno(ndev
->req_result
);
90 case NCI_REQ_CANCELED
:
91 rc
= -ndev
->req_result
;
99 nfc_err("wait_for_completion_interruptible_timeout failed %ld",
102 rc
= ((completion_rc
== 0) ? (-ETIMEDOUT
) : (completion_rc
));
105 ndev
->req_status
= ndev
->req_result
= 0;
110 static inline int nci_request(struct nci_dev
*ndev
,
111 void (*req
)(struct nci_dev
*ndev
, unsigned long opt
),
112 unsigned long opt
, __u32 timeout
)
116 if (!test_bit(NCI_UP
, &ndev
->flags
))
119 /* Serialize all requests */
120 mutex_lock(&ndev
->req_lock
);
121 rc
= __nci_request(ndev
, req
, opt
, timeout
);
122 mutex_unlock(&ndev
->req_lock
);
127 static void nci_reset_req(struct nci_dev
*ndev
, unsigned long opt
)
129 struct nci_core_reset_cmd cmd
;
131 cmd
.reset_type
= NCI_RESET_TYPE_RESET_CONFIG
;
132 nci_send_cmd(ndev
, NCI_OP_CORE_RESET_CMD
, 1, &cmd
);
135 static void nci_init_req(struct nci_dev
*ndev
, unsigned long opt
)
137 nci_send_cmd(ndev
, NCI_OP_CORE_INIT_CMD
, 0, NULL
);
140 static void nci_init_complete_req(struct nci_dev
*ndev
, unsigned long opt
)
142 struct nci_rf_disc_map_cmd cmd
;
143 struct disc_map_config
*cfg
= cmd
.mapping_configs
;
144 __u8
*num
= &cmd
.num_mapping_configs
;
147 /* set rf mapping configurations */
150 /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
151 for (i
= 0; i
< ndev
->num_supported_rf_interfaces
; i
++) {
152 if (ndev
->supported_rf_interfaces
[i
] ==
153 NCI_RF_INTERFACE_ISO_DEP
) {
154 cfg
[*num
].rf_protocol
= NCI_RF_PROTOCOL_ISO_DEP
;
155 cfg
[*num
].mode
= NCI_DISC_MAP_MODE_BOTH
;
156 cfg
[*num
].rf_interface_type
= NCI_RF_INTERFACE_ISO_DEP
;
158 } else if (ndev
->supported_rf_interfaces
[i
] ==
159 NCI_RF_INTERFACE_NFC_DEP
) {
160 cfg
[*num
].rf_protocol
= NCI_RF_PROTOCOL_NFC_DEP
;
161 cfg
[*num
].mode
= NCI_DISC_MAP_MODE_BOTH
;
162 cfg
[*num
].rf_interface_type
= NCI_RF_INTERFACE_NFC_DEP
;
166 if (*num
== NCI_MAX_NUM_MAPPING_CONFIGS
)
170 nci_send_cmd(ndev
, NCI_OP_RF_DISCOVER_MAP_CMD
,
171 (1 + ((*num
)*sizeof(struct disc_map_config
))),
175 static void nci_rf_discover_req(struct nci_dev
*ndev
, unsigned long opt
)
177 struct nci_rf_disc_cmd cmd
;
178 __u32 protocols
= opt
;
180 cmd
.num_disc_configs
= 0;
182 if ((cmd
.num_disc_configs
< NCI_MAX_NUM_RF_CONFIGS
) &&
183 (protocols
& NFC_PROTO_JEWEL_MASK
184 || protocols
& NFC_PROTO_MIFARE_MASK
185 || protocols
& NFC_PROTO_ISO14443_MASK
186 || protocols
& NFC_PROTO_NFC_DEP_MASK
)) {
187 cmd
.disc_configs
[cmd
.num_disc_configs
].type
=
188 NCI_DISCOVERY_TYPE_POLL_A_PASSIVE
;
189 cmd
.disc_configs
[cmd
.num_disc_configs
].frequency
= 1;
190 cmd
.num_disc_configs
++;
193 if ((cmd
.num_disc_configs
< NCI_MAX_NUM_RF_CONFIGS
) &&
194 (protocols
& NFC_PROTO_ISO14443_MASK
)) {
195 cmd
.disc_configs
[cmd
.num_disc_configs
].type
=
196 NCI_DISCOVERY_TYPE_POLL_B_PASSIVE
;
197 cmd
.disc_configs
[cmd
.num_disc_configs
].frequency
= 1;
198 cmd
.num_disc_configs
++;
201 if ((cmd
.num_disc_configs
< NCI_MAX_NUM_RF_CONFIGS
) &&
202 (protocols
& NFC_PROTO_FELICA_MASK
203 || protocols
& NFC_PROTO_NFC_DEP_MASK
)) {
204 cmd
.disc_configs
[cmd
.num_disc_configs
].type
=
205 NCI_DISCOVERY_TYPE_POLL_F_PASSIVE
;
206 cmd
.disc_configs
[cmd
.num_disc_configs
].frequency
= 1;
207 cmd
.num_disc_configs
++;
210 nci_send_cmd(ndev
, NCI_OP_RF_DISCOVER_CMD
,
211 (1 + (cmd
.num_disc_configs
*sizeof(struct disc_config
))),
215 static void nci_rf_deactivate_req(struct nci_dev
*ndev
, unsigned long opt
)
217 struct nci_rf_deactivate_cmd cmd
;
219 cmd
.type
= NCI_DEACTIVATE_TYPE_IDLE_MODE
;
221 nci_send_cmd(ndev
, NCI_OP_RF_DEACTIVATE_CMD
,
222 sizeof(struct nci_rf_deactivate_cmd
),
226 static int nci_open_device(struct nci_dev
*ndev
)
230 mutex_lock(&ndev
->req_lock
);
232 if (test_bit(NCI_UP
, &ndev
->flags
)) {
237 if (ndev
->ops
->open(ndev
)) {
242 atomic_set(&ndev
->cmd_cnt
, 1);
244 set_bit(NCI_INIT
, &ndev
->flags
);
246 rc
= __nci_request(ndev
, nci_reset_req
, 0,
247 msecs_to_jiffies(NCI_RESET_TIMEOUT
));
250 rc
= __nci_request(ndev
, nci_init_req
, 0,
251 msecs_to_jiffies(NCI_INIT_TIMEOUT
));
255 rc
= __nci_request(ndev
, nci_init_complete_req
, 0,
256 msecs_to_jiffies(NCI_INIT_TIMEOUT
));
259 clear_bit(NCI_INIT
, &ndev
->flags
);
262 set_bit(NCI_UP
, &ndev
->flags
);
264 /* Init failed, cleanup */
265 skb_queue_purge(&ndev
->cmd_q
);
266 skb_queue_purge(&ndev
->rx_q
);
267 skb_queue_purge(&ndev
->tx_q
);
269 ndev
->ops
->close(ndev
);
274 mutex_unlock(&ndev
->req_lock
);
278 static int nci_close_device(struct nci_dev
*ndev
)
280 nci_req_cancel(ndev
, ENODEV
);
281 mutex_lock(&ndev
->req_lock
);
283 if (!test_and_clear_bit(NCI_UP
, &ndev
->flags
)) {
284 del_timer_sync(&ndev
->cmd_timer
);
285 mutex_unlock(&ndev
->req_lock
);
289 /* Drop RX and TX queues */
290 skb_queue_purge(&ndev
->rx_q
);
291 skb_queue_purge(&ndev
->tx_q
);
293 /* Flush RX and TX wq */
294 flush_workqueue(ndev
->rx_wq
);
295 flush_workqueue(ndev
->tx_wq
);
298 skb_queue_purge(&ndev
->cmd_q
);
299 atomic_set(&ndev
->cmd_cnt
, 1);
301 set_bit(NCI_INIT
, &ndev
->flags
);
302 __nci_request(ndev
, nci_reset_req
, 0,
303 msecs_to_jiffies(NCI_RESET_TIMEOUT
));
304 clear_bit(NCI_INIT
, &ndev
->flags
);
307 flush_workqueue(ndev
->cmd_wq
);
309 /* After this point our queues are empty
310 * and no works are scheduled. */
311 ndev
->ops
->close(ndev
);
316 mutex_unlock(&ndev
->req_lock
);
321 /* NCI command timer function */
322 static void nci_cmd_timer(unsigned long arg
)
324 struct nci_dev
*ndev
= (void *) arg
;
328 atomic_set(&ndev
->cmd_cnt
, 1);
329 queue_work(ndev
->cmd_wq
, &ndev
->cmd_work
);
/* nfc_ops hook: bring the underlying NCI device up. */
static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	return nci_open_device(ndev);
}
/* nfc_ops hook: take the underlying NCI device down. */
static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	return nci_close_device(ndev);
}
350 static int nci_start_poll(struct nfc_dev
*nfc_dev
, __u32 protocols
)
352 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
357 if (test_bit(NCI_DISCOVERY
, &ndev
->flags
)) {
358 nfc_err("unable to start poll, since poll is already active");
362 if (ndev
->target_active_prot
) {
363 nfc_err("there is an active target");
367 if (test_bit(NCI_POLL_ACTIVE
, &ndev
->flags
)) {
368 nfc_dbg("target is active, implicitly deactivate...");
370 rc
= nci_request(ndev
, nci_rf_deactivate_req
, 0,
371 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT
));
376 rc
= nci_request(ndev
, nci_rf_discover_req
, protocols
,
377 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT
));
380 ndev
->poll_prots
= protocols
;
385 static void nci_stop_poll(struct nfc_dev
*nfc_dev
)
387 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
391 if (!test_bit(NCI_DISCOVERY
, &ndev
->flags
)) {
392 nfc_err("unable to stop poll, since poll is not active");
396 nci_request(ndev
, nci_rf_deactivate_req
, 0,
397 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT
));
400 static int nci_activate_target(struct nfc_dev
*nfc_dev
, __u32 target_idx
,
403 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
405 nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx
, protocol
);
407 if (!test_bit(NCI_POLL_ACTIVE
, &ndev
->flags
)) {
408 nfc_err("there is no available target to activate");
412 if (ndev
->target_active_prot
) {
413 nfc_err("there is already an active target");
417 if (!(ndev
->target_available_prots
& (1 << protocol
))) {
418 nfc_err("target does not support the requested protocol 0x%x",
423 ndev
->target_active_prot
= protocol
;
424 ndev
->target_available_prots
= 0;
429 static void nci_deactivate_target(struct nfc_dev
*nfc_dev
, __u32 target_idx
)
431 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
433 nfc_dbg("entry, target_idx %d", target_idx
);
435 if (!ndev
->target_active_prot
) {
436 nfc_err("unable to deactivate target, no active target");
440 ndev
->target_active_prot
= 0;
442 if (test_bit(NCI_POLL_ACTIVE
, &ndev
->flags
)) {
443 nci_request(ndev
, nci_rf_deactivate_req
, 0,
444 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT
));
448 static int nci_data_exchange(struct nfc_dev
*nfc_dev
, __u32 target_idx
,
450 data_exchange_cb_t cb
,
453 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
456 nfc_dbg("entry, target_idx %d, len %d", target_idx
, skb
->len
);
458 if (!ndev
->target_active_prot
) {
459 nfc_err("unable to exchange data, no active target");
463 if (test_and_set_bit(NCI_DATA_EXCHANGE
, &ndev
->flags
))
466 /* store cb and context to be used on receiving data */
467 ndev
->data_exchange_cb
= cb
;
468 ndev
->data_exchange_cb_context
= cb_context
;
470 rc
= nci_send_data(ndev
, NCI_STATIC_RF_CONN_ID
, skb
);
472 clear_bit(NCI_DATA_EXCHANGE
, &ndev
->flags
);
477 static struct nfc_ops nci_nfc_ops
= {
478 .dev_up
= nci_dev_up
,
479 .dev_down
= nci_dev_down
,
480 .start_poll
= nci_start_poll
,
481 .stop_poll
= nci_stop_poll
,
482 .activate_target
= nci_activate_target
,
483 .deactivate_target
= nci_deactivate_target
,
484 .data_exchange
= nci_data_exchange
,
487 /* ---- Interface to NCI drivers ---- */
490 * nci_allocate_device - allocate a new nci device
492 * @ops: device operations
493 * @supported_protocols: NFC protocols supported by the device
495 struct nci_dev
*nci_allocate_device(struct nci_ops
*ops
,
496 __u32 supported_protocols
,
500 struct nci_dev
*ndev
;
502 nfc_dbg("entry, supported_protocols 0x%x", supported_protocols
);
504 if (!ops
->open
|| !ops
->close
|| !ops
->send
)
507 if (!supported_protocols
)
510 ndev
= kzalloc(sizeof(struct nci_dev
), GFP_KERNEL
);
515 ndev
->tx_headroom
= tx_headroom
;
516 ndev
->tx_tailroom
= tx_tailroom
;
518 ndev
->nfc_dev
= nfc_allocate_device(&nci_nfc_ops
,
520 tx_headroom
+ NCI_DATA_HDR_SIZE
,
525 nfc_set_drvdata(ndev
->nfc_dev
, ndev
);
533 EXPORT_SYMBOL(nci_allocate_device
);
536 * nci_free_device - deallocate nci device
538 * @ndev: The nci device to deallocate
540 void nci_free_device(struct nci_dev
*ndev
)
544 nfc_free_device(ndev
->nfc_dev
);
547 EXPORT_SYMBOL(nci_free_device
);
550 * nci_register_device - register a nci device in the nfc subsystem
552 * @dev: The nci device to register
554 int nci_register_device(struct nci_dev
*ndev
)
557 struct device
*dev
= &ndev
->nfc_dev
->dev
;
562 rc
= nfc_register_device(ndev
->nfc_dev
);
568 INIT_WORK(&ndev
->cmd_work
, nci_cmd_work
);
569 snprintf(name
, sizeof(name
), "%s_nci_cmd_wq", dev_name(dev
));
570 ndev
->cmd_wq
= create_singlethread_workqueue(name
);
576 INIT_WORK(&ndev
->rx_work
, nci_rx_work
);
577 snprintf(name
, sizeof(name
), "%s_nci_rx_wq", dev_name(dev
));
578 ndev
->rx_wq
= create_singlethread_workqueue(name
);
581 goto destroy_cmd_wq_exit
;
584 INIT_WORK(&ndev
->tx_work
, nci_tx_work
);
585 snprintf(name
, sizeof(name
), "%s_nci_tx_wq", dev_name(dev
));
586 ndev
->tx_wq
= create_singlethread_workqueue(name
);
589 goto destroy_rx_wq_exit
;
592 skb_queue_head_init(&ndev
->cmd_q
);
593 skb_queue_head_init(&ndev
->rx_q
);
594 skb_queue_head_init(&ndev
->tx_q
);
596 setup_timer(&ndev
->cmd_timer
, nci_cmd_timer
,
597 (unsigned long) ndev
);
599 mutex_init(&ndev
->req_lock
);
604 destroy_workqueue(ndev
->rx_wq
);
607 destroy_workqueue(ndev
->cmd_wq
);
610 nfc_unregister_device(ndev
->nfc_dev
);
615 EXPORT_SYMBOL(nci_register_device
);
618 * nci_unregister_device - unregister a nci device in the nfc subsystem
620 * @dev: The nci device to unregister
622 void nci_unregister_device(struct nci_dev
*ndev
)
626 nci_close_device(ndev
);
628 destroy_workqueue(ndev
->cmd_wq
);
629 destroy_workqueue(ndev
->rx_wq
);
630 destroy_workqueue(ndev
->tx_wq
);
632 nfc_unregister_device(ndev
->nfc_dev
);
634 EXPORT_SYMBOL(nci_unregister_device
);
637 * nci_recv_frame - receive frame from NCI drivers
639 * @skb: The sk_buff to receive
641 int nci_recv_frame(struct sk_buff
*skb
)
643 struct nci_dev
*ndev
= (struct nci_dev
*) skb
->dev
;
645 nfc_dbg("entry, len %d", skb
->len
);
647 if (!ndev
|| (!test_bit(NCI_UP
, &ndev
->flags
)
648 && !test_bit(NCI_INIT
, &ndev
->flags
))) {
653 /* Queue frame for rx worker thread */
654 skb_queue_tail(&ndev
->rx_q
, skb
);
655 queue_work(ndev
->rx_wq
, &ndev
->rx_work
);
659 EXPORT_SYMBOL(nci_recv_frame
);
661 static int nci_send_frame(struct sk_buff
*skb
)
663 struct nci_dev
*ndev
= (struct nci_dev
*) skb
->dev
;
665 nfc_dbg("entry, len %d", skb
->len
);
672 /* Get rid of skb owner, prior to sending to the driver. */
675 return ndev
->ops
->send(skb
);
678 /* Send NCI command */
679 int nci_send_cmd(struct nci_dev
*ndev
, __u16 opcode
, __u8 plen
, void *payload
)
681 struct nci_ctrl_hdr
*hdr
;
684 nfc_dbg("entry, opcode 0x%x, plen %d", opcode
, plen
);
686 skb
= nci_skb_alloc(ndev
, (NCI_CTRL_HDR_SIZE
+ plen
), GFP_KERNEL
);
688 nfc_err("no memory for command");
692 hdr
= (struct nci_ctrl_hdr
*) skb_put(skb
, NCI_CTRL_HDR_SIZE
);
693 hdr
->gid
= nci_opcode_gid(opcode
);
694 hdr
->oid
= nci_opcode_oid(opcode
);
697 nci_mt_set((__u8
*)hdr
, NCI_MT_CMD_PKT
);
698 nci_pbf_set((__u8
*)hdr
, NCI_PBF_LAST
);
701 memcpy(skb_put(skb
, plen
), payload
, plen
);
703 skb
->dev
= (void *) ndev
;
705 skb_queue_tail(&ndev
->cmd_q
, skb
);
706 queue_work(ndev
->cmd_wq
, &ndev
->cmd_work
);
711 /* ---- NCI TX Data worker thread ---- */
713 static void nci_tx_work(struct work_struct
*work
)
715 struct nci_dev
*ndev
= container_of(work
, struct nci_dev
, tx_work
);
718 nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev
->credits_cnt
));
720 /* Send queued tx data */
721 while (atomic_read(&ndev
->credits_cnt
)) {
722 skb
= skb_dequeue(&ndev
->tx_q
);
726 /* Check if data flow control is used */
727 if (atomic_read(&ndev
->credits_cnt
) !=
728 NCI_DATA_FLOW_CONTROL_NOT_USED
)
729 atomic_dec(&ndev
->credits_cnt
);
731 nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
733 nci_conn_id(skb
->data
),
734 nci_plen(skb
->data
));
740 /* ----- NCI RX worker thread (data & control) ----- */
742 static void nci_rx_work(struct work_struct
*work
)
744 struct nci_dev
*ndev
= container_of(work
, struct nci_dev
, rx_work
);
747 while ((skb
= skb_dequeue(&ndev
->rx_q
))) {
749 switch (nci_mt(skb
->data
)) {
751 nci_rsp_packet(ndev
, skb
);
755 nci_ntf_packet(ndev
, skb
);
758 case NCI_MT_DATA_PKT
:
759 nci_rx_data_packet(ndev
, skb
);
763 nfc_err("unknown MT 0x%x", nci_mt(skb
->data
));
770 /* ----- NCI TX CMD worker thread ----- */
772 static void nci_cmd_work(struct work_struct
*work
)
774 struct nci_dev
*ndev
= container_of(work
, struct nci_dev
, cmd_work
);
777 nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev
->cmd_cnt
));
779 /* Send queued command */
780 if (atomic_read(&ndev
->cmd_cnt
)) {
781 skb
= skb_dequeue(&ndev
->cmd_q
);
785 atomic_dec(&ndev
->cmd_cnt
);
787 nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
789 nci_opcode_gid(nci_opcode(skb
->data
)),
790 nci_opcode_oid(nci_opcode(skb
->data
)),
791 nci_plen(skb
->data
));
795 mod_timer(&ndev
->cmd_timer
,
796 jiffies
+ msecs_to_jiffies(NCI_CMD_TIMEOUT
));