1 | /****************************************************************************** |
2 | * | |
3 | * This file is provided under a dual BSD/GPLv2 license. When using or | |
4 | * redistributing this file, you may do so under either license. | |
5 | * | |
6 | * GPL LICENSE SUMMARY | |
7 | * | |
8 | * Copyright(c) 2017 Intel Deutschland GmbH | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or modify | |
11 | * it under the terms of version 2 of the GNU General Public License as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, but | |
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
17 | * General Public License for more details. | |
18 | * | |
19 | * BSD LICENSE | |
20 | * | |
21 | * Copyright(c) 2017 Intel Deutschland GmbH | |
22 | * All rights reserved. | |
23 | * | |
24 | * Redistribution and use in source and binary forms, with or without | |
25 | * modification, are permitted provided that the following conditions | |
26 | * are met: | |
27 | * | |
28 | * * Redistributions of source code must retain the above copyright | |
29 | * notice, this list of conditions and the following disclaimer. | |
30 | * * Redistributions in binary form must reproduce the above copyright | |
31 | * notice, this list of conditions and the following disclaimer in | |
32 | * the documentation and/or other materials provided with the | |
33 | * distribution. | |
34 | * * Neither the name Intel Corporation nor the names of its | |
35 | * contributors may be used to endorse or promote products derived | |
36 | * from this software without specific prior written permission. | |
37 | * | |
38 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
39 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
40 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
41 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
42 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
43 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
44 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
45 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
46 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
47 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
48 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
49 | * | |
50 | *****************************************************************************/ | |
51 | ||
52 | #include "iwl-debug.h" | |
53 | #include "iwl-csr.h" | |
54 | #include "iwl-io.h" | |
55 | #include "internal.h" | |
56 | ||
/*
 * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
 *
 * Drains TX queue @txq_id: frees every TFD still pending between read_ptr
 * and write_ptr, then wakes the queue in case it had been stopped.
 * Takes txq->lock (BH-disabled) for the drain and nests reg_lock (irqsave)
 * around the runtime-PM reference drop once the queue becomes empty.
 */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	/* Walk read_ptr forward until it catches up with write_ptr. */
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		iwl_pcie_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);

		/*
		 * Queue just became empty: release the runtime-PM reference
		 * that was held while frames (or a host command) were in
		 * flight. reg_lock protects ref_cmd_in_flight.
		 */
		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				/* data queue: one ref per non-empty queue */
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq->id);
				iwl_trans_unref(trans);
			} else if (trans_pcie->ref_cmd_in_flight) {
				/* command queue: ref held per cmd in flight */
				trans_pcie->ref_cmd_in_flight = false;
				IWL_DEBUG_RPM(trans,
					      "clear ref_cmd_in_flight\n");
				iwl_trans_unref(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}
95 | ||
96 | int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, | |
97 | struct iwl_tx_queue_cfg_cmd *cmd, | |
98 | int cmd_id, | |
99 | unsigned int timeout) | |
100 | { | |
101 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
102 | struct iwl_txq *txq = &trans_pcie->txq[cmd->scd_queue]; | |
103 | struct iwl_host_cmd hcmd = { | |
104 | .id = cmd_id, | |
105 | .len = { sizeof(*cmd) }, | |
106 | .data = { cmd, }, | |
107 | .flags = 0, | |
108 | }; | |
109 | u16 ssn = le16_to_cpu(cmd->ssn); | |
110 | ||
111 | if (test_and_set_bit(cmd->scd_queue, trans_pcie->queue_used)) { | |
112 | WARN_ONCE(1, "queue %d already used", cmd->scd_queue); | |
113 | return -EINVAL; | |
114 | } | |
115 | ||
116 | txq->wd_timeout = msecs_to_jiffies(timeout); | |
117 | ||
118 | /* | |
119 | * Place first TFD at index corresponding to start sequence number. | |
120 | * Assumes that ssn_idx is valid (!= 0xFFF) | |
121 | */ | |
122 | txq->read_ptr = (ssn & 0xff); | |
123 | txq->write_ptr = (ssn & 0xff); | |
124 | iwl_write_direct32(trans, HBUS_TARG_WRPTR, | |
125 | (ssn & 0xff) | (cmd->scd_queue << 8)); | |
126 | ||
127 | IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d WrPtr: %d\n", | |
128 | cmd->scd_queue, ssn & 0xff); | |
129 | ||
130 | cmd->tfdq_addr = cpu_to_le64(txq->dma_addr); | |
131 | cmd->byte_cnt_addr = cpu_to_le64(trans_pcie->scd_bc_tbls.dma + | |
132 | cmd->scd_queue * | |
133 | sizeof(struct iwlagn_scd_bc_tbl)); | |
134 | cmd->cb_size = cpu_to_le64(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX)); | |
135 | ||
136 | return iwl_trans_send_cmd(trans, &hcmd); | |
137 | } | |
138 | ||
139 | void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) | |
140 | { | |
141 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
142 | ||
143 | trans_pcie->txq[queue].frozen_expiry_remainder = 0; | |
144 | trans_pcie->txq[queue].frozen = false; | |
145 | ||
146 | /* | |
147 | * Upon HW Rfkill - we stop the device, and then stop the queues | |
148 | * in the op_mode. Just for the sake of the simplicity of the op_mode, | |
149 | * allow the op_mode to call txq_disable after it already called | |
150 | * stop_device. | |
151 | */ | |
152 | if (!test_and_clear_bit(queue, trans_pcie->queue_used)) { | |
153 | WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), | |
154 | "queue %d not used", queue); | |
155 | return; | |
156 | } | |
157 | ||
158 | iwl_pcie_gen2_txq_unmap(trans, queue); | |
159 | ||
160 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); | |
161 | } | |
162 |