]>
Commit | Line | Data |
---|---|---|
6b35ff91 SS |
1 | /****************************************************************************** |
2 | * | |
3 | * This file is provided under a dual BSD/GPLv2 license. When using or | |
4 | * redistributing this file, you may do so under either license. | |
5 | * | |
6 | * GPL LICENSE SUMMARY | |
7 | * | |
8 | * Copyright(c) 2017 Intel Deutschland GmbH | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or modify | |
11 | * it under the terms of version 2 of the GNU General Public License as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, but | |
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
17 | * General Public License for more details. | |
18 | * | |
19 | * BSD LICENSE | |
20 | * | |
21 | * Copyright(c) 2017 Intel Deutschland GmbH | |
22 | * All rights reserved. | |
23 | * | |
24 | * Redistribution and use in source and binary forms, with or without | |
25 | * modification, are permitted provided that the following conditions | |
26 | * are met: | |
27 | * | |
28 | * * Redistributions of source code must retain the above copyright | |
29 | * notice, this list of conditions and the following disclaimer. | |
30 | * * Redistributions in binary form must reproduce the above copyright | |
31 | * notice, this list of conditions and the following disclaimer in | |
32 | * the documentation and/or other materials provided with the | |
33 | * distribution. | |
34 | * * Neither the name Intel Corporation nor the names of its | |
35 | * contributors may be used to endorse or promote products derived | |
36 | * from this software without specific prior written permission. | |
37 | * | |
38 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
39 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
40 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
41 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
42 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
43 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
44 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
45 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
46 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
47 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
48 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
49 | * | |
50 | *****************************************************************************/ | |
ca60da2e | 51 | #include <linux/pm_runtime.h> |
6ffe5de3 | 52 | #include <net/tso.h> |
6b35ff91 SS |
53 | |
54 | #include "iwl-debug.h" | |
55 | #include "iwl-csr.h" | |
56 | #include "iwl-io.h" | |
57 | #include "internal.h" | |
ab6c6445 SS |
58 | #include "mvm/fw-api.h" |
59 | ||
13a3a390 SS |
60 | /* |
61 | * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels | |
62 | */ | |
63 | void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) | |
64 | { | |
65 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
66 | int txq_id; | |
67 | ||
68 | /* | |
69 | * This function can be called before the op_mode disabled the | |
70 | * queues. This happens when we have an rfkill interrupt. | |
71 | * Since we stop Tx altogether - mark the queues as stopped. | |
72 | */ | |
73 | memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); | |
74 | memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); | |
75 | ||
76 | /* Unmap DMA from host system and free skb's */ | |
77 | for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) { | |
78 | if (!trans_pcie->txq[txq_id]) | |
79 | continue; | |
80 | iwl_pcie_gen2_txq_unmap(trans, txq_id); | |
81 | } | |
82 | } | |
83 | ||
ab6c6445 SS |
/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 *
 * Writes the frame's length (in dwords) and the number of 64-byte TFD
 * chunks the DMA engine must fetch into the scheduler byte-count table
 * slot corresponding to the queue's current write pointer.
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
	int write_ptr = txq->write_ptr;
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	/* hardware expects the byte count in dwords, rounded up */
	len = DIV_ROUND_UP(len, 4);

	/* the table entry carries only 12 bits of length (see below) */
	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	/* chunk count in bits 12-15, dword length in bits 0-11 */
	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	scd_bc_tbl->tfd_offset[write_ptr] = bc_ent;
}
116 | ||
117 | /* | |
118 | * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware | |
119 | */ | |
120 | static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans, | |
121 | struct iwl_txq *txq) | |
122 | { | |
ab6c6445 SS |
123 | lockdep_assert_held(&txq->lock); |
124 | ||
066fd29a | 125 | IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); |
ab6c6445 SS |
126 | |
127 | /* | |
128 | * if not in power-save mode, uCode will never sleep when we're | |
129 | * trying to tx (during RFKILL, we're not trying to tx). | |
130 | */ | |
43e9cdc2 | 131 | iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); |
ab6c6445 SS |
132 | } |
133 | ||
066fd29a SS |
134 | static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans, |
135 | struct iwl_tfh_tfd *tfd) | |
ab6c6445 | 136 | { |
066fd29a | 137 | return le16_to_cpu(tfd->num_tbs) & 0x1f; |
ab6c6445 SS |
138 | } |
139 | ||
/*
 * iwl_pcie_gen2_tfd_unmap - unmap every DMA buffer a TFD points to
 * and reset its TB count. TB 0 (the bi-directional scratch buffer)
 * is never unmapped here.
 */
static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
				    struct iwl_cmd_meta *meta,
				    struct iwl_tfh_tfd *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);

	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		/*
		 * meta->tbs is a bitmap: a set bit means the TB was mapped
		 * with dma_map_page (skb frag), otherwise dma_map_single.
		 * The unmap call must match the map call.
		 */
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}
171 | ||
/*
 * iwl_pcie_gen2_free_tfd - unmap and free the TFD (and its skb, if any)
 * at the queue's current read pointer. Caller must hold txq->lock.
 */
static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = get_cmd_index(txq, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
				iwl_pcie_get_tfd(trans_pcie, txq, rd_ptr));

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}
206 | ||
cefe13af SS |
/*
 * iwl_pcie_gen2_set_tb - append one transfer buffer (addr/len) to a TFD
 *
 * Returns the index of the TB just written, or -EINVAL if the TFD is
 * already full (max_tbs entries).
 */
static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
				struct iwl_tfh_tfd *tfd, dma_addr_t addr,
				u16 len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	/* TB addresses in the TFH TFD are not naturally aligned */
	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}
229 | ||
6ffe5de3 SS |
/*
 * iwl_pcie_gen2_build_amsdu - build the TBs of an A-MSDU frame using TSO
 *
 * Splits a GSO skb into MSS-sized subframes: for each subframe a header
 * block (DA/SA, length, SNAP/IP/TCP built by the TSO core) is written
 * into a per-CPU header page and mapped as one TB, followed by TBs for
 * the payload chunks. The skb's 802.11 header (+IV) is pulled before
 * using the TSO core and pushed back before returning.
 *
 * Returns 0 on success, -ENOMEM/-EINVAL on failure (always -EINVAL when
 * CONFIG_INET is not set).
 */
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd, int start_len,
				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, NULL, 0);

	/* 8 == SNAP header length */
	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	/* page ref is dropped on tx-status; remember it in skb->cb */
	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int tb_len;
		dma_addr_t tb_phys;
		struct tcphdr *tcph;
		u8 *iph, *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		/* pad the previous subframe to a 4-byte boundary */
		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		iph = hdr_page->pos + 8;
		tcph = (void *)(iph + ip_hdrlen);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		/* one TB for all of this subframe's headers */
		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
			goto out_err;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				goto out_err;
			}
			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
						       tb_len);

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re -add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}
366 | ||
cefe13af SS |
367 | static |
368 | struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, | |
369 | struct iwl_txq *txq, | |
370 | struct iwl_device_cmd *dev_cmd, | |
371 | struct sk_buff *skb, | |
372 | struct iwl_cmd_meta *out_meta) | |
ab6c6445 SS |
373 | { |
374 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
cefe13af SS |
375 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
376 | struct iwl_tfh_tfd *tfd = | |
377 | iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); | |
378 | dma_addr_t tb_phys; | |
6ffe5de3 | 379 | bool amsdu; |
cefe13af SS |
380 | int i, len, tb1_len, tb2_len, hdr_len; |
381 | void *tb1_addr; | |
382 | ||
383 | memset(tfd, 0, sizeof(*tfd)); | |
384 | ||
6ffe5de3 SS |
385 | amsdu = ieee80211_is_data_qos(hdr->frame_control) && |
386 | (*ieee80211_get_qos_ctl(hdr) & | |
387 | IEEE80211_QOS_CTL_A_MSDU_PRESENT); | |
388 | ||
cefe13af SS |
389 | tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr); |
390 | /* The first TB points to bi-directional DMA data */ | |
6ffe5de3 SS |
391 | if (!amsdu) |
392 | memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, | |
393 | IWL_FIRST_TB_SIZE); | |
cefe13af SS |
394 | |
395 | iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); | |
396 | ||
397 | /* there must be data left over for TB1 or this code must be changed */ | |
398 | BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); | |
ab6c6445 SS |
399 | |
400 | /* | |
cefe13af SS |
401 | * The second TB (tb1) points to the remainder of the TX command |
402 | * and the 802.11 header - dword aligned size | |
403 | * (This calculation modifies the TX command, so do it before the | |
404 | * setup of the first TB) | |
ab6c6445 | 405 | */ |
cefe13af SS |
406 | len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) + |
407 | ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE; | |
408 | ||
6ffe5de3 SS |
409 | /* do not align A-MSDU to dword as the subframe header aligns it */ |
410 | if (amsdu) | |
411 | tb1_len = len; | |
412 | else | |
413 | tb1_len = ALIGN(len, 4); | |
cefe13af SS |
414 | |
415 | /* map the data for TB1 */ | |
416 | tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; | |
417 | tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); | |
418 | if (unlikely(dma_mapping_error(trans->dev, tb_phys))) | |
419 | goto out_err; | |
420 | iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); | |
421 | ||
cefe13af | 422 | hdr_len = ieee80211_hdrlen(hdr->frame_control); |
6ffe5de3 SS |
423 | |
424 | if (amsdu) { | |
425 | if (!iwl_pcie_gen2_build_amsdu(trans, skb, tfd, | |
426 | tb1_len + IWL_FIRST_TB_SIZE, | |
427 | hdr_len, dev_cmd)) | |
428 | goto out_err; | |
429 | ||
430 | /* | |
431 | * building the A-MSDU might have changed this data, so memcpy | |
432 | * it now | |
433 | */ | |
434 | memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, | |
435 | IWL_FIRST_TB_SIZE); | |
436 | return tfd; | |
437 | } | |
438 | ||
439 | /* set up TFD's third entry to point to remainder of skb's head */ | |
ab6c6445 SS |
440 | tb2_len = skb_headlen(skb) - hdr_len; |
441 | ||
442 | if (tb2_len > 0) { | |
cefe13af SS |
443 | tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, |
444 | tb2_len, DMA_TO_DEVICE); | |
445 | if (unlikely(dma_mapping_error(trans->dev, tb_phys))) | |
446 | goto out_err; | |
447 | iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len); | |
ab6c6445 SS |
448 | } |
449 | ||
450 | /* set up the remaining entries to point to the data */ | |
451 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
452 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
ab6c6445 SS |
453 | int tb_idx; |
454 | ||
455 | if (!skb_frag_size(frag)) | |
456 | continue; | |
457 | ||
458 | tb_phys = skb_frag_dma_map(trans->dev, frag, 0, | |
459 | skb_frag_size(frag), DMA_TO_DEVICE); | |
460 | ||
cefe13af SS |
461 | if (unlikely(dma_mapping_error(trans->dev, tb_phys))) |
462 | goto out_err; | |
463 | tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, | |
464 | skb_frag_size(frag)); | |
ab6c6445 SS |
465 | |
466 | out_meta->tbs |= BIT(tb_idx); | |
467 | } | |
468 | ||
cefe13af SS |
469 | trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, |
470 | IWL_FIRST_TB_SIZE + tb1_len, | |
ab6c6445 | 471 | skb->data + hdr_len, tb2_len); |
78c1acf3 | 472 | trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len); |
cefe13af SS |
473 | |
474 | return tfd; | |
475 | ||
476 | out_err: | |
477 | iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); | |
478 | return NULL; | |
ab6c6445 SS |
479 | } |
480 | ||
ab6c6445 SS |
/*
 * iwl_trans_pcie_gen2_tx - transmit one frame on the given hw queue
 *
 * Builds the TFD, fills the byte-count table entry, bumps the write
 * pointer and tells the hardware. Returns 0 on success, -EINVAL for an
 * unused queue, -ENOMEM if linearizing an over-fragmented skb fails,
 * or -1 if the TFD could not be built.
 */
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	void *tfd;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	/* skbs with too many frags for one TFD must be linearized first */
	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
				      iwl_pcie_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr) {
		if (txq->wd_timeout)
			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
		/* hold a runtime-PM reference while the queue is non-empty */
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
		iwl_trans_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	if (iwl_queue_space(txq) < txq->high_mark)
		iwl_stop_queue(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}
6b35ff91 | 544 | |
ca60da2e SS |
545 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ |
546 | ||
/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport layer data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
				      struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx, i, cmd_pos;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd =
		iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);

	memset(tfd, 0, sizeof(*tfd));

	/*
	 * First pass: work out how much must be copied into the static
	 * command buffer (copy_size) vs. mapped directly (nocopy/dup
	 * fragments), and the total serialized size (cmd_size).
	 */
	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	/* sync commands need an extra free slot for the response */
	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
			     tb0_size);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
				     copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	/* keep the device awake while a command is in flight */
	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}
	/* Increment and update queue's write index */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
795 | ||
796 | #define HOST_COMPLETE_TIMEOUT (2 * HZ) | |
797 | ||
798 | static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, | |
799 | struct iwl_host_cmd *cmd) | |
800 | { | |
801 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
802 | const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); | |
b2a3b1c1 | 803 | struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; |
ca60da2e SS |
804 | int cmd_idx; |
805 | int ret; | |
806 | ||
807 | IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); | |
808 | ||
809 | if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, | |
810 | &trans->status), | |
811 | "Command %s: a command is already active!\n", cmd_str)) | |
812 | return -EIO; | |
813 | ||
814 | IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); | |
815 | ||
816 | if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { | |
817 | ret = wait_event_timeout(trans_pcie->d0i3_waitq, | |
818 | pm_runtime_active(&trans_pcie->pci_dev->dev), | |
819 | msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); | |
820 | if (!ret) { | |
821 | IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); | |
822 | return -ETIMEDOUT; | |
823 | } | |
824 | } | |
825 | ||
826 | cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); | |
827 | if (cmd_idx < 0) { | |
828 | ret = cmd_idx; | |
829 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); | |
830 | IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", | |
831 | cmd_str, ret); | |
832 | return ret; | |
833 | } | |
834 | ||
835 | ret = wait_event_timeout(trans_pcie->wait_command_queue, | |
836 | !test_bit(STATUS_SYNC_HCMD_ACTIVE, | |
837 | &trans->status), | |
838 | HOST_COMPLETE_TIMEOUT); | |
839 | if (!ret) { | |
ca60da2e SS |
840 | IWL_ERR(trans, "Error sending %s: time out after %dms.\n", |
841 | cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); | |
842 | ||
843 | IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", | |
844 | txq->read_ptr, txq->write_ptr); | |
845 | ||
846 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); | |
847 | IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", | |
848 | cmd_str); | |
849 | ret = -ETIMEDOUT; | |
850 | ||
851 | iwl_force_nmi(trans); | |
852 | iwl_trans_fw_error(trans); | |
853 | ||
854 | goto cancel; | |
855 | } | |
856 | ||
857 | if (test_bit(STATUS_FW_ERROR, &trans->status)) { | |
858 | IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); | |
859 | dump_stack(); | |
860 | ret = -EIO; | |
861 | goto cancel; | |
862 | } | |
863 | ||
864 | if (!(cmd->flags & CMD_SEND_IN_RFKILL) && | |
326477e4 | 865 | test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { |
ca60da2e SS |
866 | IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); |
867 | ret = -ERFKILL; | |
868 | goto cancel; | |
869 | } | |
870 | ||
871 | if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { | |
872 | IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); | |
873 | ret = -EIO; | |
874 | goto cancel; | |
875 | } | |
876 | ||
877 | return 0; | |
878 | ||
879 | cancel: | |
880 | if (cmd->flags & CMD_WANT_SKB) { | |
881 | /* | |
882 | * Cancel the CMD_WANT_SKB flag for the cmd in the | |
883 | * TX cmd queue. Otherwise in case the cmd comes | |
884 | * in later, it will possibly set an invalid | |
885 | * address (cmd->meta.source). | |
886 | */ | |
b2a3b1c1 | 887 | txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; |
ca60da2e SS |
888 | } |
889 | ||
890 | if (cmd->resp_pkt) { | |
891 | iwl_free_resp(cmd); | |
892 | cmd->resp_pkt = NULL; | |
893 | } | |
894 | ||
895 | return ret; | |
896 | } | |
897 | ||
898 | int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, | |
899 | struct iwl_host_cmd *cmd) | |
900 | { | |
901 | if (!(cmd->flags & CMD_SEND_IN_RFKILL) && | |
326477e4 | 902 | test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { |
ca60da2e SS |
903 | IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", |
904 | cmd->id); | |
905 | return -ERFKILL; | |
906 | } | |
907 | ||
908 | if (cmd->flags & CMD_ASYNC) { | |
909 | int ret; | |
910 | ||
911 | /* An asynchronous command can not expect an SKB to be set. */ | |
912 | if (WARN_ON(cmd->flags & CMD_WANT_SKB)) | |
913 | return -EINVAL; | |
914 | ||
915 | ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); | |
916 | if (ret < 0) { | |
917 | IWL_ERR(trans, | |
918 | "Error sending %s: enqueue_hcmd failed: %d\n", | |
919 | iwl_get_cmd_string(trans, cmd->id), ret); | |
920 | return ret; | |
921 | } | |
922 | return 0; | |
923 | } | |
924 | ||
925 | return iwl_pcie_gen2_send_hcmd_sync(trans, cmd); | |
926 | } | |
927 | ||
6b35ff91 SS |
/*
 * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
 *
 * Walks the queue from read_ptr to write_ptr, freeing every pending TFD,
 * and drops the runtime-PM reference(s) once the queue drains. Finally
 * wakes the queue in case it had been stopped due to being full.
 */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	/* drain everything still queued between read_ptr and write_ptr */
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		iwl_pcie_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);

		/* queue just became empty - release runtime-PM references */
		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			/* reg_lock nests inside txq->lock here */
			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq->id);
				iwl_trans_unref(trans);
			} else if (trans_pcie->ref_cmd_in_flight) {
				/* cmd queue holds at most one reference */
				trans_pcie->ref_cmd_in_flight = false;
				IWL_DEBUG_RPM(trans,
					      "clear ref_cmd_in_flight\n");
				iwl_trans_unref(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}
966 | ||
b8e8d7ce SS |
967 | static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, |
968 | struct iwl_txq *txq) | |
969 | { | |
970 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
971 | struct device *dev = trans->dev; | |
972 | ||
973 | /* De-alloc circular buffer of TFDs */ | |
974 | if (txq->tfds) { | |
975 | dma_free_coherent(dev, | |
976 | trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX, | |
977 | txq->tfds, txq->dma_addr); | |
978 | dma_free_coherent(dev, | |
979 | sizeof(*txq->first_tb_bufs) * txq->n_window, | |
980 | txq->first_tb_bufs, txq->first_tb_dma); | |
981 | } | |
982 | ||
983 | kfree(txq->entries); | |
984 | iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl); | |
985 | kfree(txq); | |
986 | } | |
987 | ||
13a3a390 SS |
/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int i;

	if (WARN_ON(!txq))
		return;

	/* drop anything still pending before tearing memory down */
	iwl_pcie_gen2_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			/* kzfree zeroes before freeing - host command
			 * buffers may carry sensitive data */
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}
	/* stop the stuck-queue watchdog before freeing what it inspects */
	del_timer_sync(&txq->stuck_timer);

	iwl_pcie_gen2_txq_free_memory(trans, txq);

	trans_pcie->txq[txq_id] = NULL;

	clear_bit(txq_id, trans_pcie->queue_used);
}
1021 | ||
6b35ff91 SS |
/*
 * iwl_trans_pcie_dyn_txq_alloc - dynamically allocate a TX queue.
 *
 * Allocates the host-side queue resources (byte-count table, TFD ring),
 * then sends @cmd (host command @cmd_id) to the firmware, which assigns
 * the queue number and initial write pointer in its response.
 *
 * Returns the firmware-assigned queue id (>= 0) on success, or a
 * negative errno on failure; all partially-allocated resources are
 * released on the error paths.
 */
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id,
				 unsigned int timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue_cfg_rsp *rsp;
	struct iwl_txq *txq;
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(*cmd) },
		.data = { cmd, },
		/* we need the response payload to learn the assigned qid */
		.flags = CMD_WANT_SKB,
	};
	int ret, qid;

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return -ENOMEM;
	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
				     sizeof(struct iwlagn_scd_bc_tbl));
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return -ENOMEM;
	}

	ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	/* point the fw at the rings we just allocated */
	cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	/* sanity-check the response before trusting its contents */
	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd.resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);

	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans_pcie->txq[qid] = txq;

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = le16_to_cpu(rsp->write_pointer);
	txq->write_ptr = le16_to_cpu(rsp->write_pointer);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (txq->write_ptr) | (qid << 16));
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(&hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(&hcmd);
error:
	iwl_pcie_gen2_txq_free_memory(trans, txq);
	return ret;
}
1109 | ||
1110 | void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) | |
1111 | { | |
1112 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
1113 | ||
6b35ff91 SS |
1114 | /* |
1115 | * Upon HW Rfkill - we stop the device, and then stop the queues | |
1116 | * in the op_mode. Just for the sake of the simplicity of the op_mode, | |
1117 | * allow the op_mode to call txq_disable after it already called | |
1118 | * stop_device. | |
1119 | */ | |
1120 | if (!test_and_clear_bit(queue, trans_pcie->queue_used)) { | |
1121 | WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), | |
1122 | "queue %d not used", queue); | |
1123 | return; | |
1124 | } | |
1125 | ||
1126 | iwl_pcie_gen2_txq_unmap(trans, queue); | |
1127 | ||
1128 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); | |
1129 | } | |
1130 | ||
13a3a390 SS |
1131 | void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) |
1132 | { | |
1133 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
1134 | int i; | |
1135 | ||
1136 | memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); | |
1137 | ||
1138 | /* Free all TX queues */ | |
1139 | for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) { | |
1140 | if (!trans_pcie->txq[i]) | |
1141 | continue; | |
1142 | ||
1143 | iwl_pcie_gen2_txq_free(trans, i); | |
1144 | } | |
1145 | } | |
1146 | ||
1147 | int iwl_pcie_gen2_tx_init(struct iwl_trans *trans) | |
1148 | { | |
1149 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
1150 | struct iwl_txq *cmd_queue; | |
1151 | int txq_id = trans_pcie->cmd_queue, ret; | |
1152 | ||
1153 | /* alloc and init the command queue */ | |
1154 | if (!trans_pcie->txq[txq_id]) { | |
1155 | cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL); | |
1156 | if (!cmd_queue) { | |
1157 | IWL_ERR(trans, "Not enough memory for command queue\n"); | |
1158 | return -ENOMEM; | |
1159 | } | |
1160 | trans_pcie->txq[txq_id] = cmd_queue; | |
b8e8d7ce | 1161 | ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true); |
13a3a390 SS |
1162 | if (ret) { |
1163 | IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); | |
1164 | goto error; | |
1165 | } | |
1166 | } else { | |
1167 | cmd_queue = trans_pcie->txq[txq_id]; | |
1168 | } | |
1169 | ||
b8e8d7ce | 1170 | ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true); |
13a3a390 SS |
1171 | if (ret) { |
1172 | IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); | |
1173 | goto error; | |
1174 | } | |
b8e8d7ce | 1175 | trans_pcie->txq[txq_id]->id = txq_id; |
13a3a390 SS |
1176 | set_bit(txq_id, trans_pcie->queue_used); |
1177 | ||
1178 | return 0; | |
1179 | ||
1180 | error: | |
1181 | iwl_pcie_gen2_tx_free(trans); | |
1182 | return ret; | |
1183 | } | |
1184 |