/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pm_runtime.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "mvm/fw-api.h"

/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
                                          struct iwl_txq *txq, u16 byte_cnt,
                                          int num_tbs)
{
        struct iwlagn_scd_bc_tbl *scd_bc_tbl;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int write_ptr = txq->write_ptr;
        u8 filled_tfd_size, num_fetch_chunks;
        u16 len = byte_cnt;
        __le16 bc_ent;

        scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

        len = DIV_ROUND_UP(len, 4);

        if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
                return;

        filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
                          num_tbs * sizeof(struct iwl_tfh_tb);
        /*
         * filled_tfd_size contains the number of filled bytes in the TFD.
         * Dividing it by 64 will give the number of chunks to fetch
         * to SRAM - 0 for one chunk, 1 for 2 and so on.
         * If, for example, TFD contains only 3 TBs then 32 bytes
         * of the TFD are used, and only one chunk of 64 bytes should
         * be fetched.
         */
        num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

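        /*
         * Byte-count table entry layout, implied by the checks above:
         * bits 0-11 hold the frame length in dwords (hence the 0xFFF
         * limit), bits 12-15 the number of 64-byte chunks to fetch.
         */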
        bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
        scd_bc_tbl[txq->id].tfd_offset[write_ptr] = bc_ent;
}

/*
 * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
                                         struct iwl_txq *txq)
{
        lockdep_assert_held(&txq->lock);

        IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

        /*
         * if not in power-save mode, uCode will never sleep when we're
         * trying to tx (during RFKILL, we're not trying to tx).
         */
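        /* Low 16 bits carry the TFD index, high 16 bits the queue id. */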
        iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
                                    struct iwl_tfh_tfd *tfd)
{
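        /* The TB count lives in the low 5 bits of num_tbs; mask off the rest. */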
        return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
                                    struct iwl_cmd_meta *meta,
                                    struct iwl_tfh_tfd *tfd)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i, num_tbs;

        /* Sanity check on number of chunks */
        num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);

        if (num_tbs >= trans_pcie->max_tbs) {
                IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
                return;
        }

        /* first TB is never freed - it's the bidirectional DMA data */
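        /*
         * meta->tbs is a bitmap: bit i set means TB i was mapped with
         * dma_map_page (an skb fragment), otherwise with dma_map_single,
         * so each TB must be unmapped with the matching helper.
         */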
        for (i = 1; i < num_tbs; i++) {
                if (meta->tbs & BIT(i))
                        dma_unmap_page(trans->dev,
                                       le64_to_cpu(tfd->tbs[i].addr),
                                       le16_to_cpu(tfd->tbs[i].tb_len),
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(trans->dev,
                                         le64_to_cpu(tfd->tbs[i].addr),
                                         le16_to_cpu(tfd->tbs[i].tb_len),
                                         DMA_TO_DEVICE);
        }

        tfd->num_tbs = 0;
}

static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
         * idx is bounded by n_window
         */
        int rd_ptr = txq->read_ptr;
        int idx = get_cmd_index(txq, rd_ptr);

        lockdep_assert_held(&txq->lock);

        /* We have only q->n_window txq->entries, but we use
         * TFD_QUEUE_SIZE_MAX tfds
         */
        iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
                                iwl_pcie_get_tfd(trans_pcie, txq, rd_ptr));

        /* free SKB */
        if (txq->entries) {
                struct sk_buff *skb;

                skb = txq->entries[idx].skb;

                /* Can be called from irqs-disabled context
                 * If skb is not NULL, it means that the whole queue is being
                 * freed and that the queue is not empty - free the skb
                 */
                if (skb) {
                        iwl_op_mode_free_skb(trans->op_mode, skb);
                        txq->entries[idx].skb = NULL;
                }
        }
}

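/*
 * iwl_pcie_gen2_set_tb - append one TB (transfer buffer) to a TFD
 *
 * Returns the index of the TB just filled - callers record page-mapped
 * indices in meta->tbs so iwl_pcie_gen2_tfd_unmap() can unmap them
 * correctly - or -EINVAL if the TFD already holds max_tbs buffers.
 */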
static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
                                struct iwl_tfh_tfd *tfd, dma_addr_t addr,
                                u16 len)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
        struct iwl_tfh_tb *tb = &tfd->tbs[idx];

        /* Each TFD can point to a maximum of max_tbs Tx buffers */
        if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
                IWL_ERR(trans, "Error can not send more than %d chunks\n",
                        trans_pcie->max_tbs);
                return -EINVAL;
        }

        put_unaligned_le64(addr, &tb->addr);
        tb->tb_len = cpu_to_le16(len);

        tfd->num_tbs = cpu_to_le16(idx + 1);

        return idx;
}

static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
                                            struct iwl_txq *txq,
                                            struct iwl_device_cmd *dev_cmd,
                                            struct sk_buff *skb,
                                            struct iwl_cmd_meta *out_meta)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct iwl_tfh_tfd *tfd =
                iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
        dma_addr_t tb_phys;
        int i, len, tb1_len, tb2_len, hdr_len;
        void *tb1_addr;

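        /*
         * TB layout built below: TB0 is the small bi-directional DMA
         * buffer (the first IWL_FIRST_TB_SIZE bytes of the command), TB1
         * the rest of the TX command plus the 802.11 header, TB2 the
         * remainder of the skb head, and any further TBs the skb page
         * fragments.
         */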
        memset(tfd, 0, sizeof(*tfd));

        tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
        /* The first TB points to bi-directional DMA data */
        memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
               IWL_FIRST_TB_SIZE);

        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

        /* there must be data left over for TB1 or this code must be changed */
        BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

        /*
         * The second TB (tb1) points to the remainder of the TX command
         * and the 802.11 header - dword aligned size
         * (This calculation modifies the TX command, so do it before the
         * setup of the first TB)
         */
        len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
              ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;

        tb1_len = ALIGN(len, 4);

        /* map the data for TB1 */
        tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
        tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                goto out_err;
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);

        /* set up TFD's third entry to point to remainder of skb's head */
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        tb2_len = skb_headlen(skb) - hdr_len;

        if (tb2_len > 0) {
                tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
                                         tb2_len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                        goto out_err;
                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
        }

        /* set up the remaining entries to point to the data */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int tb_idx;

                if (!skb_frag_size(frag))
                        continue;

                tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                        goto out_err;
                tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
                                              skb_frag_size(frag));

                out_meta->tbs |= BIT(tb_idx);
        }

        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
                             IWL_FIRST_TB_SIZE + tb1_len,
                             skb->data + hdr_len, tb2_len);
        trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len,
                                  skb->len - hdr_len);

        return tfd;

out_err:
        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
        return NULL;
}

int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
                           struct iwl_device_cmd *dev_cmd, int txq_id)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
        struct iwl_cmd_meta *out_meta;
        struct iwl_txq *txq = &trans_pcie->txq[txq_id];
        void *tfd;

        if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
                      "TX on unused queue %d\n", txq_id))
                return -EINVAL;

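        /*
         * A TFD holds a limited number of TBs; if the skb carries more
         * page fragments than that, linearize it so it still fits.
         */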
        if (skb_is_nonlinear(skb) &&
            skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
            __skb_linearize(skb))
                return -ENOMEM;

        spin_lock(&txq->lock);

        /* Set up driver data for this TFD */
        txq->entries[txq->write_ptr].skb = skb;
        txq->entries[txq->write_ptr].cmd = dev_cmd;

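        /*
         * Stamp the queue id and TFD index into the command header's
         * sequence field, identifying the slot this frame occupies.
         */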
        dev_cmd->hdr.sequence =
                cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                                  INDEX_TO_SEQ(txq->write_ptr)));

        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->entries[txq->write_ptr].meta;
        out_meta->flags = 0;

        tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
        if (!tfd) {
                spin_unlock(&txq->lock);
                return -1;
        }

        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_gen2_update_byte_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
                                      iwl_pcie_gen2_get_num_tbs(trans, tfd));

        /* start timer if queue currently empty */
        if (txq->read_ptr == txq->write_ptr) {
                if (txq->wd_timeout)
                        mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
                IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
                iwl_trans_ref(trans);
        }

        /* Tell device the write index *just past* this latest filled TFD */
        txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
        iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
        if (iwl_queue_space(txq) < txq->high_mark)
                iwl_stop_queue(trans, txq);

        /*
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually.
         */
        spin_unlock(&txq->lock);
        return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
                                      struct iwl_host_cmd *cmd)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        unsigned long flags;
        void *dup_buf = NULL;
        dma_addr_t phys_addr;
        int idx, i, cmd_pos;
        u16 copy_size, cmd_size, tb0_size;
        bool had_nocopy = false;
        u8 group_id = iwl_cmd_groupid(cmd->id);
        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
        struct iwl_tfh_tfd *tfd =
                iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);

        memset(tfd, 0, sizeof(*tfd));

        copy_size = sizeof(struct iwl_cmd_header_wide);
        cmd_size = sizeof(struct iwl_cmd_header_wide);

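        /*
         * First pass over the fragments: compute the copied size and the
         * total command size. Plain fragments are copied into the command
         * buffer, NOCOPY fragments get their own TB, and at most one DUP
         * fragment is duplicated into dup_buf. NOCOPY/DUP chunks must
         * come after all copied ones.
         */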
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                cmddata[i] = cmd->data[i];
                cmdlen[i] = cmd->len[i];

                if (!cmd->len[i])
                        continue;

                /* need at least IWL_FIRST_TB_SIZE copied */
                if (copy_size < IWL_FIRST_TB_SIZE) {
                        int copy = IWL_FIRST_TB_SIZE - copy_size;

                        if (copy > cmdlen[i])
                                copy = cmdlen[i];
                        cmdlen[i] -= copy;
                        cmddata[i] += copy;
                        copy_size += copy;
                }

                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
                        had_nocopy = true;
                        if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
                                idx = -EINVAL;
                                goto free_dup_buf;
                        }
                } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
                        /*
                         * This is also a chunk that isn't copied
                         * to the static buffer so set had_nocopy.
                         */
                        had_nocopy = true;

                        /* only allowed once */
                        if (WARN_ON(dup_buf)) {
                                idx = -EINVAL;
                                goto free_dup_buf;
                        }

                        dup_buf = kmemdup(cmddata[i], cmdlen[i],
                                          GFP_ATOMIC);
                        if (!dup_buf)
                                return -ENOMEM;
                } else {
                        /* NOCOPY must not be followed by normal! */
                        if (WARN_ON(had_nocopy)) {
                                idx = -EINVAL;
                                goto free_dup_buf;
                        }
                        copy_size += cmdlen[i];
                }
                cmd_size += cmd->len[i];
        }

        /*
         * If any of the command structures end up being larger than the
         * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
         * separate TFDs, then we will need to increase the size of the buffers
         */
        if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
                 "Command %s (%#x) is too large (%d bytes)\n",
                 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
                idx = -EINVAL;
                goto free_dup_buf;
        }

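        /*
         * An async command needs two free slots, presumably so that an
         * async burst can never consume the last slot a synchronous
         * command might need.
         */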
        spin_lock_bh(&txq->lock);

        if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
                spin_unlock_bh(&txq->lock);

                IWL_ERR(trans, "No space in command queue\n");
                iwl_op_mode_cmd_queue_full(trans->op_mode);
                idx = -ENOSPC;
                goto free_dup_buf;
        }

        idx = get_cmd_index(txq, txq->write_ptr);
        out_cmd = txq->entries[idx].cmd;
        out_meta = &txq->entries[idx].meta;

        /* re-initialize to NULL */
        memset(out_meta, 0, sizeof(*out_meta));
        if (cmd->flags & CMD_WANT_SKB)
                out_meta->source = cmd;

        /* set up the header */
        out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
        out_cmd->hdr_wide.group_id = group_id;
        out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
        out_cmd->hdr_wide.length =
                cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
        out_cmd->hdr_wide.reserved = 0;
        out_cmd->hdr_wide.sequence =
                cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
                            INDEX_TO_SEQ(txq->write_ptr));

        cmd_pos = sizeof(struct iwl_cmd_header_wide);
        copy_size = sizeof(struct iwl_cmd_header_wide);

        /* and copy the data that needs to be copied */
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                int copy;

                if (!cmd->len[i])
                        continue;

                /* copy everything if not nocopy/dup */
                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
                                           IWL_HCMD_DFL_DUP))) {
                        copy = cmd->len[i];

                        memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
                        cmd_pos += copy;
                        copy_size += copy;
                        continue;
                }

                /*
                 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
                 * in total (for bi-directional DMA), but copy up to what
                 * we can fit into the payload for debug dump purposes.
                 */
                copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

                memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
                cmd_pos += copy;

                /* However, treat copy_size the proper way, we need it below */
                if (copy_size < IWL_FIRST_TB_SIZE) {
                        copy = IWL_FIRST_TB_SIZE - copy_size;

                        if (copy > cmd->len[i])
                                copy = cmd->len[i];
                        copy_size += copy;
                }
        }

        IWL_DEBUG_HC(trans,
                     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
                     iwl_get_cmd_string(trans, cmd->id), group_id,
                     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
                     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

        /* start the TFD with the minimum copy bytes */
        tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
        memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
        iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
                             tb0_size);

        /* map first command fragment, if any remains */
        if (copy_size > tb0_size) {
                phys_addr = dma_map_single(trans->dev,
                                           ((u8 *)&out_cmd->hdr) + tb0_size,
                                           copy_size - tb0_size,
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(trans->dev, phys_addr)) {
                        idx = -ENOMEM;
                        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
                        goto out;
                }
                iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
                                     copy_size - tb0_size);
        }

        /* map the remaining (adjusted) nocopy/dup fragments */
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                const void *data = cmddata[i];

                if (!cmdlen[i])
                        continue;
                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
                                           IWL_HCMD_DFL_DUP)))
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
                        data = dup_buf;
                phys_addr = dma_map_single(trans->dev, (void *)data,
                                           cmdlen[i], DMA_TO_DEVICE);
                if (dma_mapping_error(trans->dev, phys_addr)) {
                        idx = -ENOMEM;
                        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
                        goto out;
                }
                iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
        }

        BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
        out_meta->flags = cmd->flags;
        if (WARN_ON_ONCE(txq->entries[idx].free_buf))
                kzfree(txq->entries[idx].free_buf);
        txq->entries[idx].free_buf = dup_buf;

        trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

        /* start timer if queue currently empty */
        if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
                mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
        if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
            !trans_pcie->ref_cmd_in_flight) {
                trans_pcie->ref_cmd_in_flight = true;
                IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
                iwl_trans_ref(trans);
        }
        /* Increment and update queue's write index */
        txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
        iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
        spin_unlock_bh(&txq->lock);
free_dup_buf:
        if (idx < 0)
                kfree(dup_buf);
        return idx;
}

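/*
 * A synchronous command blocks the caller until the firmware response
 * clears STATUS_SYNC_HCMD_ACTIVE or the timeout below fires, in which
 * case an NMI is forced and FW error handling kicks in.
 */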
#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
                                        struct iwl_host_cmd *cmd)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
        int cmd_idx;
        int ret;

        IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

        if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
                                  &trans->status),
                 "Command %s: a command is already active!\n", cmd_str))
                return -EIO;

        IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

        if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
                ret = wait_event_timeout(trans_pcie->d0i3_waitq,
                                pm_runtime_active(&trans_pcie->pci_dev->dev),
                                msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
                if (!ret) {
                        IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
                        return -ETIMEDOUT;
                }
        }

        cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
        if (cmd_idx < 0) {
                ret = cmd_idx;
                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
                IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
                        cmd_str, ret);
                return ret;
        }

        ret = wait_event_timeout(trans_pcie->wait_command_queue,
                                 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
                                           &trans->status),
                                 HOST_COMPLETE_TIMEOUT);
        if (!ret) {
                struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

                IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
                        cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

                IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
                        txq->read_ptr, txq->write_ptr);

                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
                IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
                               cmd_str);
                ret = -ETIMEDOUT;

                iwl_force_nmi(trans);
                iwl_trans_fw_error(trans);

                goto cancel;
        }

        if (test_bit(STATUS_FW_ERROR, &trans->status)) {
                IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
                dump_stack();
                ret = -EIO;
                goto cancel;
        }

        if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
            test_bit(STATUS_RFKILL, &trans->status)) {
                IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
                ret = -ERFKILL;
                goto cancel;
        }

        if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
                IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
                ret = -EIO;
                goto cancel;
        }

        return 0;

cancel:
        if (cmd->flags & CMD_WANT_SKB) {
                /*
                 * Cancel the CMD_WANT_SKB flag for the cmd in the
                 * TX cmd queue. Otherwise in case the cmd comes
                 * in later, it will possibly set an invalid
                 * address (cmd->meta.source).
                 */
                trans_pcie->txq[trans_pcie->cmd_queue].
                        entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
        }

        if (cmd->resp_pkt) {
                iwl_free_resp(cmd);
                cmd->resp_pkt = NULL;
        }

        return ret;
}

int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
                                  struct iwl_host_cmd *cmd)
{
        if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
            test_bit(STATUS_RFKILL, &trans->status)) {
                IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
                                  cmd->id);
                return -ERFKILL;
        }

        if (cmd->flags & CMD_ASYNC) {
                int ret;

                /* An asynchronous command can not expect an SKB to be set. */
                if (WARN_ON(cmd->flags & CMD_WANT_SKB))
                        return -EINVAL;

                ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
                if (ret < 0) {
                        IWL_ERR(trans,
                                "Error sending %s: enqueue_hcmd failed: %d\n",
                                iwl_get_cmd_string(trans, cmd->id), ret);
                        return ret;
                }
                return 0;
        }

        return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}

/*
 * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = &trans_pcie->txq[txq_id];

        spin_lock_bh(&txq->lock);
        while (txq->write_ptr != txq->read_ptr) {
                IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
                                   txq_id, txq->read_ptr);

                iwl_pcie_gen2_free_tfd(trans, txq);
                txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);

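                /*
                 * Once the queue drains completely, drop the runtime-PM
                 * reference: the one taken on first tx for data queues,
                 * or the in-flight command reference for the cmd queue.
                 */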
                if (txq->read_ptr == txq->write_ptr) {
                        unsigned long flags;

                        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
                        if (txq_id != trans_pcie->cmd_queue) {
                                IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
                                              txq->id);
                                iwl_trans_unref(trans);
                        } else if (trans_pcie->ref_cmd_in_flight) {
                                trans_pcie->ref_cmd_in_flight = false;
                                IWL_DEBUG_RPM(trans,
                                              "clear ref_cmd_in_flight\n");
                                iwl_trans_unref(trans);
                        }
                        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
                }
        }
        spin_unlock_bh(&txq->lock);

        /* just in case - this queue may have been stopped */
        iwl_wake_queue(trans, txq);
}

int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                                 struct iwl_tx_queue_cfg_cmd *cmd,
                                 int cmd_id,
                                 unsigned int timeout)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = &trans_pcie->txq[cmd->scd_queue];
        struct iwl_host_cmd hcmd = {
                .id = cmd_id,
                .len = { sizeof(*cmd) },
                .data = { cmd, },
                .flags = 0,
        };
        u16 ssn = le16_to_cpu(cmd->ssn);

        if (test_and_set_bit(cmd->scd_queue, trans_pcie->queue_used)) {
                WARN_ONCE(1, "queue %d already used", cmd->scd_queue);
                return -EINVAL;
        }

        txq->wd_timeout = msecs_to_jiffies(timeout);

        /*
         * Place first TFD at index corresponding to start sequence number.
         * Assumes that ssn_idx is valid (!= 0xFFF)
         */
        txq->read_ptr = (ssn & 0xff);
        txq->write_ptr = (ssn & 0xff);
        iwl_write_direct32(trans, HBUS_TARG_WRPTR,
                           (ssn & 0xff) | (cmd->scd_queue << 16));

        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d WrPtr: %d\n",
                            cmd->scd_queue, ssn & 0xff);

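        /*
         * Tell the firmware where this queue's TFD ring and byte-count
         * table live in host memory, and how large the ring is.
         */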
        cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
        cmd->byte_cnt_addr = cpu_to_le64(trans_pcie->scd_bc_tbls.dma +
                                         cmd->scd_queue *
                                         sizeof(struct iwlagn_scd_bc_tbl));
        cmd->cb_size = cpu_to_le64(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX));

        return iwl_trans_send_cmd(trans, &hcmd);
}

void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        /*
         * Upon HW Rfkill - we stop the device, and then stop the queues
         * in the op_mode. Just for the sake of the simplicity of the op_mode,
         * allow the op_mode to call txq_disable after it already called
         * stop_device.
         */
        if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
                WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
                          "queue %d not used", queue);
                return;
        }

        iwl_pcie_gen2_txq_unmap(trans, queue);

        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}