/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "mvm/fw-api.h"

/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
					  struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->write_ptr;
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 gives the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for two, and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	scd_bc_tbl[txq->id].tfd_offset[write_ptr] = bc_ent;
}

/*
 * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
					 struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq->id << 8));
}

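/*
 * iwl_pcie_gen2_get_num_tbs - Extract the number of used TBs from a TFD
 */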
static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
				    struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

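/*
 * iwl_pcie_gen2_tfd_unmap - Unmap a TFD's DMA mappings
 *
 * The first TB is never unmapped here - it holds the bi-directional DMA
 * data that is managed together with the queue itself.
 */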
static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
				    struct iwl_cmd_meta *meta,
				    struct iwl_txq *txq, int index)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);

	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}

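/*
 * iwl_pcie_gen2_free_tfd - Unmap the TFD at the queue's read pointer and
 * free its skb, if any
 */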
static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = get_cmd_index(txq, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

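/*
 * iwl_pcie_gen2_set_tb - Fill one TB entry of a TFD and update the TFD's
 * TB count
 */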
static inline void iwl_pcie_gen2_set_tb(struct iwl_trans *trans, void *tfd,
					u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd_fh->num_tbs = cpu_to_le16(idx + 1);
}

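/*
 * iwl_pcie_gen2_build_tfd - Add one DMA chunk to the TFD at the queue's
 * write pointer
 *
 * Returns the index of the newly added TB, or a negative error code.
 */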
int iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
			    dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans_pcie->tfd_size);

	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_gen2_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}

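/*
 * iwl_fill_data_tbs - Map the skb payload and add a TB for each chunk
 *
 * Maps the remainder of the skb head (past the 802.11 header) and every
 * page fragment, adding a TB for each mapping.
 */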
static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta,
			     struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 tb2_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, txq,
						txq->write_ptr);
			return -EINVAL;
		}
		iwl_pcie_gen2_build_tfd(trans, txq, tb2_phys, tb2_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, txq,
						txq->write_ptr);
			return -EINVAL;
		}
		tb_idx = iwl_pcie_gen2_build_tfd(trans, txq, tb_phys,
						 skb_frag_size(frag), false);

		out_meta->tbs |= BIT(tb_idx);
	}

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  hdr_len, skb->len - hdr_len);
	return 0;
}

#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(TX_CMD_FLG_MH_PAD)

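/*
 * iwl_trans_pcie_gen2_tx - Build a TFD for an skb and queue it for TX
 */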
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;

	txq = &trans_pcie->txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	tb1_len = ALIGN(len, 4);
	/* Tell NIC about any 2-byte padding after MAC header */
	if (tb1_len != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_gen2_build_tfd(trans, txq, tb0_phys, IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_gen2_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
				       out_meta, dev_cmd, tb1_len)))
		goto out_err;

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
	       IWL_FIRST_TB_SIZE);

	tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
				      iwl_pcie_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr) {
		if (txq->wd_timeout) {
			/*
			 * If the TXQ is active, then set the timer; if not,
			 * set the timer in remainder so that the timer will
			 * be armed with the right value when the station
			 * wakes up.
			 */
			if (!txq->frozen)
				mod_timer(&txq->stuck_timer,
					  jiffies + txq->wd_timeout);
			else
				txq->frozen_expiry_remainder = txq->wd_timeout;
		}
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
		iwl_trans_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	if (iwl_queue_space(txq) < txq->high_mark)
		iwl_stop_queue(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}

/*
 * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		iwl_pcie_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq->id);
				iwl_trans_unref(trans);
			} else if (trans_pcie->ref_cmd_in_flight) {
				trans_pcie->ref_cmd_in_flight = false;
				IWL_DEBUG_RPM(trans,
					      "clear ref_cmd_in_flight\n");
				iwl_trans_unref(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

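/*
 * iwl_trans_pcie_dyn_txq_alloc - Activate a Tx queue and configure it in
 * the firmware via the queue configuration command
 */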
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id,
				 unsigned int timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[cmd->scd_queue];
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(*cmd) },
		.data = { cmd, },
		.flags = 0,
	};
	u16 ssn = le16_to_cpu(cmd->ssn);

	if (test_and_set_bit(cmd->scd_queue, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d already used", cmd->scd_queue);
		return -EINVAL;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	/*
	 * Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF)
	 */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (cmd->scd_queue << 8));

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d WrPtr: %d\n",
			    cmd->scd_queue, ssn & 0xff);

	cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd->byte_cnt_addr = cpu_to_le64(trans_pcie->scd_bc_tbls.dma +
					 cmd->scd_queue *
					 sizeof(struct iwlagn_scd_bc_tbl));
	cmd->cb_size = cpu_to_le64(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX));

	return iwl_trans_send_cmd(trans, &hcmd);
}

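/*
 * iwl_trans_pcie_dyn_txq_free - Deactivate a Tx queue and unmap anything
 * still queued on it
 */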
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->txq[queue].frozen_expiry_remainder = 0;
	trans_pcie->txq[queue].frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_pcie_gen2_txq_unmap(trans, queue);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}