drivers/staging/rdma/hfi1/sdma.c
1 /*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51 #include <linux/spinlock.h>
52 #include <linux/seqlock.h>
53 #include <linux/netdevice.h>
54 #include <linux/moduleparam.h>
55 #include <linux/bitops.h>
56 #include <linux/timer.h>
57 #include <linux/vmalloc.h>
58
59 #include "hfi.h"
60 #include "common.h"
61 #include "qp.h"
62 #include "sdma.h"
63 #include "iowait.h"
64 #include "trace.h"
65
66 /* must be a power of 2, >= 64 and <= 32768 */
67 #define SDMA_DESCQ_CNT 1024
68 #define INVALID_TAIL 0xffff
69
70 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
71 module_param(sdma_descq_cnt, uint, S_IRUGO);
72 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
73
74 static uint sdma_idle_cnt = 250;
75 module_param(sdma_idle_cnt, uint, S_IRUGO);
76 MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");
77
78 uint mod_num_sdma;
79 module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
80 MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");
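
/*
 * Illustrative usage (a sketch, not part of the driver): the three knobs
 * above are read-only (S_IRUGO) module parameters of the hfi1 module, so
 * they are set at load time, e.g.
 *
 *   modprobe hfi1 sdma_descq_cnt=2048 sdma_idle_cnt=500 num_sdma=8
 *
 * sdma_descq_cnt is validated by sdma_get_descq_cnt() below, and num_sdma
 * is honored by sdma_init() only when it does not exceed the chip's engine
 * count and is at least num_vls.
 */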
81
82 #define SDMA_WAIT_BATCH_SIZE 20
83 /* max wait time for an SDMA engine to indicate it has halted */
84 #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
85 /* all SDMA engine errors that cause a halt */
86
87 #define SD(name) SEND_DMA_##name
88 #define ALL_SDMA_ENG_HALT_ERRS \
89 (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
90 | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
91 | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
92 | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
93 | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
94 | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
95 | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
96 | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
97 | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
98 | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
99 | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
100 | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
101 | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
102 | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
103 | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
104 | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
105 | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
106 | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
107
108 /* sdma_sendctrl operations */
109 #define SDMA_SENDCTRL_OP_ENABLE (1U << 0)
110 #define SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
111 #define SDMA_SENDCTRL_OP_HALT (1U << 2)
112 #define SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
113
114 /* handle long defines */
115 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
116 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
117 #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
118 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
119
120 static const char * const sdma_state_names[] = {
121 [sdma_state_s00_hw_down] = "s00_HwDown",
122 [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
123 [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
124 [sdma_state_s20_idle] = "s20_Idle",
125 [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
126 [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
127 [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
128 [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
129 [sdma_state_s80_hw_freeze] = "s80_HwFreeze",
130 [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
131 [sdma_state_s99_running] = "s99_Running",
132 };
133
134 static const char * const sdma_event_names[] = {
135 [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
136 [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
137 [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
138 [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
139 [sdma_event_e30_go_running] = "e30_GoRunning",
140 [sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
141 [sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
142 [sdma_event_e60_hw_halted] = "e60_HwHalted",
143 [sdma_event_e70_go_idle] = "e70_GoIdle",
144 [sdma_event_e80_hw_freeze] = "e80_HwFreeze",
145 [sdma_event_e81_hw_frozen] = "e81_HwFrozen",
146 [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
147 [sdma_event_e85_link_down] = "e85_LinkDown",
148 [sdma_event_e90_sw_halted] = "e90_SwHalted",
149 };
150
151 static const struct sdma_set_state_action sdma_action_table[] = {
152 [sdma_state_s00_hw_down] = {
153 .go_s99_running_tofalse = 1,
154 .op_enable = 0,
155 .op_intenable = 0,
156 .op_halt = 0,
157 .op_cleanup = 0,
158 },
159 [sdma_state_s10_hw_start_up_halt_wait] = {
160 .op_enable = 0,
161 .op_intenable = 0,
162 .op_halt = 1,
163 .op_cleanup = 0,
164 },
165 [sdma_state_s15_hw_start_up_clean_wait] = {
166 .op_enable = 0,
167 .op_intenable = 1,
168 .op_halt = 0,
169 .op_cleanup = 1,
170 },
171 [sdma_state_s20_idle] = {
172 .op_enable = 0,
173 .op_intenable = 1,
174 .op_halt = 0,
175 .op_cleanup = 0,
176 },
177 [sdma_state_s30_sw_clean_up_wait] = {
178 .op_enable = 0,
179 .op_intenable = 0,
180 .op_halt = 0,
181 .op_cleanup = 0,
182 },
183 [sdma_state_s40_hw_clean_up_wait] = {
184 .op_enable = 0,
185 .op_intenable = 0,
186 .op_halt = 0,
187 .op_cleanup = 1,
188 },
189 [sdma_state_s50_hw_halt_wait] = {
190 .op_enable = 0,
191 .op_intenable = 0,
192 .op_halt = 0,
193 .op_cleanup = 0,
194 },
195 [sdma_state_s60_idle_halt_wait] = {
196 .go_s99_running_tofalse = 1,
197 .op_enable = 0,
198 .op_intenable = 0,
199 .op_halt = 1,
200 .op_cleanup = 0,
201 },
202 [sdma_state_s80_hw_freeze] = {
203 .op_enable = 0,
204 .op_intenable = 0,
205 .op_halt = 0,
206 .op_cleanup = 0,
207 },
208 [sdma_state_s82_freeze_sw_clean] = {
209 .op_enable = 0,
210 .op_intenable = 0,
211 .op_halt = 0,
212 .op_cleanup = 0,
213 },
214 [sdma_state_s99_running] = {
215 .op_enable = 1,
216 .op_intenable = 1,
217 .op_halt = 0,
218 .op_cleanup = 0,
219 .go_s99_running_totrue = 1,
220 },
221 };
222
223 #define SDMA_TAIL_UPDATE_THRESH 0x1F
224
225 /* declare all statics here rather than keep sorting */
226 static void sdma_complete(struct kref *);
227 static void sdma_finalput(struct sdma_state *);
228 static void sdma_get(struct sdma_state *);
229 static void sdma_hw_clean_up_task(unsigned long);
230 static void sdma_put(struct sdma_state *);
231 static void sdma_set_state(struct sdma_engine *, enum sdma_states);
232 static void sdma_start_hw_clean_up(struct sdma_engine *);
233 static void sdma_start_sw_clean_up(struct sdma_engine *);
234 static void sdma_sw_clean_up_task(unsigned long);
235 static void sdma_sendctrl(struct sdma_engine *, unsigned);
236 static void init_sdma_regs(struct sdma_engine *, u32, uint);
237 static void sdma_process_event(
238 struct sdma_engine *sde,
239 enum sdma_events event);
240 static void __sdma_process_event(
241 struct sdma_engine *sde,
242 enum sdma_events event);
243 static void dump_sdma_state(struct sdma_engine *sde);
244 static void sdma_make_progress(struct sdma_engine *sde, u64 status);
245 static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
246 static void sdma_flush_descq(struct sdma_engine *sde);
247
248 /**
249 * sdma_state_name() - return state string from enum
250 * @state: state
251 */
252 static const char *sdma_state_name(enum sdma_states state)
253 {
254 return sdma_state_names[state];
255 }
256
257 static void sdma_get(struct sdma_state *ss)
258 {
259 kref_get(&ss->kref);
260 }
261
262 static void sdma_complete(struct kref *kref)
263 {
264 struct sdma_state *ss =
265 container_of(kref, struct sdma_state, kref);
266
267 complete(&ss->comp);
268 }
269
270 static void sdma_put(struct sdma_state *ss)
271 {
272 kref_put(&ss->kref, sdma_complete);
273 }
274
275 static void sdma_finalput(struct sdma_state *ss)
276 {
277 sdma_put(ss);
278 wait_for_completion(&ss->comp);
279 }
280
281 static inline void write_sde_csr(
282 struct sdma_engine *sde,
283 u32 offset0,
284 u64 value)
285 {
286 write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
287 }
288
289 static inline u64 read_sde_csr(
290 struct sdma_engine *sde,
291 u32 offset0)
292 {
293 return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
294 }
295
296 /*
297 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
298 * sdma engine 'sde' to drop to 0.
299 */
300 static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
301 int pause)
302 {
303 u64 off = 8 * sde->this_idx;
304 struct hfi1_devdata *dd = sde->dd;
305 int lcnt = 0;
306
307 while (1) {
308 u64 reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
309
310 reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
311 reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
312 if (reg == 0)
313 break;
314 if (lcnt++ > 100) {
315 dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u\n",
316 __func__, sde->this_idx, (u32)reg);
317 break;
318 }
319 udelay(1);
320 }
321 }
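
/*
 * A note on the bound above (a sketch of the arithmetic): the loop polls
 * the occupancy once per microsecond and gives up after ~100 iterations,
 * so an engine gets roughly 100 us (plus CSR read latency) to drain
 * before the error is logged and the wait is abandoned.
 */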
322
323 /*
324 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
325 * and pause for credit return.
326 */
327 void sdma_wait(struct hfi1_devdata *dd)
328 {
329 int i;
330
331 for (i = 0; i < dd->num_sdma; i++) {
332 struct sdma_engine *sde = &dd->per_sdma[i];
333
334 sdma_wait_for_packet_egress(sde, 0);
335 }
336 }
337
338 static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
339 {
340 u64 reg;
341
342 if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
343 return;
344 reg = cnt;
345 reg &= SD(DESC_CNT_CNT_MASK);
346 reg <<= SD(DESC_CNT_CNT_SHIFT);
347 write_sde_csr(sde, SD(DESC_CNT), reg);
348 }
349
350 /*
351 * Complete all the sdma requests with an SDMA_TXREQ_S_ABORTED status
352 *
353 * Depending on timing there can be txreqs in two places:
354 * - in the descq ring
355 * - in the flush list
356 *
357 * To avoid ordering issues the descq ring needs to be flushed
358 * first followed by the flush list.
359 *
360 * This routine is called from two places
361 * - From a work queue item
362 * - Directly from the state machine just before setting the
363 * state to running
364 *
365 * Must be called with head_lock held
366 *
367 */
368 static void sdma_flush(struct sdma_engine *sde)
369 {
370 struct sdma_txreq *txp, *txp_next;
371 LIST_HEAD(flushlist);
372
373 /* flush from head to tail */
374 sdma_flush_descq(sde);
375 spin_lock(&sde->flushlist_lock);
376 /* copy flush list */
377 list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
378 list_del_init(&txp->list);
379 list_add_tail(&txp->list, &flushlist);
380 }
381 spin_unlock(&sde->flushlist_lock);
382 /* flush from flush list */
383 list_for_each_entry_safe(txp, txp_next, &flushlist, list) {
384 int drained = 0;
385 /* protect against complete modifying */
386 struct iowait *wait = txp->wait;
387
388 list_del_init(&txp->list);
389 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
390 trace_hfi1_sdma_out_sn(sde, txp->sn);
391 if (WARN_ON_ONCE(sde->head_sn != txp->sn))
392 dd_dev_err(sde->dd, "expected %llu got %llu\n",
393 sde->head_sn, txp->sn);
394 sde->head_sn++;
395 #endif
396 sdma_txclean(sde->dd, txp);
397 if (wait)
398 drained = atomic_dec_and_test(&wait->sdma_busy);
399 if (txp->complete)
400 (*txp->complete)(txp, SDMA_TXREQ_S_ABORTED, drained);
401 if (wait && drained)
402 iowait_drain_wakeup(wait);
403 }
404 }
405
406 /*
407 * Fields a work request for flushing the descq ring
408 * and the flush list
409 *
410 * If the engine has been brought to running during
411 * the scheduling delay, the flush is skipped on the
412 * assumption that the transition to running already
413 * flushed these lists.
414 *
415 */
416 static void sdma_field_flush(struct work_struct *work)
417 {
418 unsigned long flags;
419 struct sdma_engine *sde =
420 container_of(work, struct sdma_engine, flush_worker);
421
422 write_seqlock_irqsave(&sde->head_lock, flags);
423 if (!__sdma_running(sde))
424 sdma_flush(sde);
425 write_sequnlock_irqrestore(&sde->head_lock, flags);
426 }
427
428 static void sdma_err_halt_wait(struct work_struct *work)
429 {
430 struct sdma_engine *sde = container_of(work, struct sdma_engine,
431 err_halt_worker);
432 u64 statuscsr;
433 unsigned long timeout;
434
435 timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
436 while (1) {
437 statuscsr = read_sde_csr(sde, SD(STATUS));
438 statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
439 if (statuscsr)
440 break;
441 if (time_after(jiffies, timeout)) {
442 dd_dev_err(sde->dd,
443 "SDMA engine %d - timeout waiting for engine to halt\n",
444 sde->this_idx);
445 /*
446 * Continue anyway. This could happen if there was
447 * an uncorrectable error in the wrong spot.
448 */
449 break;
450 }
451 usleep_range(80, 120);
452 }
453
454 sdma_process_event(sde, sdma_event_e15_hw_halt_done);
455 }
456
457 static void sdma_start_err_halt_wait(struct sdma_engine *sde)
458 {
459 schedule_work(&sde->err_halt_worker);
460 }
461
462
463 static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
464 {
465 if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
466
467 unsigned index;
468 struct hfi1_devdata *dd = sde->dd;
469
470 for (index = 0; index < dd->num_sdma; index++) {
471 struct sdma_engine *curr_sdma = &dd->per_sdma[index];
472
473 if (curr_sdma != sde)
474 curr_sdma->progress_check_head =
475 curr_sdma->descq_head;
476 }
477 dd_dev_err(sde->dd,
478 "SDMA engine %d - check scheduled\n",
479 sde->this_idx);
480 mod_timer(&sde->err_progress_check_timer, jiffies + 10);
481 }
482 }
483
484 static void sdma_err_progress_check(unsigned long data)
485 {
486 unsigned index;
487 struct sdma_engine *sde = (struct sdma_engine *)data;
488
489 dd_dev_err(sde->dd, "SDE progress check event\n");
490 for (index = 0; index < sde->dd->num_sdma; index++) {
491 struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
492 unsigned long flags;
493
494 /* check progress on each engine except the current one */
495 if (curr_sde == sde)
496 continue;
497 /*
498 * We must lock interrupts when acquiring sde->lock,
499 * to avoid a deadlock if an interrupt triggers and spins
500 * on the same lock on the same CPU.
501 */
502 spin_lock_irqsave(&curr_sde->tail_lock, flags);
503 write_seqlock(&curr_sde->head_lock);
504
505 /* skip non-running queues */
506 if (curr_sde->state.current_state != sdma_state_s99_running) {
507 write_sequnlock(&curr_sde->head_lock);
508 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
509 continue;
510 }
511
512 if ((curr_sde->descq_head != curr_sde->descq_tail) &&
513 (curr_sde->descq_head ==
514 curr_sde->progress_check_head))
515 __sdma_process_event(curr_sde,
516 sdma_event_e90_sw_halted);
517 write_sequnlock(&curr_sde->head_lock);
518 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
519 }
520 schedule_work(&sde->err_halt_worker);
521 }
522
523 static void sdma_hw_clean_up_task(unsigned long opaque)
524 {
525 struct sdma_engine *sde = (struct sdma_engine *) opaque;
526 u64 statuscsr;
527
528 while (1) {
529 #ifdef CONFIG_SDMA_VERBOSITY
530 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
531 sde->this_idx, slashstrip(__FILE__), __LINE__,
532 __func__);
533 #endif
534 statuscsr = read_sde_csr(sde, SD(STATUS));
535 statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
536 if (statuscsr)
537 break;
538 udelay(10);
539 }
540
541 sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
542 }
543
544 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
545 {
546 smp_read_barrier_depends(); /* see sdma_update_tail() */
547 return sde->tx_ring[sde->tx_head & sde->sdma_mask];
548 }
549
550 /*
551 * flush ring for recovery
552 */
553 static void sdma_flush_descq(struct sdma_engine *sde)
554 {
555 u16 head, tail;
556 int progress = 0;
557 struct sdma_txreq *txp = get_txhead(sde);
558
559 /* The reason for some of the complexity of this code is that
560 * not all descriptors have corresponding txps. So, we have to
561 * be able to skip over descs until we wander into the range of
562 * the next txp on the list.
563 */
564 head = sde->descq_head & sde->sdma_mask;
565 tail = sde->descq_tail & sde->sdma_mask;
566 while (head != tail) {
567 /* advance head, wrap if needed */
568 head = ++sde->descq_head & sde->sdma_mask;
569 /* if now past this txp's descs, do the callback */
570 if (txp && txp->next_descq_idx == head) {
571 int drained = 0;
572 /* protect against complete modifying */
573 struct iowait *wait = txp->wait;
574
575 /* remove from list */
576 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
577 if (wait)
578 drained = atomic_dec_and_test(&wait->sdma_busy);
579 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
580 trace_hfi1_sdma_out_sn(sde, txp->sn);
581 if (WARN_ON_ONCE(sde->head_sn != txp->sn))
582 dd_dev_err(sde->dd, "expected %llu got %llu\n",
583 sde->head_sn, txp->sn);
584 sde->head_sn++;
585 #endif
586 sdma_txclean(sde->dd, txp);
587 trace_hfi1_sdma_progress(sde, head, tail, txp);
588 if (txp->complete)
589 (*txp->complete)(
590 txp,
591 SDMA_TXREQ_S_ABORTED,
592 drained);
593 if (wait && drained)
594 iowait_drain_wakeup(wait);
595 /* see if there is another txp */
596 txp = get_txhead(sde);
597 }
598 progress++;
599 }
600 if (progress)
601 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
602 }
603
604 static void sdma_sw_clean_up_task(unsigned long opaque)
605 {
606 struct sdma_engine *sde = (struct sdma_engine *) opaque;
607 unsigned long flags;
608
609 spin_lock_irqsave(&sde->tail_lock, flags);
610 write_seqlock(&sde->head_lock);
611
612 /*
613 * At this point, the following should always be true:
614 * - We are halted, so no more descriptors are getting retired.
615 * - We are not running, so no one is submitting new work.
616 * - Only we can send the e40_sw_cleaned, so we can't start
617 * running again until we say so. So, the active list and
618 * descq are ours to play with.
619 */
620
621
622 /*
623 * In the error clean up sequence, software clean must be called
624 * before the hardware clean so we can use the hardware head in
625 * the progress routine. A hardware clean or SPC unfreeze will
626 * reset the hardware head.
627 *
628 * Process all retired requests. The progress routine will use the
629 * latest physical hardware head - we are not running so speed does
630 * not matter.
631 */
632 sdma_make_progress(sde, 0);
633
634 sdma_flush(sde);
635
636 /*
637 * Reset our notion of head and tail.
638 * Note that the HW registers have been reset via an earlier
639 * clean up.
640 */
641 sde->descq_tail = 0;
642 sde->descq_head = 0;
643 sde->desc_avail = sdma_descq_freecnt(sde);
644 *sde->head_dma = 0;
645
646 __sdma_process_event(sde, sdma_event_e40_sw_cleaned);
647
648 write_sequnlock(&sde->head_lock);
649 spin_unlock_irqrestore(&sde->tail_lock, flags);
650 }
651
652 static void sdma_sw_tear_down(struct sdma_engine *sde)
653 {
654 struct sdma_state *ss = &sde->state;
655
656 /* Releasing this reference means the state machine has stopped. */
657 sdma_put(ss);
658
659 /* stop waiting for all unfreeze events to complete */
660 atomic_set(&sde->dd->sdma_unfreeze_count, -1);
661 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
662 }
663
664 static void sdma_start_hw_clean_up(struct sdma_engine *sde)
665 {
666 tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
667 }
668
669 static void sdma_start_sw_clean_up(struct sdma_engine *sde)
670 {
671 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
672 }
673
674 static void sdma_set_state(struct sdma_engine *sde,
675 enum sdma_states next_state)
676 {
677 struct sdma_state *ss = &sde->state;
678 const struct sdma_set_state_action *action = sdma_action_table;
679 unsigned op = 0;
680
681 trace_hfi1_sdma_state(
682 sde,
683 sdma_state_names[ss->current_state],
684 sdma_state_names[next_state]);
685
686 /* debugging bookkeeping */
687 ss->previous_state = ss->current_state;
688 ss->previous_op = ss->current_op;
689 ss->current_state = next_state;
690
691 if (ss->previous_state != sdma_state_s99_running
692 && next_state == sdma_state_s99_running)
693 sdma_flush(sde);
694
695 if (action[next_state].op_enable)
696 op |= SDMA_SENDCTRL_OP_ENABLE;
697
698 if (action[next_state].op_intenable)
699 op |= SDMA_SENDCTRL_OP_INTENABLE;
700
701 if (action[next_state].op_halt)
702 op |= SDMA_SENDCTRL_OP_HALT;
703
704 if (action[next_state].op_cleanup)
705 op |= SDMA_SENDCTRL_OP_CLEANUP;
706
707 if (action[next_state].go_s99_running_tofalse)
708 ss->go_s99_running = 0;
709
710 if (action[next_state].go_s99_running_totrue)
711 ss->go_s99_running = 1;
712
713 ss->current_op = op;
714 sdma_sendctrl(sde, ss->current_op);
715 }
716
717 /**
718 * sdma_get_descq_cnt() - called when device probed
719 *
720 * Return a validated descq count.
721 *
722 * This is currently only used in the verbs initialization to build the tx
723 * list.
724 *
725 * This will probably be deleted in favor of a more scalable approach to
726 * alloc tx's.
727 *
728 */
729 u16 sdma_get_descq_cnt(void)
730 {
731 u16 count = sdma_descq_cnt;
732
733 if (!count)
734 return SDMA_DESCQ_CNT;
735 /* count must be a power of 2 between 64 and 32768, inclusive.
736 * Otherwise return the default.
737 */
738 if (!is_power_of_2(count))
739 return SDMA_DESCQ_CNT;
740 if (count < 64 || count > 32768)
741 return SDMA_DESCQ_CNT;
742 return count;
743 }
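
/*
 * Worked example of the validation above (a sketch, not from the driver):
 *
 *   sdma_descq_cnt = 0     ->  1024  (SDMA_DESCQ_CNT default)
 *   sdma_descq_cnt = 1000  ->  1024  (not a power of 2)
 *   sdma_descq_cnt = 32    ->  1024  (below 64)
 *   sdma_descq_cnt = 2048  ->  2048  (accepted)
 */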
744 /**
745 * sdma_select_engine_vl() - select sdma engine
746 * @dd: devdata
747 * @selector: a spreading factor
748 * @vl: this vl
749 *
750 *
751 * This function returns an engine based on the selector and a vl. The
752 * mapping fields are protected by RCU.
753 */
754 struct sdma_engine *sdma_select_engine_vl(
755 struct hfi1_devdata *dd,
756 u32 selector,
757 u8 vl)
758 {
759 struct sdma_vl_map *m;
760 struct sdma_map_elem *e;
761 struct sdma_engine *rval;
762
763 if (WARN_ON(vl > 8))
764 return NULL;
765
766 rcu_read_lock();
767 m = rcu_dereference(dd->sdma_map);
768 if (unlikely(!m)) {
769 rcu_read_unlock();
770 return NULL;
771 }
772 e = m->map[vl & m->mask];
773 rval = e->sde[selector & e->mask];
774 rcu_read_unlock();
775
776 trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
777 return rval;
778 }
779
780 /**
781 * sdma_select_engine_sc() - select sdma engine
782 * @dd: devdata
783 * @selector: a spreading factor
784 * @sc5: the 5 bit sc
785 *
786 *
787 * This function returns an engine based on the selector and an sc.
788 */
789 struct sdma_engine *sdma_select_engine_sc(
790 struct hfi1_devdata *dd,
791 u32 selector,
792 u8 sc5)
793 {
794 u8 vl = sc_to_vlt(dd, sc5);
795
796 return sdma_select_engine_vl(dd, selector, vl);
797 }
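
/*
 * Caller sketch (hypothetical values, not taken from this file): a ULP
 * that has computed a spreading hash and knows the packet's 5-bit SC
 * might pick an engine like this:
 *
 *   struct sdma_engine *sde;
 *
 *   sde = sdma_select_engine_sc(dd, qp_hash, sc5);
 *   if (sde)
 *           ... build an sdma_txreq and submit it on this engine ...
 *
 * sdma_select_engine_sc() maps sc5 to a VL and defers to
 * sdma_select_engine_vl(), which returns NULL only when the RCU-protected
 * map has not been published yet or the VL is out of range.
 */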
798
799 /*
800 * Free the indicated map struct
801 */
802 static void sdma_map_free(struct sdma_vl_map *m)
803 {
804 int i;
805
806 for (i = 0; m && i < m->actual_vls; i++)
807 kfree(m->map[i]);
808 kfree(m);
809 }
810
811 /*
812 * Handle RCU callback
813 */
814 static void sdma_map_rcu_callback(struct rcu_head *list)
815 {
816 struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
817
818 sdma_map_free(m);
819 }
820
821 /**
822 * sdma_map_init - called when # vls change
823 * @dd: hfi1_devdata
824 * @port: port number
825 * @num_vls: number of vls
826 * @vl_engines: per vl engine mapping (optional)
827 *
828 * This routine changes the mapping based on the number of vls.
829 *
830 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
831 * implies auto computing the loading and giving each VL a uniform
832 * distribution of engines per VL.
833 *
834 * The auto algorithm computes the sde_per_vl and the number of extra
835 * engines. Any extra engines are added from the last VL on down.
836 *
837 * rcu locking is used here to control access to the mapping fields.
838 *
839 * If either num_vls or num_sdma is not a power of 2, the array sizes
840 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
841 * up to the next highest power of 2 and the first entry is reused
842 * in a round robin fashion.
843 *
844 * If an error occurs the map change is not done and the mapping is
845 * not changed.
846 *
847 */
848 int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
849 {
850 int i, j;
851 int extra, sde_per_vl;
852 int engine = 0;
853 u8 lvl_engines[OPA_MAX_VLS];
854 struct sdma_vl_map *oldmap, *newmap;
855
856 if (!(dd->flags & HFI1_HAS_SEND_DMA))
857 return 0;
858
859 if (!vl_engines) {
860 /* truncate divide */
861 sde_per_vl = dd->num_sdma / num_vls;
862 /* extras */
863 extra = dd->num_sdma % num_vls;
864 vl_engines = lvl_engines;
865 /* add extras from last vl down */
866 for (i = num_vls - 1; i >= 0; i--, extra--)
867 vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
868 }
869 /* build new map */
870 newmap = kzalloc(
871 sizeof(struct sdma_vl_map) +
872 roundup_pow_of_two(num_vls) *
873 sizeof(struct sdma_map_elem *),
874 GFP_KERNEL);
875 if (!newmap)
876 goto bail;
877 newmap->actual_vls = num_vls;
878 newmap->vls = roundup_pow_of_two(num_vls);
879 newmap->mask = (1 << ilog2(newmap->vls)) - 1;
880 for (i = 0; i < newmap->vls; i++) {
881 /* save for wrap around */
882 int first_engine = engine;
883
884 if (i < newmap->actual_vls) {
885 int sz = roundup_pow_of_two(vl_engines[i]);
886
887 /* only allocate once */
888 newmap->map[i] = kzalloc(
889 sizeof(struct sdma_map_elem) +
890 sz * sizeof(struct sdma_engine *),
891 GFP_KERNEL);
892 if (!newmap->map[i])
893 goto bail;
894 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
895 /* assign engines */
896 for (j = 0; j < sz; j++) {
897 newmap->map[i]->sde[j] =
898 &dd->per_sdma[engine];
899 if (++engine >= first_engine + vl_engines[i])
900 /* wrap back to first engine */
901 engine = first_engine;
902 }
903 } else {
904 /* just re-use entry without allocating */
905 newmap->map[i] = newmap->map[i % num_vls];
906 }
907 engine = first_engine + vl_engines[i];
908 }
909 /* newmap in hand, save old map */
910 spin_lock_irq(&dd->sde_map_lock);
911 oldmap = rcu_dereference_protected(dd->sdma_map,
912 lockdep_is_held(&dd->sde_map_lock));
913
914 /* publish newmap */
915 rcu_assign_pointer(dd->sdma_map, newmap);
916
917 spin_unlock_irq(&dd->sde_map_lock);
918 /* success, free any old map after grace period */
919 if (oldmap)
920 call_rcu(&oldmap->list, sdma_map_rcu_callback);
921 return 0;
922 bail:
923 /* free any partial allocation */
924 sdma_map_free(newmap);
925 return -ENOMEM;
926 }
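
/*
 * Worked example of the auto distribution above (a sketch): with
 * dd->num_sdma = 16 and num_vls = 3, sde_per_vl = 5 and extra = 1, and
 * the extra engine is added from the last VL down:
 *
 *   vl_engines[] = { 5, 5, 6 }  (VL0 -> engines 0-4, VL1 -> 5-9,
 *                                VL2 -> 10-15)
 *
 * num_vls is rounded up to 4 map slots, so map[3] reuses map[0].  Each
 * sdma_map_elem is likewise rounded up to a power of 2; for VL0 that is
 * 8 entries filled round robin as engines 0 1 2 3 4 0 1 2, which
 * sdma_select_engine_vl() then indexes with (selector & e->mask).
 */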
927
928 /*
929 * Clean up allocated memory.
930 *
931 * This routine can be called regardless of the success of sdma_init()
932 *
933 */
934 static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
935 {
936 size_t i;
937 struct sdma_engine *sde;
938
939 if (dd->sdma_pad_dma) {
940 dma_free_coherent(&dd->pcidev->dev, 4,
941 (void *)dd->sdma_pad_dma,
942 dd->sdma_pad_phys);
943 dd->sdma_pad_dma = NULL;
944 dd->sdma_pad_phys = 0;
945 }
946 if (dd->sdma_heads_dma) {
947 dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
948 (void *)dd->sdma_heads_dma,
949 dd->sdma_heads_phys);
950 dd->sdma_heads_dma = NULL;
951 dd->sdma_heads_phys = 0;
952 }
953 for (i = 0; dd->per_sdma && i < num_engines; ++i) {
954 sde = &dd->per_sdma[i];
955
956 sde->head_dma = NULL;
957 sde->head_phys = 0;
958
959 if (sde->descq) {
960 dma_free_coherent(
961 &dd->pcidev->dev,
962 sde->descq_cnt * sizeof(u64[2]),
963 sde->descq,
964 sde->descq_phys
965 );
966 sde->descq = NULL;
967 sde->descq_phys = 0;
968 }
969 if (is_vmalloc_addr(sde->tx_ring))
970 vfree(sde->tx_ring);
971 else
972 kfree(sde->tx_ring);
973 sde->tx_ring = NULL;
974 }
975 spin_lock_irq(&dd->sde_map_lock);
976 kfree(rcu_access_pointer(dd->sdma_map));
977 RCU_INIT_POINTER(dd->sdma_map, NULL);
978 spin_unlock_irq(&dd->sde_map_lock);
979 synchronize_rcu();
980 kfree(dd->per_sdma);
981 dd->per_sdma = NULL;
982 }
983
984 /**
985 * sdma_init() - called when device probed
986 * @dd: hfi1_devdata
987 * @port: port number (currently only zero)
988 *
989 * sdma_init initializes the specified number of engines.
990 *
991 * The code initializes each sde and its CSRs.  Interrupts
992 * are not required to be enabled.
993 *
994 * Returns:
995 * 0 - success, -errno on failure
996 */
997 int sdma_init(struct hfi1_devdata *dd, u8 port)
998 {
999 unsigned this_idx;
1000 struct sdma_engine *sde;
1001 u16 descq_cnt;
1002 void *curr_head;
1003 struct hfi1_pportdata *ppd = dd->pport + port;
1004 u32 per_sdma_credits;
1005 uint idle_cnt = sdma_idle_cnt;
1006 size_t num_engines = dd->chip_sdma_engines;
1007
1008 if (!HFI1_CAP_IS_KSET(SDMA)) {
1009 HFI1_CAP_CLEAR(SDMA_AHG);
1010 return 0;
1011 }
1012 if (mod_num_sdma &&
1013 /* can't exceed chip support */
1014 mod_num_sdma <= dd->chip_sdma_engines &&
1015 /* count must be >= vls */
1016 mod_num_sdma >= num_vls)
1017 num_engines = mod_num_sdma;
1018
1019 dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
1020 dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
1021 dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
1022 dd->chip_sdma_mem_size);
1023
1024 per_sdma_credits =
1025 dd->chip_sdma_mem_size/(num_engines * SDMA_BLOCK_SIZE);
1026
1027 /* set up freeze waitqueue */
1028 init_waitqueue_head(&dd->sdma_unfreeze_wq);
1029 atomic_set(&dd->sdma_unfreeze_count, 0);
1030
1031 descq_cnt = sdma_get_descq_cnt();
1032 dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
1033 num_engines, descq_cnt);
1034
1035 /* alloc memory for array of send engines */
1036 dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
1037 if (!dd->per_sdma)
1038 return -ENOMEM;
1039
1040 idle_cnt = ns_to_cclock(dd, idle_cnt);
1041 /* Allocate memory for SendDMA descriptor FIFOs */
1042 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1043 sde = &dd->per_sdma[this_idx];
1044 sde->dd = dd;
1045 sde->ppd = ppd;
1046 sde->this_idx = this_idx;
1047 sde->descq_cnt = descq_cnt;
1048 sde->desc_avail = sdma_descq_freecnt(sde);
1049 sde->sdma_shift = ilog2(descq_cnt);
1050 sde->sdma_mask = (1 << sde->sdma_shift) - 1;
1051 sde->descq_full_count = 0;
1052
1053 /* Create a mask for all 3 chip interrupt sources */
1054 sde->imask = (u64)1 << (0*TXE_NUM_SDMA_ENGINES + this_idx)
1055 | (u64)1 << (1*TXE_NUM_SDMA_ENGINES + this_idx)
1056 | (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
1057 /* Create a mask specifically for sdma_idle */
1058 sde->idle_mask =
1059 (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
1060 /* Create a mask specifically for sdma_progress */
1061 sde->progress_mask =
1062 (u64)1 << (TXE_NUM_SDMA_ENGINES + this_idx);
1063 spin_lock_init(&sde->tail_lock);
1064 seqlock_init(&sde->head_lock);
1065 spin_lock_init(&sde->senddmactrl_lock);
1066 spin_lock_init(&sde->flushlist_lock);
1067 /* ensure there is always a zero bit */
1068 sde->ahg_bits = 0xfffffffe00000000ULL;
1069
1070 sdma_set_state(sde, sdma_state_s00_hw_down);
1071
1072 /* set up reference counting */
1073 kref_init(&sde->state.kref);
1074 init_completion(&sde->state.comp);
1075
1076 INIT_LIST_HEAD(&sde->flushlist);
1077 INIT_LIST_HEAD(&sde->dmawait);
1078
1079 sde->tail_csr =
1080 get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
1081
1082 if (idle_cnt)
1083 dd->default_desc1 =
1084 SDMA_DESC1_HEAD_TO_HOST_FLAG;
1085 else
1086 dd->default_desc1 =
1087 SDMA_DESC1_INT_REQ_FLAG;
1088
1089 tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
1090 (unsigned long)sde);
1091
1092 tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
1093 (unsigned long)sde);
1094 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
1095 INIT_WORK(&sde->flush_worker, sdma_field_flush);
1096
1097 sde->progress_check_head = 0;
1098
1099 init_timer(&sde->err_progress_check_timer);
1100 sde->err_progress_check_timer.function =
1101 sdma_err_progress_check;
1102 sde->err_progress_check_timer.data = (unsigned long)sde;
1103
1104 sde->descq = dma_zalloc_coherent(
1105 &dd->pcidev->dev,
1106 descq_cnt * sizeof(u64[2]),
1107 &sde->descq_phys,
1108 GFP_KERNEL
1109 );
1110 if (!sde->descq)
1111 goto bail;
1112 sde->tx_ring =
1113 kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
1114 GFP_KERNEL);
1115 if (!sde->tx_ring)
1116 sde->tx_ring =
1117 vzalloc(
1118 sizeof(struct sdma_txreq *) *
1119 descq_cnt);
1120 if (!sde->tx_ring)
1121 goto bail;
1122 }
1123
1124 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1125 /* Allocate memory for DMA of head registers to memory */
1126 dd->sdma_heads_dma = dma_zalloc_coherent(
1127 &dd->pcidev->dev,
1128 dd->sdma_heads_size,
1129 &dd->sdma_heads_phys,
1130 GFP_KERNEL
1131 );
1132 if (!dd->sdma_heads_dma) {
1133 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1134 goto bail;
1135 }
1136
1137 /* Allocate memory for pad */
1138 dd->sdma_pad_dma = dma_zalloc_coherent(
1139 &dd->pcidev->dev,
1140 sizeof(u32),
1141 &dd->sdma_pad_phys,
1142 GFP_KERNEL
1143 );
1144 if (!dd->sdma_pad_dma) {
1145 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1146 goto bail;
1147 }
1148
1149 /* assign each engine to different cacheline and init registers */
1150 curr_head = (void *)dd->sdma_heads_dma;
1151 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1152 unsigned long phys_offset;
1153
1154 sde = &dd->per_sdma[this_idx];
1155
1156 sde->head_dma = curr_head;
1157 curr_head += L1_CACHE_BYTES;
1158 phys_offset = (unsigned long)sde->head_dma -
1159 (unsigned long)dd->sdma_heads_dma;
1160 sde->head_phys = dd->sdma_heads_phys + phys_offset;
1161 init_sdma_regs(sde, per_sdma_credits, idle_cnt);
1162 }
1163 dd->flags |= HFI1_HAS_SEND_DMA;
1164 dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
1165 dd->num_sdma = num_engines;
1166 if (sdma_map_init(dd, port, ppd->vls_operational, NULL))
1167 goto bail;
1168 dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
1169 return 0;
1170
1171 bail:
1172 sdma_clean(dd, num_engines);
1173 return -ENOMEM;
1174 }
1175
1176 /**
1177 * sdma_all_running() - called when the link goes up
1178 * @dd: hfi1_devdata
1179 *
1180 * This routine moves all engines to the running state.
1181 */
1182 void sdma_all_running(struct hfi1_devdata *dd)
1183 {
1184 struct sdma_engine *sde;
1185 unsigned int i;
1186
1187 /* move all engines to running */
1188 for (i = 0; i < dd->num_sdma; ++i) {
1189 sde = &dd->per_sdma[i];
1190 sdma_process_event(sde, sdma_event_e30_go_running);
1191 }
1192 }
1193
1194 /**
1195 * sdma_all_idle() - called when the link goes down
1196 * @dd: hfi1_devdata
1197 *
1198 * This routine moves all engines to the idle state.
1199 */
1200 void sdma_all_idle(struct hfi1_devdata *dd)
1201 {
1202 struct sdma_engine *sde;
1203 unsigned int i;
1204
1205 /* idle all engines */
1206 for (i = 0; i < dd->num_sdma; ++i) {
1207 sde = &dd->per_sdma[i];
1208 sdma_process_event(sde, sdma_event_e70_go_idle);
1209 }
1210 }
1211
1212 /**
1213 * sdma_start() - called to kick off state processing for all engines
1214 * @dd: hfi1_devdata
1215 *
1216 * This routine is for kicking off the state processing for all required
1217 * sdma engines. Interrupts need to be working at this point.
1218 *
1219 */
1220 void sdma_start(struct hfi1_devdata *dd)
1221 {
1222 unsigned i;
1223 struct sdma_engine *sde;
1224
1225 /* kick off the engines state processing */
1226 for (i = 0; i < dd->num_sdma; ++i) {
1227 sde = &dd->per_sdma[i];
1228 sdma_process_event(sde, sdma_event_e10_go_hw_start);
1229 }
1230 }
1231
1232 /**
1233 * sdma_exit() - used when module is removed
1234 * @dd: hfi1_devdata
1235 */
1236 void sdma_exit(struct hfi1_devdata *dd)
1237 {
1238 unsigned this_idx;
1239 struct sdma_engine *sde;
1240
1241 for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
1242 ++this_idx) {
1243
1244 sde = &dd->per_sdma[this_idx];
1245 if (!list_empty(&sde->dmawait))
1246 dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
1247 sde->this_idx);
1248 sdma_process_event(sde, sdma_event_e00_go_hw_down);
1249
1250 del_timer_sync(&sde->err_progress_check_timer);
1251
1252 /*
1253 * This waits for the state machine to exit so it is not
1254 * necessary to kill the sdma_sw_clean_up_task to make sure
1255 * it is not running.
1256 */
1257 sdma_finalput(&sde->state);
1258 }
1259 sdma_clean(dd, dd->num_sdma);
1260 }
1261
1262 /*
1263 * unmap the indicated descriptor
1264 */
1265 static inline void sdma_unmap_desc(
1266 struct hfi1_devdata *dd,
1267 struct sdma_desc *descp)
1268 {
1269 switch (sdma_mapping_type(descp)) {
1270 case SDMA_MAP_SINGLE:
1271 dma_unmap_single(
1272 &dd->pcidev->dev,
1273 sdma_mapping_addr(descp),
1274 sdma_mapping_len(descp),
1275 DMA_TO_DEVICE);
1276 break;
1277 case SDMA_MAP_PAGE:
1278 dma_unmap_page(
1279 &dd->pcidev->dev,
1280 sdma_mapping_addr(descp),
1281 sdma_mapping_len(descp),
1282 DMA_TO_DEVICE);
1283 break;
1284 }
1285 }
1286
1287 /*
1288 * return the mode as indicated by the first
1289 * descriptor in the tx.
1290 */
1291 static inline u8 ahg_mode(struct sdma_txreq *tx)
1292 {
1293 return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1294 >> SDMA_DESC1_HEADER_MODE_SHIFT;
1295 }
1296
1297 /**
1298 * sdma_txclean() - clean tx of mappings, descp *kmalloc's
1299 * @dd: hfi1_devdata for unmapping
1300 * @tx: tx request to clean
1301 *
1302 * This is used in the progress routine to clean the tx or
1303 * by the ULP to toss an in-process tx build.
1304 *
1305 * The code can be called multiple times without issue.
1306 *
1307 */
1308 void sdma_txclean(
1309 struct hfi1_devdata *dd,
1310 struct sdma_txreq *tx)
1311 {
1312 u16 i;
1313
1314 if (tx->num_desc) {
1315 u8 skip = 0, mode = ahg_mode(tx);
1316
1317 /* unmap first */
1318 sdma_unmap_desc(dd, &tx->descp[0]);
1319 /* determine number of AHG descriptors to skip */
1320 if (mode > SDMA_AHG_APPLY_UPDATE1)
1321 skip = mode >> 1;
1322 for (i = 1 + skip; i < tx->num_desc; i++)
1323 sdma_unmap_desc(dd, &tx->descp[i]);
1324 tx->num_desc = 0;
1325 }
1326 kfree(tx->coalesce_buf);
1327 tx->coalesce_buf = NULL;
1328 /* kmalloc'ed descp */
1329 if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
1330 tx->desc_limit = ARRAY_SIZE(tx->descs);
1331 kfree(tx->descp);
1332 }
1333 }
1334
1335 static inline u16 sdma_gethead(struct sdma_engine *sde)
1336 {
1337 struct hfi1_devdata *dd = sde->dd;
1338 int use_dmahead;
1339 u16 hwhead;
1340
1341 #ifdef CONFIG_SDMA_VERBOSITY
1342 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1343 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1344 #endif
1345
1346 retry:
1347 use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
1348 (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
1349 hwhead = use_dmahead ?
1350 (u16) le64_to_cpu(*sde->head_dma) :
1351 (u16) read_sde_csr(sde, SD(HEAD));
1352
1353 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
1354 u16 cnt;
1355 u16 swtail;
1356 u16 swhead;
1357 int sane;
1358
1359 swhead = sde->descq_head & sde->sdma_mask;
1360 /* this code is really bad for cache line trading */
1361 swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1362 cnt = sde->descq_cnt;
1363
1364 if (swhead < swtail)
1365 /* not wrapped */
1366 sane = (hwhead >= swhead) & (hwhead <= swtail);
1367 else if (swhead > swtail)
1368 /* wrapped around */
1369 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
1370 (hwhead <= swtail);
1371 else
1372 /* empty */
1373 sane = (hwhead == swhead);
1374
1375 if (unlikely(!sane)) {
1376 dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1377 sde->this_idx,
1378 use_dmahead ? "dma" : "kreg",
1379 hwhead, swhead, swtail, cnt);
1380 if (use_dmahead) {
1381 /* try one more time, using csr */
1382 use_dmahead = 0;
1383 goto retry;
1384 }
1385 /* proceed as if no progress */
1386 hwhead = swhead;
1387 }
1388 }
1389 return hwhead;
1390 }
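
/*
 * Worked example of the head sanity check above (a sketch): with
 * cnt = 1024, swhead = 1000 and swtail = 10 the ring has wrapped, so a
 * hardware head of 1010 or of 5 is sane while 500 is not; in the
 * non-wrapped case swhead = 10, swtail = 500, only 10..500 is sane.
 * An insane value read from the DMA'ed head triggers one retry from the
 * CSR before the code gives up and reports no progress.
 */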
1391
1392 /*
1393 * This is called when there are send DMA descriptors that might be
1394 * available.
1395 *
1396 * This is called with head_lock held.
1397 */
1398 static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
1399 {
1400 struct iowait *wait, *nw;
1401 struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
1402 unsigned i, n = 0, seq;
1403 struct sdma_txreq *stx;
1404 struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
1405
1406 #ifdef CONFIG_SDMA_VERBOSITY
1407 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
1408 slashstrip(__FILE__), __LINE__, __func__);
1409 dd_dev_err(sde->dd, "avail: %u\n", avail);
1410 #endif
1411
1412 do {
1413 seq = read_seqbegin(&dev->iowait_lock);
1414 if (!list_empty(&sde->dmawait)) {
1415 /* at least one item */
1416 write_seqlock(&dev->iowait_lock);
1417 /* Harvest waiters wanting DMA descriptors */
1418 list_for_each_entry_safe(
1419 wait,
1420 nw,
1421 &sde->dmawait,
1422 list) {
1423 u16 num_desc = 0;
1424
1425 if (!wait->wakeup)
1426 continue;
1427 if (n == ARRAY_SIZE(waits))
1428 break;
1429 if (!list_empty(&wait->tx_head)) {
1430 stx = list_first_entry(
1431 &wait->tx_head,
1432 struct sdma_txreq,
1433 list);
1434 num_desc = stx->num_desc;
1435 }
1436 if (num_desc > avail)
1437 break;
1438 avail -= num_desc;
1439 list_del_init(&wait->list);
1440 waits[n++] = wait;
1441 }
1442 write_sequnlock(&dev->iowait_lock);
1443 break;
1444 }
1445 } while (read_seqretry(&dev->iowait_lock, seq));
1446
1447 for (i = 0; i < n; i++)
1448 waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
1449 }
1450
1451 /* head_lock must be held */
1452 static void sdma_make_progress(struct sdma_engine *sde, u64 status)
1453 {
1454 struct sdma_txreq *txp = NULL;
1455 int progress = 0;
1456 u16 hwhead, swhead, swtail;
1457 int idle_check_done = 0;
1458
1459 hwhead = sdma_gethead(sde);
1460
1461 /* The reason for some of the complexity of this code is that
1462 * not all descriptors have corresponding txps. So, we have to
1463 * be able to skip over descs until we wander into the range of
1464 * the next txp on the list.
1465 */
1466
1467 retry:
1468 txp = get_txhead(sde);
1469 swhead = sde->descq_head & sde->sdma_mask;
1470 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1471 while (swhead != hwhead) {
1472 /* advance head, wrap if needed */
1473 swhead = ++sde->descq_head & sde->sdma_mask;
1474
1475 /* if now past this txp's descs, do the callback */
1476 if (txp && txp->next_descq_idx == swhead) {
1477 int drained = 0;
1478 /* protect against complete modifying */
1479 struct iowait *wait = txp->wait;
1480
1481 /* remove from list */
1482 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
1483 if (wait)
1484 drained = atomic_dec_and_test(&wait->sdma_busy);
1485 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
1486 trace_hfi1_sdma_out_sn(sde, txp->sn);
1487 if (WARN_ON_ONCE(sde->head_sn != txp->sn))
1488 dd_dev_err(sde->dd, "expected %llu got %llu\n",
1489 sde->head_sn, txp->sn);
1490 sde->head_sn++;
1491 #endif
1492 sdma_txclean(sde->dd, txp);
1493 if (txp->complete)
1494 (*txp->complete)(
1495 txp,
1496 SDMA_TXREQ_S_OK,
1497 drained);
1498 if (wait && drained)
1499 iowait_drain_wakeup(wait);
1500 /* see if there is another txp */
1501 txp = get_txhead(sde);
1502 }
1503 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1504 progress++;
1505 }
1506
1507 /*
1508 * The SDMA idle interrupt is not guaranteed to be ordered with respect
1509 * to updates to the dma_head location in host memory. The head
1510 * value read might not be fully up to date. If there are pending
1511 * descriptors and the SDMA idle interrupt fired then read from the
1512 * CSR SDMA head instead to get the latest value from the hardware.
1513 * The hardware SDMA head should be read at most once in this invocation
1514 * of sdma_make_progress(), which is ensured by the idle_check_done flag.
1515 */
1516 if ((status & sde->idle_mask) && !idle_check_done) {
1517 swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1518 if (swtail != hwhead) {
1519 hwhead = (u16)read_sde_csr(sde, SD(HEAD));
1520 idle_check_done = 1;
1521 goto retry;
1522 }
1523 }
1524
1525 sde->last_status = status;
1526 if (progress)
1527 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
1528 }
1529
1530 /*
1531 * sdma_engine_interrupt() - interrupt handler for engine
1532 * @sde: sdma engine
1533 * @status: sdma interrupt reason
1534 *
1535 * Status is a mask of the 3 possible interrupts for this engine. It will
1536 * contain bits _only_ for this SDMA engine. It will contain at least one
1537 * bit and may contain more.
1538 */
1539 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
1540 {
1541 trace_hfi1_sdma_engine_interrupt(sde, status);
1542 write_seqlock(&sde->head_lock);
1543 sdma_set_desc_cnt(sde, sde->descq_cnt / 2);
1544 sdma_make_progress(sde, status);
1545 write_sequnlock(&sde->head_lock);
1546 }
1547
1548 /**
1549 * sdma_engine_error() - error handler for engine
1550 * @sde: sdma engine
1551 * @status: sdma interrupt reason
1552 */
1553 void sdma_engine_error(struct sdma_engine *sde, u64 status)
1554 {
1555 unsigned long flags;
1556
1557 #ifdef CONFIG_SDMA_VERBOSITY
1558 dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1559 sde->this_idx,
1560 (unsigned long long)status,
1561 sdma_state_names[sde->state.current_state]);
1562 #endif
1563 spin_lock_irqsave(&sde->tail_lock, flags);
1564 write_seqlock(&sde->head_lock);
1565 if (status & ALL_SDMA_ENG_HALT_ERRS)
1566 __sdma_process_event(sde, sdma_event_e60_hw_halted);
1567 if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
1568 dd_dev_err(sde->dd,
1569 "SDMA (%u) engine error: 0x%llx state %s\n",
1570 sde->this_idx,
1571 (unsigned long long)status,
1572 sdma_state_names[sde->state.current_state]);
1573 dump_sdma_state(sde);
1574 }
1575 write_sequnlock(&sde->head_lock);
1576 spin_unlock_irqrestore(&sde->tail_lock, flags);
1577 }
1578
1579 static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
1580 {
1581 u64 set_senddmactrl = 0;
1582 u64 clr_senddmactrl = 0;
1583 unsigned long flags;
1584
1585 #ifdef CONFIG_SDMA_VERBOSITY
1586 dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1587 sde->this_idx,
1588 (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
1589 (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
1590 (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
1591 (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
1592 #endif
1593
1594 if (op & SDMA_SENDCTRL_OP_ENABLE)
1595 set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1596 else
1597 clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1598
1599 if (op & SDMA_SENDCTRL_OP_INTENABLE)
1600 set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1601 else
1602 clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1603
1604 if (op & SDMA_SENDCTRL_OP_HALT)
1605 set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1606 else
1607 clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1608
1609 spin_lock_irqsave(&sde->senddmactrl_lock, flags);
1610
1611 sde->p_senddmactrl |= set_senddmactrl;
1612 sde->p_senddmactrl &= ~clr_senddmactrl;
1613
1614 if (op & SDMA_SENDCTRL_OP_CLEANUP)
1615 write_sde_csr(sde, SD(CTRL),
1616 sde->p_senddmactrl |
1617 SD(CTRL_SDMA_CLEANUP_SMASK));
1618 else
1619 write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
1620
1621 spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
1622
1623 #ifdef CONFIG_SDMA_VERBOSITY
1624 sdma_dumpstate(sde);
1625 #endif
1626 }
1627
1628 static void sdma_setlengen(struct sdma_engine *sde)
1629 {
1630 #ifdef CONFIG_SDMA_VERBOSITY
1631 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1632 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1633 #endif
1634
1635 /*
1636 * Set SendDmaLenGen and clear-then-set the MSB of the generation
1637 * count to enable generation checking and load the internal
1638 * generation counter.
1639 */
1640 write_sde_csr(sde, SD(LEN_GEN),
1641 (sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT)
1642 );
1643 write_sde_csr(sde, SD(LEN_GEN),
1644 ((sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT))
1645 | (4ULL << SD(LEN_GEN_GENERATION_SHIFT))
1646 );
1647 }
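
/*
 * Worked example (a sketch): with descq_cnt = 1024 the LENGTH field is
 * written as 1024/64 = 16 in both writes above; the second write also
 * writes 4 into the GENERATION field which, per the comment above, sets
 * the MSB of the generation count to enable generation checking and load
 * the internal generation counter.
 */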
1648
1649 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
1650 {
1651 /* Commit writes to memory and advance the tail on the chip */
1652 smp_wmb(); /* see get_txhead() */
1653 writeq(tail, sde->tail_csr);
1654 }
1655
1656 /*
1657 * This is called when changing to state s10_hw_start_up_halt_wait as
1658 * a result of send buffer errors or send DMA descriptor errors.
1659 */
1660 static void sdma_hw_start_up(struct sdma_engine *sde)
1661 {
1662 u64 reg;
1663
1664 #ifdef CONFIG_SDMA_VERBOSITY
1665 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1666 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1667 #endif
1668
1669 sdma_setlengen(sde);
1670 sdma_update_tail(sde, 0); /* Set SendDmaTail */
1671 *sde->head_dma = 0;
1672
1673 reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
1674 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
1675 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
1676 }
1677
1678 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
1679 (r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
1680
1681 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
1682 (r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
1683 /*
1684 * set_sdma_integrity
1685 *
1686 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
1687 */
1688 static void set_sdma_integrity(struct sdma_engine *sde)
1689 {
1690 struct hfi1_devdata *dd = sde->dd;
1691 u64 reg;
1692
1693 if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
1694 return;
1695
1696 reg = hfi1_pkt_base_sdma_integrity(dd);
1697
1698 if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
1699 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
1700 else
1701 SET_STATIC_RATE_CONTROL_SMASK(reg);
1702
1703 write_sde_csr(sde, SD(CHECK_ENABLE), reg);
1704 }
1705
1706
1707 static void init_sdma_regs(
1708 struct sdma_engine *sde,
1709 u32 credits,
1710 uint idle_cnt)
1711 {
1712 u8 opval, opmask;
1713 #ifdef CONFIG_SDMA_VERBOSITY
1714 struct hfi1_devdata *dd = sde->dd;
1715
1716 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1717 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1718 #endif
1719
1720 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
1721 sdma_setlengen(sde);
1722 sdma_update_tail(sde, 0); /* Set SendDmaTail */
1723 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
1724 write_sde_csr(sde, SD(DESC_CNT), 0);
1725 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
1726 write_sde_csr(sde, SD(MEMORY),
1727 ((u64)credits <<
1728 SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
1729 ((u64)(credits * sde->this_idx) <<
1730 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
1731 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
1732 set_sdma_integrity(sde);
1733 opmask = OPCODE_CHECK_MASK_DISABLED;
1734 opval = OPCODE_CHECK_VAL_DISABLED;
1735 write_sde_csr(sde, SD(CHECK_OPCODE),
1736 (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
1737 (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
1738 }
1739
1740 #ifdef CONFIG_SDMA_VERBOSITY
1741
1742 #define sdma_dumpstate_helper0(reg) do { \
1743 csr = read_csr(sde->dd, reg); \
1744 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
1745 } while (0)
1746
1747 #define sdma_dumpstate_helper(reg) do { \
1748 csr = read_sde_csr(sde, reg); \
1749 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
1750 #reg, sde->this_idx, csr); \
1751 } while (0)
1752
1753 #define sdma_dumpstate_helper2(reg) do { \
1754 csr = read_csr(sde->dd, reg + (8 * i)); \
1755 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
1756 #reg, i, csr); \
1757 } while (0)
1758
1759 void sdma_dumpstate(struct sdma_engine *sde)
1760 {
1761 u64 csr;
1762 unsigned i;
1763
1764 sdma_dumpstate_helper(SD(CTRL));
1765 sdma_dumpstate_helper(SD(STATUS));
1766 sdma_dumpstate_helper0(SD(ERR_STATUS));
1767 sdma_dumpstate_helper0(SD(ERR_MASK));
1768 sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
1769 sdma_dumpstate_helper(SD(ENG_ERR_MASK));
1770
1771 for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
1772 sdma_dumpstate_helper2(CCE_INT_STATUS);
1773 sdma_dumpstate_helper2(CCE_INT_MASK);
1774 sdma_dumpstate_helper2(CCE_INT_BLOCKED);
1775 }
1776
1777 sdma_dumpstate_helper(SD(TAIL));
1778 sdma_dumpstate_helper(SD(HEAD));
1779 sdma_dumpstate_helper(SD(PRIORITY_THLD));
1780 sdma_dumpstate_helper(SD(IDLE_CNT));
1781 sdma_dumpstate_helper(SD(RELOAD_CNT));
1782 sdma_dumpstate_helper(SD(DESC_CNT));
1783 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
1784 sdma_dumpstate_helper(SD(MEMORY));
1785 sdma_dumpstate_helper0(SD(ENGINES));
1786 sdma_dumpstate_helper0(SD(MEM_SIZE));
1787 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
1788 sdma_dumpstate_helper(SD(BASE_ADDR));
1789 sdma_dumpstate_helper(SD(LEN_GEN));
1790 sdma_dumpstate_helper(SD(HEAD_ADDR));
1791 sdma_dumpstate_helper(SD(CHECK_ENABLE));
1792 sdma_dumpstate_helper(SD(CHECK_VL));
1793 sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
1794 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
1795 sdma_dumpstate_helper(SD(CHECK_SLID));
1796 sdma_dumpstate_helper(SD(CHECK_OPCODE));
1797 }
1798 #endif
1799
1800 static void dump_sdma_state(struct sdma_engine *sde)
1801 {
1802 struct hw_sdma_desc *descq;
1803 struct hw_sdma_desc *descqp;
1804 u64 desc[2];
1805 u64 addr;
1806 u8 gen;
1807 u16 len;
1808 u16 head, tail, cnt;
1809
1810 head = sde->descq_head & sde->sdma_mask;
1811 tail = sde->descq_tail & sde->sdma_mask;
1812 cnt = sdma_descq_freecnt(sde);
1813 descq = sde->descq;
1814
1815 dd_dev_err(sde->dd,
1816 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
1817 sde->this_idx,
1818 head,
1819 tail,
1820 cnt,
1821 !list_empty(&sde->flushlist));
1822
1823 /* print info for each entry in the descriptor queue */
1824 while (head != tail) {
1825 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
1826
1827 descqp = &sde->descq[head];
1828 desc[0] = le64_to_cpu(descqp->qw[0]);
1829 desc[1] = le64_to_cpu(descqp->qw[1]);
1830 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
1831 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
1832 'H' : '-';
1833 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
1834 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
1835 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
1836 & SDMA_DESC0_PHY_ADDR_MASK;
1837 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
1838 & SDMA_DESC1_GENERATION_MASK;
1839 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
1840 & SDMA_DESC0_BYTE_COUNT_MASK;
1841 dd_dev_err(sde->dd,
1842 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1843 head, flags, addr, gen, len);
1844 dd_dev_err(sde->dd,
1845 "\tdesc0:0x%016llx desc1 0x%016llx\n",
1846 desc[0], desc[1]);
1847 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
1848 dd_dev_err(sde->dd,
1849 "\taidx: %u amode: %u alen: %u\n",
1850 (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
1851 >> SDMA_DESC1_HEADER_INDEX_SHIFT),
1852 (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1853 >> SDMA_DESC1_HEADER_MODE_SHIFT),
1854 (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
1855 >> SDMA_DESC1_HEADER_DWS_SHIFT));
1856 head++;
1857 head &= sde->sdma_mask;
1858 }
1859 }
1860
1861 #define SDE_FMT \
1862 "SDE %u STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
1863 /**
1864 * sdma_seqfile_dump_sde() - debugfs dump of sde
1865 * @s: seq file
1866 * @sde: send dma engine to dump
1867 *
1868 * This routine dumps the sde to the indicated seq file.
1869 */
1870 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
1871 {
1872 u16 head, tail;
1873 struct hw_sdma_desc *descqp;
1874 u64 desc[2];
1875 u64 addr;
1876 u8 gen;
1877 u16 len;
1878
1879 head = sde->descq_head & sde->sdma_mask;
1880 tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1881 seq_printf(s, SDE_FMT, sde->this_idx,
1882 sdma_state_name(sde->state.current_state),
1883 (unsigned long long)read_sde_csr(sde, SD(CTRL)),
1884 (unsigned long long)read_sde_csr(sde, SD(STATUS)),
1885 (unsigned long long)read_sde_csr(sde,
1886 SD(ENG_ERR_STATUS)),
1887 (unsigned long long)read_sde_csr(sde, SD(TAIL)),
1888 tail,
1889 (unsigned long long)read_sde_csr(sde, SD(HEAD)),
1890 head,
1891 (unsigned long long)le64_to_cpu(*sde->head_dma),
1892 (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
1893 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
1894 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
1895 (unsigned long long)sde->last_status,
1896 (unsigned long long)sde->ahg_bits,
1897 sde->tx_tail,
1898 sde->tx_head,
1899 sde->descq_tail,
1900 sde->descq_head,
1901 !list_empty(&sde->flushlist),
1902 sde->descq_full_count,
1903 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
1904
1905 /* print info for each entry in the descriptor queue */
1906 while (head != tail) {
1907 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
1908
1909 descqp = &sde->descq[head];
1910 desc[0] = le64_to_cpu(descqp->qw[0]);
1911 desc[1] = le64_to_cpu(descqp->qw[1]);
1912 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
1913 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
1914 'H' : '-';
1915 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
1916 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
1917 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
1918 & SDMA_DESC0_PHY_ADDR_MASK;
1919 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
1920 & SDMA_DESC1_GENERATION_MASK;
1921 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
1922 & SDMA_DESC0_BYTE_COUNT_MASK;
1923 seq_printf(s,
1924 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1925 head, flags, addr, gen, len);
1926 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
1927 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
1928 (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
1929 >> SDMA_DESC1_HEADER_INDEX_SHIFT),
1930 (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1931 >> SDMA_DESC1_HEADER_MODE_SHIFT));
1932 head = (head + 1) & sde->sdma_mask;
1933 }
1934 }
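/*
 * Illustrative sketch (not part of the driver): a debugfs seq_file
 * "show" routine could walk the engines and invoke the dump helper
 * above.  The use of s->private to carry the devdata is an assumption
 * made only for this sketch.
 *
 *	static int sdes_show(struct seq_file *s, void *unused)
 *	{
 *		struct hfi1_devdata *dd = s->private;
 *		int i;
 *
 *		for (i = 0; i < dd->num_sdma; i++)
 *			sdma_seqfile_dump_sde(s, &dd->per_sdma[i]);
 *		return 0;
 *	}
 */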
1935
1936 /*
1937 * add the generation number into
1938 * the qw1 and return
1939 */
1940 static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
1941 {
1942 u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
1943
1944 qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
1945 qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
1946 << SDMA_DESC1_GENERATION_SHIFT;
1947 return qw1;
1948 }
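/*
 * Worked example of the generation arithmetic above: sde->descq_tail is
 * a free-running counter (only masked when used as a ring index), and
 * sde->sdma_shift is set up at init (not shown in this section) to the
 * log2 of the descriptor count.  With the default of 1024 entries,
 * tails 0..1023 carry generation 0, 1024..2047 carry generation 1, and
 * so on, wrapping modulo 4 because of the "& 3".
 */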
1949
1950 /*
1951 * This routine submits the indicated tx
1952 *
1953 * Space has already been guaranteed and
1954 * tail side of ring is locked.
1955 *
1956 * The hardware tail update is done
1957 * in the caller and that is facilitated
1958 * by returning the new tail.
1959 *
1960 * There is special-case logic for AHG:
1961 * the generation number is not added to
1962 * up to 2 descriptors that follow the
1963 * first descriptor (the AHG edit descriptors).
1964 *
1965 */
1966 static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
1967 {
1968 int i;
1969 u16 tail;
1970 struct sdma_desc *descp = tx->descp;
1971 u8 skip = 0, mode = ahg_mode(tx);
1972
1973 tail = sde->descq_tail & sde->sdma_mask;
1974 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
1975 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
1976 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
1977 tail, &sde->descq[tail]);
1978 tail = ++sde->descq_tail & sde->sdma_mask;
1979 descp++;
1980 if (mode > SDMA_AHG_APPLY_UPDATE1)
1981 skip = mode >> 1;
1982 for (i = 1; i < tx->num_desc; i++, descp++) {
1983 u64 qw1;
1984
1985 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
1986 if (skip) {
1987 /* edits don't have generation */
1988 qw1 = descp->qw[1];
1989 skip--;
1990 } else {
1991 /* replace generation with real one for non-edits */
1992 qw1 = add_gen(sde, descp->qw[1]);
1993 }
1994 sde->descq[tail].qw[1] = cpu_to_le64(qw1);
1995 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
1996 tail, &sde->descq[tail]);
1997 tail = ++sde->descq_tail & sde->sdma_mask;
1998 }
1999 tx->next_descq_idx = tail;
2000 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2001 tx->sn = sde->tail_sn++;
2002 trace_hfi1_sdma_in_sn(sde, tx->sn);
2003 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
2004 #endif
2005 sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
2006 sde->desc_avail -= tx->num_desc;
2007 return tail;
2008 }
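/*
 * Note on the AHG skip logic in submit_tx(): with the
 * SDMA_AHG_APPLY_UPDATE2/UPDATE3 encodings from sdma.h, "mode >> 1"
 * works out to one and two skipped descriptors respectively, matching
 * the extra "edit" descriptors that _sdma_txreq_ahgadd() (below)
 * consumes.  Those descriptors carry AHG update words rather than a
 * generation, so their qw[1] is copied through unmodified.
 */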
2009
2010 /*
2011 * Check for progress
2012 */
2013 static int sdma_check_progress(
2014 struct sdma_engine *sde,
2015 struct iowait *wait,
2016 struct sdma_txreq *tx)
2017 {
2018 int ret;
2019
2020 sde->desc_avail = sdma_descq_freecnt(sde);
2021 if (tx->num_desc <= sde->desc_avail)
2022 return -EAGAIN;
2023 /* pulse the head_lock */
2024 if (wait && wait->sleep) {
2025 unsigned seq;
2026
2027 seq = raw_seqcount_begin(
2028 (const seqcount_t *)&sde->head_lock.seqcount);
2029 ret = wait->sleep(sde, wait, tx, seq);
2030 if (ret == -EAGAIN)
2031 sde->desc_avail = sdma_descq_freecnt(sde);
2032 } else
2033 ret = -EBUSY;
2034 return ret;
2035 }
2036
2037 /**
2038 * sdma_send_txreq() - submit a tx req to ring
2039 * @sde: sdma engine to use
2040 * @wait: wait structure to use when full (may be NULL)
2041 * @tx: sdma_txreq to submit
2042 *
2043 * The call submits the tx into the ring. If an iowait structure is non-NULL
2044 * the packet will be queued to the list in wait.
2045 *
2046 * Return:
2047 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
2048 * ring (wait == NULL)
2049 * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
2050 */
2051 int sdma_send_txreq(struct sdma_engine *sde,
2052 struct iowait *wait,
2053 struct sdma_txreq *tx)
2054 {
2055 int ret = 0;
2056 u16 tail;
2057 unsigned long flags;
2058
2059 /* user should have supplied entire packet */
2060 if (unlikely(tx->tlen))
2061 return -EINVAL;
2062 tx->wait = wait;
2063 spin_lock_irqsave(&sde->tail_lock, flags);
2064 retry:
2065 if (unlikely(!__sdma_running(sde)))
2066 goto unlock_noconn;
2067 if (unlikely(tx->num_desc > sde->desc_avail))
2068 goto nodesc;
2069 tail = submit_tx(sde, tx);
2070 if (wait)
2071 atomic_inc(&wait->sdma_busy);
2072 sdma_update_tail(sde, tail);
2073 unlock:
2074 spin_unlock_irqrestore(&sde->tail_lock, flags);
2075 return ret;
2076 unlock_noconn:
2077 if (wait)
2078 atomic_inc(&wait->sdma_busy);
2079 tx->next_descq_idx = 0;
2080 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2081 tx->sn = sde->tail_sn++;
2082 trace_hfi1_sdma_in_sn(sde, tx->sn);
2083 #endif
2084 spin_lock(&sde->flushlist_lock);
2085 list_add_tail(&tx->list, &sde->flushlist);
2086 spin_unlock(&sde->flushlist_lock);
2087 if (wait) {
2088 wait->tx_count++;
2089 wait->count += tx->num_desc;
2090 }
2091 schedule_work(&sde->flush_worker);
2092 ret = -ECOMM;
2093 goto unlock;
2094 nodesc:
2095 ret = sdma_check_progress(sde, wait, tx);
2096 if (ret == -EAGAIN) {
2097 ret = 0;
2098 goto retry;
2099 }
2100 sde->descq_full_count++;
2101 goto unlock;
2102 }
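/*
 * Illustrative sketch (not part of the driver) of dispatching on the
 * return codes documented above.  A return of 0 means the descriptors
 * were posted; -EIOCBQUEUED means the tx now sits on the iowait and
 * will be resubmitted later; -ECOMM means the tx was placed on the
 * engine's flushlist.  The remaining codes leave the tx with the
 * caller; caller_cleanup() below is a hypothetical helper.
 *
 *	ret = sdma_send_txreq(sde, wait, tx);
 *	if (ret && ret != -EIOCBQUEUED && ret != -ECOMM) {
 *		// -EINVAL or -EBUSY: tx was not submitted
 *		caller_cleanup(tx);
 *	}
 */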
2103
2104 /**
2105 * sdma_send_txlist() - submit a list of tx req to ring
2106 * @sde: sdma engine to use
2107 * @wait: wait structure to use when full (may be NULL)
2108 * @tx_list: list of sdma_txreqs to submit
2109 *
2110 * The call submits the list into the ring.
2111 *
2112 * If the iowait structure is non-NULL and tx_list is not the iowait's own
2113 * list, the unprocessed part of the list will be appended to the list in wait.
2114 *
2115 * In all cases, the tx_list will be updated so the head of the tx_list is
2116 * the list of descriptors that have yet to be transmitted.
2117 *
2118 * The intent of this call is to provide a more efficient
2119 * way of submitting multiple packets to SDMA while holding the tail
2120 * side lock only once.
2121 *
2122 * Return:
2123 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring
2124 * (wait == NULL)
2125 * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
2126 */
2127 int sdma_send_txlist(struct sdma_engine *sde,
2128 struct iowait *wait,
2129 struct list_head *tx_list)
2130 {
2131 struct sdma_txreq *tx, *tx_next;
2132 int ret = 0;
2133 unsigned long flags;
2134 u16 tail = INVALID_TAIL;
2135 int count = 0;
2136
2137 spin_lock_irqsave(&sde->tail_lock, flags);
2138 retry:
2139 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2140 tx->wait = wait;
2141 if (unlikely(!__sdma_running(sde)))
2142 goto unlock_noconn;
2143 if (unlikely(tx->num_desc > sde->desc_avail))
2144 goto nodesc;
2145 if (unlikely(tx->tlen)) {
2146 ret = -EINVAL;
2147 goto update_tail;
2148 }
2149 list_del_init(&tx->list);
2150 tail = submit_tx(sde, tx);
2151 count++;
2152 if (tail != INVALID_TAIL &&
2153 (count & SDMA_TAIL_UPDATE_THRESH) == 0) {
2154 sdma_update_tail(sde, tail);
2155 tail = INVALID_TAIL;
2156 }
2157 }
2158 update_tail:
2159 if (wait)
2160 atomic_add(count, &wait->sdma_busy);
2161 if (tail != INVALID_TAIL)
2162 sdma_update_tail(sde, tail);
2163 spin_unlock_irqrestore(&sde->tail_lock, flags);
2164 return ret;
2165 unlock_noconn:
2166 spin_lock(&sde->flushlist_lock);
2167 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2168 tx->wait = wait;
2169 list_del_init(&tx->list);
2170 if (wait)
2171 atomic_inc(&wait->sdma_busy);
2172 tx->next_descq_idx = 0;
2173 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2174 tx->sn = sde->tail_sn++;
2175 trace_hfi1_sdma_in_sn(sde, tx->sn);
2176 #endif
2177 list_add_tail(&tx->list, &sde->flushlist);
2178 if (wait) {
2179 wait->tx_count++;
2180 wait->count += tx->num_desc;
2181 }
2182 }
2183 spin_unlock(&sde->flushlist_lock);
2184 schedule_work(&sde->flush_worker);
2185 ret = -ECOMM;
2186 goto update_tail;
2187 nodesc:
2188 ret = sdma_check_progress(sde, wait, tx);
2189 if (ret == -EAGAIN) {
2190 ret = 0;
2191 goto retry;
2192 }
2193 sde->descq_full_count++;
2194 goto update_tail;
2195 }
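/*
 * Illustrative sketch (not part of the driver): batching prebuilt
 * txreqs through sdma_send_txlist().  Only the standard list helpers
 * are used; "txreqs[]" and "n" are hypothetical.
 *
 *	LIST_HEAD(txlist);
 *	int i, ret;
 *
 *	for (i = 0; i < n; i++)
 *		list_add_tail(&txreqs[i].list, &txlist);
 *	ret = sdma_send_txlist(sde, wait, &txlist);
 *	// per the description above, anything still on txlist
 *	// has not been submitted to the ring
 */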
2196
2197 static void sdma_process_event(struct sdma_engine *sde,
2198 enum sdma_events event)
2199 {
2200 unsigned long flags;
2201
2202 spin_lock_irqsave(&sde->tail_lock, flags);
2203 write_seqlock(&sde->head_lock);
2204
2205 __sdma_process_event(sde, event);
2206
2207 if (sde->state.current_state == sdma_state_s99_running)
2208 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
2209
2210 write_sequnlock(&sde->head_lock);
2211 spin_unlock_irqrestore(&sde->tail_lock, flags);
2212 }
2213
2214 static void __sdma_process_event(struct sdma_engine *sde,
2215 enum sdma_events event)
2216 {
2217 struct sdma_state *ss = &sde->state;
2218 int need_progress = 0;
2219
2220 /* CONFIG SDMA temporary */
2221 #ifdef CONFIG_SDMA_VERBOSITY
2222 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
2223 sdma_state_names[ss->current_state],
2224 sdma_event_names[event]);
2225 #endif
2226
2227 switch (ss->current_state) {
2228 case sdma_state_s00_hw_down:
2229 switch (event) {
2230 case sdma_event_e00_go_hw_down:
2231 break;
2232 case sdma_event_e30_go_running:
2233 /*
2234 * If down, but running requested (usually the result
2235 * of a link up), then we need to start up.
2236 * This can happen when hw down is requested while
2237 * bringing the link up with traffic active,
2238 * e.g. on the 7220. */
2239 ss->go_s99_running = 1;
2240 /* fall through and start dma engine */
2241 case sdma_event_e10_go_hw_start:
2242 /* This reference means the state machine is started */
2243 sdma_get(&sde->state);
2244 sdma_set_state(sde,
2245 sdma_state_s10_hw_start_up_halt_wait);
2246 break;
2247 case sdma_event_e15_hw_halt_done:
2248 break;
2249 case sdma_event_e25_hw_clean_up_done:
2250 break;
2251 case sdma_event_e40_sw_cleaned:
2252 sdma_sw_tear_down(sde);
2253 break;
2254 case sdma_event_e50_hw_cleaned:
2255 break;
2256 case sdma_event_e60_hw_halted:
2257 break;
2258 case sdma_event_e70_go_idle:
2259 break;
2260 case sdma_event_e80_hw_freeze:
2261 break;
2262 case sdma_event_e81_hw_frozen:
2263 break;
2264 case sdma_event_e82_hw_unfreeze:
2265 break;
2266 case sdma_event_e85_link_down:
2267 break;
2268 case sdma_event_e90_sw_halted:
2269 break;
2270 }
2271 break;
2272
2273 case sdma_state_s10_hw_start_up_halt_wait:
2274 switch (event) {
2275 case sdma_event_e00_go_hw_down:
2276 sdma_set_state(sde, sdma_state_s00_hw_down);
2277 sdma_sw_tear_down(sde);
2278 break;
2279 case sdma_event_e10_go_hw_start:
2280 break;
2281 case sdma_event_e15_hw_halt_done:
2282 sdma_set_state(sde,
2283 sdma_state_s15_hw_start_up_clean_wait);
2284 sdma_start_hw_clean_up(sde);
2285 break;
2286 case sdma_event_e25_hw_clean_up_done:
2287 break;
2288 case sdma_event_e30_go_running:
2289 ss->go_s99_running = 1;
2290 break;
2291 case sdma_event_e40_sw_cleaned:
2292 break;
2293 case sdma_event_e50_hw_cleaned:
2294 break;
2295 case sdma_event_e60_hw_halted:
2296 sdma_start_err_halt_wait(sde);
2297 break;
2298 case sdma_event_e70_go_idle:
2299 ss->go_s99_running = 0;
2300 break;
2301 case sdma_event_e80_hw_freeze:
2302 break;
2303 case sdma_event_e81_hw_frozen:
2304 break;
2305 case sdma_event_e82_hw_unfreeze:
2306 break;
2307 case sdma_event_e85_link_down:
2308 break;
2309 case sdma_event_e90_sw_halted:
2310 break;
2311 }
2312 break;
2313
2314 case sdma_state_s15_hw_start_up_clean_wait:
2315 switch (event) {
2316 case sdma_event_e00_go_hw_down:
2317 sdma_set_state(sde, sdma_state_s00_hw_down);
2318 sdma_sw_tear_down(sde);
2319 break;
2320 case sdma_event_e10_go_hw_start:
2321 break;
2322 case sdma_event_e15_hw_halt_done:
2323 break;
2324 case sdma_event_e25_hw_clean_up_done:
2325 sdma_hw_start_up(sde);
2326 sdma_set_state(sde, ss->go_s99_running ?
2327 sdma_state_s99_running :
2328 sdma_state_s20_idle);
2329 break;
2330 case sdma_event_e30_go_running:
2331 ss->go_s99_running = 1;
2332 break;
2333 case sdma_event_e40_sw_cleaned:
2334 break;
2335 case sdma_event_e50_hw_cleaned:
2336 break;
2337 case sdma_event_e60_hw_halted:
2338 break;
2339 case sdma_event_e70_go_idle:
2340 ss->go_s99_running = 0;
2341 break;
2342 case sdma_event_e80_hw_freeze:
2343 break;
2344 case sdma_event_e81_hw_frozen:
2345 break;
2346 case sdma_event_e82_hw_unfreeze:
2347 break;
2348 case sdma_event_e85_link_down:
2349 break;
2350 case sdma_event_e90_sw_halted:
2351 break;
2352 }
2353 break;
2354
2355 case sdma_state_s20_idle:
2356 switch (event) {
2357 case sdma_event_e00_go_hw_down:
2358 sdma_set_state(sde, sdma_state_s00_hw_down);
2359 sdma_sw_tear_down(sde);
2360 break;
2361 case sdma_event_e10_go_hw_start:
2362 break;
2363 case sdma_event_e15_hw_halt_done:
2364 break;
2365 case sdma_event_e25_hw_clean_up_done:
2366 break;
2367 case sdma_event_e30_go_running:
2368 sdma_set_state(sde, sdma_state_s99_running);
2369 ss->go_s99_running = 1;
2370 break;
2371 case sdma_event_e40_sw_cleaned:
2372 break;
2373 case sdma_event_e50_hw_cleaned:
2374 break;
2375 case sdma_event_e60_hw_halted:
2376 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2377 sdma_start_err_halt_wait(sde);
2378 break;
2379 case sdma_event_e70_go_idle:
2380 break;
2381 case sdma_event_e85_link_down:
2382 /* fall through */
2383 case sdma_event_e80_hw_freeze:
2384 sdma_set_state(sde, sdma_state_s80_hw_freeze);
2385 atomic_dec(&sde->dd->sdma_unfreeze_count);
2386 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2387 break;
2388 case sdma_event_e81_hw_frozen:
2389 break;
2390 case sdma_event_e82_hw_unfreeze:
2391 break;
2392 case sdma_event_e90_sw_halted:
2393 break;
2394 }
2395 break;
2396
2397 case sdma_state_s30_sw_clean_up_wait:
2398 switch (event) {
2399 case sdma_event_e00_go_hw_down:
2400 sdma_set_state(sde, sdma_state_s00_hw_down);
2401 break;
2402 case sdma_event_e10_go_hw_start:
2403 break;
2404 case sdma_event_e15_hw_halt_done:
2405 break;
2406 case sdma_event_e25_hw_clean_up_done:
2407 break;
2408 case sdma_event_e30_go_running:
2409 ss->go_s99_running = 1;
2410 break;
2411 case sdma_event_e40_sw_cleaned:
2412 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
2413 sdma_start_hw_clean_up(sde);
2414 break;
2415 case sdma_event_e50_hw_cleaned:
2416 break;
2417 case sdma_event_e60_hw_halted:
2418 break;
2419 case sdma_event_e70_go_idle:
2420 ss->go_s99_running = 0;
2421 break;
2422 case sdma_event_e80_hw_freeze:
2423 break;
2424 case sdma_event_e81_hw_frozen:
2425 break;
2426 case sdma_event_e82_hw_unfreeze:
2427 break;
2428 case sdma_event_e85_link_down:
2429 ss->go_s99_running = 0;
2430 break;
2431 case sdma_event_e90_sw_halted:
2432 break;
2433 }
2434 break;
2435
2436 case sdma_state_s40_hw_clean_up_wait:
2437 switch (event) {
2438 case sdma_event_e00_go_hw_down:
2439 sdma_set_state(sde, sdma_state_s00_hw_down);
2440 sdma_start_sw_clean_up(sde);
2441 break;
2442 case sdma_event_e10_go_hw_start:
2443 break;
2444 case sdma_event_e15_hw_halt_done:
2445 break;
2446 case sdma_event_e25_hw_clean_up_done:
2447 sdma_hw_start_up(sde);
2448 sdma_set_state(sde, ss->go_s99_running ?
2449 sdma_state_s99_running :
2450 sdma_state_s20_idle);
2451 break;
2452 case sdma_event_e30_go_running:
2453 ss->go_s99_running = 1;
2454 break;
2455 case sdma_event_e40_sw_cleaned:
2456 break;
2457 case sdma_event_e50_hw_cleaned:
2458 break;
2459 case sdma_event_e60_hw_halted:
2460 break;
2461 case sdma_event_e70_go_idle:
2462 ss->go_s99_running = 0;
2463 break;
2464 case sdma_event_e80_hw_freeze:
2465 break;
2466 case sdma_event_e81_hw_frozen:
2467 break;
2468 case sdma_event_e82_hw_unfreeze:
2469 break;
2470 case sdma_event_e85_link_down:
2471 ss->go_s99_running = 0;
2472 break;
2473 case sdma_event_e90_sw_halted:
2474 break;
2475 }
2476 break;
2477
2478 case sdma_state_s50_hw_halt_wait:
2479 switch (event) {
2480 case sdma_event_e00_go_hw_down:
2481 sdma_set_state(sde, sdma_state_s00_hw_down);
2482 sdma_start_sw_clean_up(sde);
2483 break;
2484 case sdma_event_e10_go_hw_start:
2485 break;
2486 case sdma_event_e15_hw_halt_done:
2487 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2488 sdma_start_sw_clean_up(sde);
2489 break;
2490 case sdma_event_e25_hw_clean_up_done:
2491 break;
2492 case sdma_event_e30_go_running:
2493 ss->go_s99_running = 1;
2494 break;
2495 case sdma_event_e40_sw_cleaned:
2496 break;
2497 case sdma_event_e50_hw_cleaned:
2498 break;
2499 case sdma_event_e60_hw_halted:
2500 sdma_start_err_halt_wait(sde);
2501 break;
2502 case sdma_event_e70_go_idle:
2503 ss->go_s99_running = 0;
2504 break;
2505 case sdma_event_e80_hw_freeze:
2506 break;
2507 case sdma_event_e81_hw_frozen:
2508 break;
2509 case sdma_event_e82_hw_unfreeze:
2510 break;
2511 case sdma_event_e85_link_down:
2512 ss->go_s99_running = 0;
2513 break;
2514 case sdma_event_e90_sw_halted:
2515 break;
2516 }
2517 break;
2518
2519 case sdma_state_s60_idle_halt_wait:
2520 switch (event) {
2521 case sdma_event_e00_go_hw_down:
2522 sdma_set_state(sde, sdma_state_s00_hw_down);
2523 sdma_start_sw_clean_up(sde);
2524 break;
2525 case sdma_event_e10_go_hw_start:
2526 break;
2527 case sdma_event_e15_hw_halt_done:
2528 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2529 sdma_start_sw_clean_up(sde);
2530 break;
2531 case sdma_event_e25_hw_clean_up_done:
2532 break;
2533 case sdma_event_e30_go_running:
2534 ss->go_s99_running = 1;
2535 break;
2536 case sdma_event_e40_sw_cleaned:
2537 break;
2538 case sdma_event_e50_hw_cleaned:
2539 break;
2540 case sdma_event_e60_hw_halted:
2541 sdma_start_err_halt_wait(sde);
2542 break;
2543 case sdma_event_e70_go_idle:
2544 ss->go_s99_running = 0;
2545 break;
2546 case sdma_event_e80_hw_freeze:
2547 break;
2548 case sdma_event_e81_hw_frozen:
2549 break;
2550 case sdma_event_e82_hw_unfreeze:
2551 break;
2552 case sdma_event_e85_link_down:
2553 break;
2554 case sdma_event_e90_sw_halted:
2555 break;
2556 }
2557 break;
2558
2559 case sdma_state_s80_hw_freeze:
2560 switch (event) {
2561 case sdma_event_e00_go_hw_down:
2562 sdma_set_state(sde, sdma_state_s00_hw_down);
2563 sdma_start_sw_clean_up(sde);
2564 break;
2565 case sdma_event_e10_go_hw_start:
2566 break;
2567 case sdma_event_e15_hw_halt_done:
2568 break;
2569 case sdma_event_e25_hw_clean_up_done:
2570 break;
2571 case sdma_event_e30_go_running:
2572 ss->go_s99_running = 1;
2573 break;
2574 case sdma_event_e40_sw_cleaned:
2575 break;
2576 case sdma_event_e50_hw_cleaned:
2577 break;
2578 case sdma_event_e60_hw_halted:
2579 break;
2580 case sdma_event_e70_go_idle:
2581 ss->go_s99_running = 0;
2582 break;
2583 case sdma_event_e80_hw_freeze:
2584 break;
2585 case sdma_event_e81_hw_frozen:
2586 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
2587 sdma_start_sw_clean_up(sde);
2588 break;
2589 case sdma_event_e82_hw_unfreeze:
2590 break;
2591 case sdma_event_e85_link_down:
2592 break;
2593 case sdma_event_e90_sw_halted:
2594 break;
2595 }
2596 break;
2597
2598 case sdma_state_s82_freeze_sw_clean:
2599 switch (event) {
2600 case sdma_event_e00_go_hw_down:
2601 sdma_set_state(sde, sdma_state_s00_hw_down);
2602 sdma_start_sw_clean_up(sde);
2603 break;
2604 case sdma_event_e10_go_hw_start:
2605 break;
2606 case sdma_event_e15_hw_halt_done:
2607 break;
2608 case sdma_event_e25_hw_clean_up_done:
2609 break;
2610 case sdma_event_e30_go_running:
2611 ss->go_s99_running = 1;
2612 break;
2613 case sdma_event_e40_sw_cleaned:
2614 /* notify caller this engine is done cleaning */
2615 atomic_dec(&sde->dd->sdma_unfreeze_count);
2616 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2617 break;
2618 case sdma_event_e50_hw_cleaned:
2619 break;
2620 case sdma_event_e60_hw_halted:
2621 break;
2622 case sdma_event_e70_go_idle:
2623 ss->go_s99_running = 0;
2624 break;
2625 case sdma_event_e80_hw_freeze:
2626 break;
2627 case sdma_event_e81_hw_frozen:
2628 break;
2629 case sdma_event_e82_hw_unfreeze:
2630 sdma_hw_start_up(sde);
2631 sdma_set_state(sde, ss->go_s99_running ?
2632 sdma_state_s99_running :
2633 sdma_state_s20_idle);
2634 break;
2635 case sdma_event_e85_link_down:
2636 break;
2637 case sdma_event_e90_sw_halted:
2638 break;
2639 }
2640 break;
2641
2642 case sdma_state_s99_running:
2643 switch (event) {
2644 case sdma_event_e00_go_hw_down:
2645 sdma_set_state(sde, sdma_state_s00_hw_down);
2646 sdma_start_sw_clean_up(sde);
2647 break;
2648 case sdma_event_e10_go_hw_start:
2649 break;
2650 case sdma_event_e15_hw_halt_done:
2651 break;
2652 case sdma_event_e25_hw_clean_up_done:
2653 break;
2654 case sdma_event_e30_go_running:
2655 break;
2656 case sdma_event_e40_sw_cleaned:
2657 break;
2658 case sdma_event_e50_hw_cleaned:
2659 break;
2660 case sdma_event_e60_hw_halted:
2661 need_progress = 1;
2662 sdma_err_progress_check_schedule(sde);
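			/* fall through */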
2663 case sdma_event_e90_sw_halted:
2664 /*
2665 * SW initiated halt does not perform engines
2666 * progress check
2667 */
2668 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2669 sdma_start_err_halt_wait(sde);
2670 break;
2671 case sdma_event_e70_go_idle:
2672 sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
2673 break;
2674 case sdma_event_e85_link_down:
2675 ss->go_s99_running = 0;
2676 /* fall through */
2677 case sdma_event_e80_hw_freeze:
2678 sdma_set_state(sde, sdma_state_s80_hw_freeze);
2679 atomic_dec(&sde->dd->sdma_unfreeze_count);
2680 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2681 break;
2682 case sdma_event_e81_hw_frozen:
2683 break;
2684 case sdma_event_e82_hw_unfreeze:
2685 break;
2686 }
2687 break;
2688 }
2689
2690 ss->last_event = event;
2691 if (need_progress)
2692 sdma_make_progress(sde, 0);
2693 }
2694
2695 /*
2696 * _extend_sdma_tx_descs() - helper to extend txreq
2697 *
2698 * This is called once the initial nominal allocation
2699 * of descriptors in the sdma_txreq is exhausted.
2700 *
2701 * The code will bump the allocation up to the max
2702 * of MAX_DESC (64) descriptors. There doesn't seem
2703 * much point in an interim step.
2704 *
2705 */
2706 int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
2707 {
2708 int i;
2709
2710 tx->descp = kmalloc_array(
2711 MAX_DESC,
2712 sizeof(struct sdma_desc),
2713 GFP_ATOMIC);
2714 if (!tx->descp)
2715 return -ENOMEM;
2716 tx->desc_limit = MAX_DESC;
2717 /* copy ones already built */
2718 for (i = 0; i < tx->num_desc; i++)
2719 tx->descp[i] = tx->descs[i];
2720 return 0;
2721 }
2722
2723 /* Update sdes when the lmc changes */
2724 void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
2725 {
2726 struct sdma_engine *sde;
2727 int i;
2728 u64 sreg;
2729
2730 sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
2731 SD(CHECK_SLID_MASK_SHIFT)) |
2732 (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
2733 SD(CHECK_SLID_VALUE_SHIFT));
2734
2735 for (i = 0; i < dd->num_sdma; i++) {
2736 hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
2737 i, (u32)sreg);
2738 sde = &dd->per_sdma[i];
2739 write_sde_csr(sde, SD(CHECK_SLID), sreg);
2740 }
2741 }
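/*
 * Worked example (hedged) for the sreg composition above: the SLID mask
 * is packed into the CHECK_SLID MASK field and (lid & mask) into the
 * VALUE field.  Assuming the caller derives the mask from the LMC as
 * mask = ~((1 << lmc) - 1), an LMC of 2 with a base LID of 0x1000
 * clears the low two mask bits and programs a check value of 0x1000,
 * so all four per-LMC LID variants (0x1000..0x1003) share the same
 * masked value.
 */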
2742
2743 /* tx not dword sized - pad */
2744 int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
2745 {
2746 int rval = 0;
2747
2748 	if (unlikely(tx->num_desc == tx->desc_limit)) {
2749 rval = _extend_sdma_tx_descs(dd, tx);
2750 if (rval)
2751 return rval;
2752 }
2753 /* finish the one just added */
2754 tx->num_desc++;
2755 make_tx_sdma_desc(
2756 tx,
2757 SDMA_MAP_NONE,
2758 dd->sdma_pad_phys,
2759 sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
2760 _sdma_close_tx(dd, tx);
2761 return rval;
2762 }
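/*
 * Worked example for the pad length above: with tx->packet_len == 54,
 * packet_len & 3 == 2, so sizeof(u32) - 2 == 2 pad bytes are described
 * by the extra descriptor.  The pad is sourced from the preallocated
 * dd->sdma_pad_phys buffer and mapped as SDMA_MAP_NONE, so there is
 * nothing to unmap on completion.
 */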
2763
2764 /*
2765 * Add ahg to the sdma_txreq
2766 *
2767 * The logic will consume up to 3
2768 * descriptors at the beginning of
2769 * sdma_txreq.
2770 */
2771 void _sdma_txreq_ahgadd(
2772 struct sdma_txreq *tx,
2773 u8 num_ahg,
2774 u8 ahg_entry,
2775 u32 *ahg,
2776 u8 ahg_hlen)
2777 {
2778 u32 i, shift = 0, desc = 0;
2779 u8 mode;
2780
2781 WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
2782 /* compute mode */
2783 if (num_ahg == 1)
2784 mode = SDMA_AHG_APPLY_UPDATE1;
2785 else if (num_ahg <= 5)
2786 mode = SDMA_AHG_APPLY_UPDATE2;
2787 else
2788 mode = SDMA_AHG_APPLY_UPDATE3;
2789 tx->num_desc++;
2790 	/* initialize the consumed descriptors to zero */
2791 switch (mode) {
2792 case SDMA_AHG_APPLY_UPDATE3:
2793 tx->num_desc++;
2794 tx->descs[2].qw[0] = 0;
2795 tx->descs[2].qw[1] = 0;
2796 /* FALLTHROUGH */
2797 case SDMA_AHG_APPLY_UPDATE2:
2798 tx->num_desc++;
2799 tx->descs[1].qw[0] = 0;
2800 tx->descs[1].qw[1] = 0;
2801 break;
2802 }
2803 ahg_hlen >>= 2;
2804 tx->descs[0].qw[1] |=
2805 (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
2806 << SDMA_DESC1_HEADER_INDEX_SHIFT) |
2807 (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
2808 << SDMA_DESC1_HEADER_DWS_SHIFT) |
2809 (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
2810 << SDMA_DESC1_HEADER_MODE_SHIFT) |
2811 (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
2812 << SDMA_DESC1_HEADER_UPDATE1_SHIFT);
2813 for (i = 0; i < (num_ahg - 1); i++) {
2814 if (!shift && !(i & 2))
2815 desc++;
2816 tx->descs[desc].qw[!!(i & 2)] |=
2817 (((u64)ahg[i + 1])
2818 << shift);
2819 shift = (shift + 32) & 63;
2820 }
2821 }
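/*
 * Worked example of the packing loop above for the maximum num_ahg of
 * 9 (mode SDMA_AHG_APPLY_UPDATE3, three descriptors consumed):
 * ahg[0] lands in the UPDATE1 field of descs[0].qw[1]; ahg[1]/ahg[2]
 * fill the low/high 32 bits of descs[1].qw[0]; ahg[3]/ahg[4] fill
 * descs[1].qw[1]; ahg[5]/ahg[6] fill descs[2].qw[0]; and ahg[7]/ahg[8]
 * fill descs[2].qw[1].  With num_ahg == 1 only descs[0] is touched,
 * and with 2..5 updates only descs[0] and descs[1] are used.
 */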
2822
2823 /**
2824 * sdma_ahg_alloc - allocate an AHG entry
2825 * @sde: engine to allocate from
2826 *
2827 * Return:
2828 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
2829 * -ENOSPC if an entry is not available
2830 */
2831 int sdma_ahg_alloc(struct sdma_engine *sde)
2832 {
2833 int nr;
2834 int oldbit;
2835
2836 if (!sde) {
2837 trace_hfi1_ahg_allocate(sde, -EINVAL);
2838 return -EINVAL;
2839 }
2840 while (1) {
2841 nr = ffz(ACCESS_ONCE(sde->ahg_bits));
2842 if (nr > 31) {
2843 trace_hfi1_ahg_allocate(sde, -ENOSPC);
2844 return -ENOSPC;
2845 }
2846 oldbit = test_and_set_bit(nr, &sde->ahg_bits);
2847 if (!oldbit)
2848 break;
2849 cpu_relax();
2850 }
2851 trace_hfi1_ahg_allocate(sde, nr);
2852 return nr;
2853 }
2854
2855 /**
2856 * sdma_ahg_free - free an AHG entry
2857 * @sde: engine to return AHG entry
2858 * @ahg_index: index to free
2859 *
2860 * This routine frees the indicated AHG entry.
2861 */
2862 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
2863 {
2864 if (!sde)
2865 return;
2866 trace_hfi1_ahg_deallocate(sde, ahg_index);
2867 if (ahg_index < 0 || ahg_index > 31)
2868 return;
2869 clear_bit(ahg_index, &sde->ahg_bits);
2870 }
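/*
 * Illustrative sketch (not part of the driver): typical pairing of the
 * allocator and free routines above.  The returned index is what a
 * caller would pass as @ahg_entry to _sdma_txreq_ahgadd(); falling back
 * to full headers when no entry is available is only an example policy.
 *
 *	int ahg_idx = sdma_ahg_alloc(sde);
 *
 *	if (ahg_idx < 0) {
 *		// no AHG entry (or AHG unsupported): build full headers
 *	} else {
 *		// ... use ahg_idx while the header template is live ...
 *		sdma_ahg_free(sde, ahg_idx);
 *	}
 */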
2871
2872 /*
2873 * SPC freeze handling for SDMA engines. Called when the driver knows
2874 * the SPC is going into a freeze but before the freeze is fully
2875 * settled. This is generally triggered by an error interrupt.
2876 *
2877 * This event will pull the engine out of running so no more entries can be
2878 * added to the engine's queue.
2879 */
2880 void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
2881 {
2882 int i;
2883 enum sdma_events event = link_down ? sdma_event_e85_link_down :
2884 sdma_event_e80_hw_freeze;
2885
2886 /* set up the wait but do not wait here */
2887 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
2888
2889 /* tell all engines to stop running and wait */
2890 for (i = 0; i < dd->num_sdma; i++)
2891 sdma_process_event(&dd->per_sdma[i], event);
2892
2893 /* sdma_freeze() will wait for all engines to have stopped */
2894 }
2895
2896 /*
2897 * SPC freeze handling for SDMA engines. Called when the driver knows
2898 * the SPC is fully frozen.
2899 */
2900 void sdma_freeze(struct hfi1_devdata *dd)
2901 {
2902 int i;
2903 int ret;
2904
2905 /*
2906 * Make sure all engines have moved out of the running state before
2907 * continuing.
2908 */
2909 ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
2910 atomic_read(&dd->sdma_unfreeze_count) <= 0);
2911 	/* if interrupted, or the count is negative (unloading), just exit */
2912 if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
2913 return;
2914
2915 /* set up the count for the next wait */
2916 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
2917
2918 /* tell all engines that the SPC is frozen, they can start cleaning */
2919 for (i = 0; i < dd->num_sdma; i++)
2920 sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
2921
2922 /*
2923 * Wait for everyone to finish software clean before exiting. The
2924 * software clean will read engine CSRs, so must be completed before
2925 * the next step, which will clear the engine CSRs.
2926 */
2927 (void) wait_event_interruptible(dd->sdma_unfreeze_wq,
2928 atomic_read(&dd->sdma_unfreeze_count) <= 0);
2929 /* no need to check results - done no matter what */
2930 }
2931
2932 /*
2933 * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
2934 *
2935 * The SPC freeze acts like a SDMA halt and a hardware clean combined. All
2936 * that is left is a software clean. We could do it after the SPC is fully
2937 * frozen, but then we'd have to add another state to wait for the unfreeze.
2938 * Instead, just defer the software clean until the unfreeze step.
2939 */
2940 void sdma_unfreeze(struct hfi1_devdata *dd)
2941 {
2942 int i;
2943
2944 	/* tell all engines to start freeze clean up */
2945 for (i = 0; i < dd->num_sdma; i++)
2946 sdma_process_event(&dd->per_sdma[i],
2947 sdma_event_e82_hw_unfreeze);
2948 }
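/*
 * Summary sketch of the freeze sequence implemented by the three
 * routines above (the caller ordering is inferred from their comments):
 * an SPC freeze error path first calls sdma_freeze_notify() so every
 * engine leaves the running state, then sdma_freeze() once the SPC is
 * fully frozen to wait for the halts and trigger the software clean,
 * and finally sdma_unfreeze() after the SPC unfreeze to restart the
 * engines via sdma_event_e82_hw_unfreeze.
 */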
2949
2950 /**
2951 * _sdma_engine_progress_schedule() - schedule progress on engine
2952 * @sde: sdma_engine to schedule progress
2953 *
2954 */
2955 void _sdma_engine_progress_schedule(
2956 struct sdma_engine *sde)
2957 {
2958 trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
2959 /* assume we have selected a good cpu */
2960 write_csr(sde->dd,
2961 		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), sde->progress_mask);
2962 }
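/*
 * Note on the CSR arithmetic above: the SDMA interrupt sources start at
 * bit IS_SDMA_START of the CCE interrupt space, and the CCE_INT_FORCE
 * registers are 64 bits wide and 8 bytes apart, so
 * 8 * (IS_SDMA_START / 64) selects the force register that holds the
 * SDMA source bits.  sde->progress_mask is presumed to be initialized
 * elsewhere to this engine's bit within that register.
 */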