drivers/staging/rdma/hfi1/sdma.c
1/*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51#include <linux/spinlock.h>
52#include <linux/seqlock.h>
53#include <linux/netdevice.h>
54#include <linux/moduleparam.h>
55#include <linux/bitops.h>
56#include <linux/timer.h>
57#include <linux/vmalloc.h>
58
59#include "hfi.h"
60#include "common.h"
61#include "qp.h"
62#include "sdma.h"
63#include "iowait.h"
64#include "trace.h"
65
 66/* must be a power of 2, >= 64 and <= 32768 */
67#define SDMA_DESCQ_CNT 1024
68#define INVALID_TAIL 0xffff
69
70static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
71module_param(sdma_descq_cnt, uint, S_IRUGO);
72MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
73
74static uint sdma_idle_cnt = 250;
75module_param(sdma_idle_cnt, uint, S_IRUGO);
76MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)");
77
78uint mod_num_sdma;
79module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
80MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");
81
82#define SDMA_WAIT_BATCH_SIZE 20
 84/* max wait time for an SDMA engine to indicate it has halted */
84#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
85/* all SDMA engine errors that cause a halt */
86
87#define SD(name) SEND_DMA_##name
88#define ALL_SDMA_ENG_HALT_ERRS \
89 (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
90 | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
91 | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
92 | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
93 | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
94 | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
95 | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
96 | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
97 | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
98 | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
99 | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
100 | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
101 | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
102 | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
103 | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
104 | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
105 | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
106 | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
107
108/* sdma_sendctrl operations */
109#define SDMA_SENDCTRL_OP_ENABLE (1U << 0)
110#define SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
111#define SDMA_SENDCTRL_OP_HALT (1U << 2)
112#define SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
113
114/* handle long defines */
115#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
116SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
117#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
118SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
119
120static const char * const sdma_state_names[] = {
121 [sdma_state_s00_hw_down] = "s00_HwDown",
122 [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
123 [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
124 [sdma_state_s20_idle] = "s20_Idle",
125 [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
126 [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
127 [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
128 [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
129 [sdma_state_s80_hw_freeze] = "s80_HwFreeze",
130 [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
131 [sdma_state_s99_running] = "s99_Running",
132};
133
134static const char * const sdma_event_names[] = {
135 [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
136 [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
137 [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
138 [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
139 [sdma_event_e30_go_running] = "e30_GoRunning",
140 [sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
141 [sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
142 [sdma_event_e60_hw_halted] = "e60_HwHalted",
143 [sdma_event_e70_go_idle] = "e70_GoIdle",
144 [sdma_event_e80_hw_freeze] = "e80_HwFreeze",
145 [sdma_event_e81_hw_frozen] = "e81_HwFrozen",
146 [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
147 [sdma_event_e85_link_down] = "e85_LinkDown",
148 [sdma_event_e90_sw_halted] = "e90_SwHalted",
149};
150
151static const struct sdma_set_state_action sdma_action_table[] = {
152 [sdma_state_s00_hw_down] = {
153 .go_s99_running_tofalse = 1,
154 .op_enable = 0,
155 .op_intenable = 0,
156 .op_halt = 0,
157 .op_cleanup = 0,
158 },
159 [sdma_state_s10_hw_start_up_halt_wait] = {
160 .op_enable = 0,
161 .op_intenable = 0,
162 .op_halt = 1,
163 .op_cleanup = 0,
164 },
165 [sdma_state_s15_hw_start_up_clean_wait] = {
166 .op_enable = 0,
167 .op_intenable = 1,
168 .op_halt = 0,
169 .op_cleanup = 1,
170 },
171 [sdma_state_s20_idle] = {
172 .op_enable = 0,
173 .op_intenable = 1,
174 .op_halt = 0,
175 .op_cleanup = 0,
176 },
177 [sdma_state_s30_sw_clean_up_wait] = {
178 .op_enable = 0,
179 .op_intenable = 0,
180 .op_halt = 0,
181 .op_cleanup = 0,
182 },
183 [sdma_state_s40_hw_clean_up_wait] = {
184 .op_enable = 0,
185 .op_intenable = 0,
186 .op_halt = 0,
187 .op_cleanup = 1,
188 },
189 [sdma_state_s50_hw_halt_wait] = {
190 .op_enable = 0,
191 .op_intenable = 0,
192 .op_halt = 0,
193 .op_cleanup = 0,
194 },
195 [sdma_state_s60_idle_halt_wait] = {
196 .go_s99_running_tofalse = 1,
197 .op_enable = 0,
198 .op_intenable = 0,
199 .op_halt = 1,
200 .op_cleanup = 0,
201 },
202 [sdma_state_s80_hw_freeze] = {
203 .op_enable = 0,
204 .op_intenable = 0,
205 .op_halt = 0,
206 .op_cleanup = 0,
207 },
208 [sdma_state_s82_freeze_sw_clean] = {
209 .op_enable = 0,
210 .op_intenable = 0,
211 .op_halt = 0,
212 .op_cleanup = 0,
213 },
214 [sdma_state_s99_running] = {
215 .op_enable = 1,
216 .op_intenable = 1,
217 .op_halt = 0,
218 .op_cleanup = 0,
219 .go_s99_running_totrue = 1,
220 },
221};
222
223#define SDMA_TAIL_UPDATE_THRESH 0x1F
224
225/* declare all statics here rather than keep sorting */
226static void sdma_complete(struct kref *);
227static void sdma_finalput(struct sdma_state *);
228static void sdma_get(struct sdma_state *);
229static void sdma_hw_clean_up_task(unsigned long);
230static void sdma_put(struct sdma_state *);
231static void sdma_set_state(struct sdma_engine *, enum sdma_states);
232static void sdma_start_hw_clean_up(struct sdma_engine *);
233static void sdma_start_sw_clean_up(struct sdma_engine *);
234static void sdma_sw_clean_up_task(unsigned long);
235static void sdma_sendctrl(struct sdma_engine *, unsigned);
236static void init_sdma_regs(struct sdma_engine *, u32, uint);
237static void sdma_process_event(
238 struct sdma_engine *sde,
239 enum sdma_events event);
240static void __sdma_process_event(
241 struct sdma_engine *sde,
242 enum sdma_events event);
243static void dump_sdma_state(struct sdma_engine *sde);
244static void sdma_make_progress(struct sdma_engine *sde, u64 status);
245static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
246static void sdma_flush_descq(struct sdma_engine *sde);
247
248/**
249 * sdma_state_name() - return state string from enum
250 * @state: state
251 */
252static const char *sdma_state_name(enum sdma_states state)
253{
254 return sdma_state_names[state];
255}
256
257static void sdma_get(struct sdma_state *ss)
258{
259 kref_get(&ss->kref);
260}
261
262static void sdma_complete(struct kref *kref)
263{
264 struct sdma_state *ss =
265 container_of(kref, struct sdma_state, kref);
266
267 complete(&ss->comp);
268}
269
270static void sdma_put(struct sdma_state *ss)
271{
272 kref_put(&ss->kref, sdma_complete);
273}
274
275static void sdma_finalput(struct sdma_state *ss)
276{
277 sdma_put(ss);
278 wait_for_completion(&ss->comp);
279}
280
281static inline void write_sde_csr(
282 struct sdma_engine *sde,
283 u32 offset0,
284 u64 value)
285{
286 write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
287}
288
289static inline u64 read_sde_csr(
290 struct sdma_engine *sde,
291 u32 offset0)
292{
293 return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
294}
295
296/*
297 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
298 * sdma engine 'sde' to drop to 0.
299 */
300static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
301 int pause)
302{
303 u64 off = 8 * sde->this_idx;
304 struct hfi1_devdata *dd = sde->dd;
305 int lcnt = 0;
306 u64 reg_prev;
307 u64 reg = 0;
308
309 while (1) {
310 reg_prev = reg;
311 reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
312
313 reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
314 reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
315 if (reg == 0)
316 break;
 317		/* counter is reset if occupancy count changes */
318 if (reg != reg_prev)
319 lcnt = 0;
320 if (lcnt++ > 500) {
321 /* timed out - bounce the link */
322 dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
 323				   __func__, sde->this_idx, (u32)reg);
324 queue_work(dd->pport->hfi1_wq,
325 &dd->pport->link_bounce_work);
326 break;
327 }
328 udelay(1);
329 }
330}
331
332/*
333 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
334 * and pause for credit return.
335 */
336void sdma_wait(struct hfi1_devdata *dd)
337{
338 int i;
339
340 for (i = 0; i < dd->num_sdma; i++) {
341 struct sdma_engine *sde = &dd->per_sdma[i];
342
343 sdma_wait_for_packet_egress(sde, 0);
344 }
345}
346
347static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
348{
349 u64 reg;
350
351 if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
352 return;
353 reg = cnt;
354 reg &= SD(DESC_CNT_CNT_MASK);
355 reg <<= SD(DESC_CNT_CNT_SHIFT);
356 write_sde_csr(sde, SD(DESC_CNT), reg);
357}
358
359/*
360 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
361 *
362 * Depending on timing there can be txreqs in two places:
363 * - in the descq ring
364 * - in the flush list
365 *
366 * To avoid ordering issues the descq ring needs to be flushed
367 * first followed by the flush list.
368 *
369 * This routine is called from two places
370 * - From a work queue item
371 * - Directly from the state machine just before setting the
372 * state to running
373 *
374 * Must be called with head_lock held
375 *
376 */
377static void sdma_flush(struct sdma_engine *sde)
378{
379 struct sdma_txreq *txp, *txp_next;
380 LIST_HEAD(flushlist);
381
382 /* flush from head to tail */
383 sdma_flush_descq(sde);
384 spin_lock(&sde->flushlist_lock);
385 /* copy flush list */
386 list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
387 list_del_init(&txp->list);
388 list_add_tail(&txp->list, &flushlist);
389 }
390 spin_unlock(&sde->flushlist_lock);
391 /* flush from flush list */
392 list_for_each_entry_safe(txp, txp_next, &flushlist, list) {
393 int drained = 0;
394 /* protect against complete modifying */
395 struct iowait *wait = txp->wait;
396
397 list_del_init(&txp->list);
398#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
399 trace_hfi1_sdma_out_sn(sde, txp->sn);
400 if (WARN_ON_ONCE(sde->head_sn != txp->sn))
401 dd_dev_err(sde->dd, "expected %llu got %llu\n",
402 sde->head_sn, txp->sn);
403 sde->head_sn++;
404#endif
405 sdma_txclean(sde->dd, txp);
406 if (wait)
407 drained = atomic_dec_and_test(&wait->sdma_busy);
408 if (txp->complete)
409 (*txp->complete)(txp, SDMA_TXREQ_S_ABORTED, drained);
410 if (wait && drained)
411 iowait_drain_wakeup(wait);
412 }
413}
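/*
 * Illustrative sketch only, not part of the driver: the per-txreq
 * completion pattern repeated in sdma_flush(), sdma_flush_descq() and
 * sdma_make_progress(), isolated here for clarity.  The wait pointer is
 * captured before the callback runs because ->complete() may modify the
 * txreq, and the drain waiter is only woken once the busy count reaches
 * zero.  The real paths also call sdma_txclean() on the txreq.
 */
static void demo_complete_one_txreq(struct sdma_txreq *txp, int status)
{
	/* capture before ->complete(), which may modify txp */
	struct iowait *wait = txp->wait;
	int drained = 0;

	if (wait)
		drained = atomic_dec_and_test(&wait->sdma_busy);
	if (txp->complete)
		(*txp->complete)(txp, status, drained);
	if (wait && drained)
		iowait_drain_wakeup(wait);
}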
414
415/*
416 * Fields a work request for flushing the descq ring
417 * and the flush list
418 *
419 * If the engine has been brought to running during
420 * the scheduling delay, the flush is ignored, assuming
421 * that the process of bringing the engine to running
422 * would have done this flush prior to going to running.
423 *
424 */
425static void sdma_field_flush(struct work_struct *work)
426{
427 unsigned long flags;
428 struct sdma_engine *sde =
429 container_of(work, struct sdma_engine, flush_worker);
430
431 write_seqlock_irqsave(&sde->head_lock, flags);
432 if (!__sdma_running(sde))
433 sdma_flush(sde);
434 write_sequnlock_irqrestore(&sde->head_lock, flags);
435}
436
437static void sdma_err_halt_wait(struct work_struct *work)
438{
439 struct sdma_engine *sde = container_of(work, struct sdma_engine,
440 err_halt_worker);
441 u64 statuscsr;
442 unsigned long timeout;
443
444 timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
445 while (1) {
446 statuscsr = read_sde_csr(sde, SD(STATUS));
447 statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
448 if (statuscsr)
449 break;
450 if (time_after(jiffies, timeout)) {
451 dd_dev_err(sde->dd,
452 "SDMA engine %d - timeout waiting for engine to halt\n",
453 sde->this_idx);
454 /*
455 * Continue anyway. This could happen if there was
456 * an uncorrectable error in the wrong spot.
457 */
458 break;
459 }
460 usleep_range(80, 120);
461 }
462
463 sdma_process_event(sde, sdma_event_e15_hw_halt_done);
464}
465
466static void sdma_start_err_halt_wait(struct sdma_engine *sde)
467{
468 schedule_work(&sde->err_halt_worker);
469}
470
471
472static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
473{
474 if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
475
476 unsigned index;
477 struct hfi1_devdata *dd = sde->dd;
478
479 for (index = 0; index < dd->num_sdma; index++) {
480 struct sdma_engine *curr_sdma = &dd->per_sdma[index];
481
482 if (curr_sdma != sde)
483 curr_sdma->progress_check_head =
484 curr_sdma->descq_head;
485 }
486 dd_dev_err(sde->dd,
487 "SDMA engine %d - check scheduled\n",
488 sde->this_idx);
489 mod_timer(&sde->err_progress_check_timer, jiffies + 10);
490 }
491}
492
493static void sdma_err_progress_check(unsigned long data)
494{
495 unsigned index;
496 struct sdma_engine *sde = (struct sdma_engine *)data;
497
498 dd_dev_err(sde->dd, "SDE progress check event\n");
499 for (index = 0; index < sde->dd->num_sdma; index++) {
500 struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
501 unsigned long flags;
502
503 /* check progress on each engine except the current one */
504 if (curr_sde == sde)
505 continue;
506 /*
507 * We must lock interrupts when acquiring sde->lock,
508 * to avoid a deadlock if interrupt triggers and spins on
509 * the same lock on same CPU
510 */
511 spin_lock_irqsave(&curr_sde->tail_lock, flags);
512 write_seqlock(&curr_sde->head_lock);
513
514 /* skip non-running queues */
515 if (curr_sde->state.current_state != sdma_state_s99_running) {
516 write_sequnlock(&curr_sde->head_lock);
517 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
518 continue;
519 }
520
521 if ((curr_sde->descq_head != curr_sde->descq_tail) &&
522 (curr_sde->descq_head ==
523 curr_sde->progress_check_head))
524 __sdma_process_event(curr_sde,
525 sdma_event_e90_sw_halted);
526 write_sequnlock(&curr_sde->head_lock);
527 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
528 }
529 schedule_work(&sde->err_halt_worker);
530}
531
532static void sdma_hw_clean_up_task(unsigned long opaque)
533{
534 struct sdma_engine *sde = (struct sdma_engine *) opaque;
535 u64 statuscsr;
536
537 while (1) {
538#ifdef CONFIG_SDMA_VERBOSITY
539 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
540 sde->this_idx, slashstrip(__FILE__), __LINE__,
541 __func__);
542#endif
543 statuscsr = read_sde_csr(sde, SD(STATUS));
544 statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
545 if (statuscsr)
546 break;
547 udelay(10);
548 }
549
550 sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
551}
552
553static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
554{
555 smp_read_barrier_depends(); /* see sdma_update_tail() */
556 return sde->tx_ring[sde->tx_head & sde->sdma_mask];
557}
558
559/*
560 * flush ring for recovery
561 */
562static void sdma_flush_descq(struct sdma_engine *sde)
563{
564 u16 head, tail;
565 int progress = 0;
566 struct sdma_txreq *txp = get_txhead(sde);
567
568 /* The reason for some of the complexity of this code is that
569 * not all descriptors have corresponding txps. So, we have to
570 * be able to skip over descs until we wander into the range of
571 * the next txp on the list.
572 */
573 head = sde->descq_head & sde->sdma_mask;
574 tail = sde->descq_tail & sde->sdma_mask;
575 while (head != tail) {
576 /* advance head, wrap if needed */
577 head = ++sde->descq_head & sde->sdma_mask;
578 /* if now past this txp's descs, do the callback */
579 if (txp && txp->next_descq_idx == head) {
580 int drained = 0;
581 /* protect against complete modifying */
582 struct iowait *wait = txp->wait;
583
584 /* remove from list */
585 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
586 if (wait)
587 drained = atomic_dec_and_test(&wait->sdma_busy);
588#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
589 trace_hfi1_sdma_out_sn(sde, txp->sn);
590 if (WARN_ON_ONCE(sde->head_sn != txp->sn))
591 dd_dev_err(sde->dd, "expected %llu got %llu\n",
592 sde->head_sn, txp->sn);
593 sde->head_sn++;
594#endif
595 sdma_txclean(sde->dd, txp);
596 trace_hfi1_sdma_progress(sde, head, tail, txp);
597 if (txp->complete)
598 (*txp->complete)(
599 txp,
600 SDMA_TXREQ_S_ABORTED,
601 drained);
602 if (wait && drained)
603 iowait_drain_wakeup(wait);
604 /* see if there is another txp */
605 txp = get_txhead(sde);
606 }
607 progress++;
608 }
609 if (progress)
610 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
611}
612
613static void sdma_sw_clean_up_task(unsigned long opaque)
614{
615 struct sdma_engine *sde = (struct sdma_engine *) opaque;
616 unsigned long flags;
617
618 spin_lock_irqsave(&sde->tail_lock, flags);
619 write_seqlock(&sde->head_lock);
620
621 /*
622 * At this point, the following should always be true:
623 * - We are halted, so no more descriptors are getting retired.
624 * - We are not running, so no one is submitting new work.
625 * - Only we can send the e40_sw_cleaned, so we can't start
626 * running again until we say so. So, the active list and
627 * descq are ours to play with.
628 */
629
630
631 /*
632 * In the error clean up sequence, software clean must be called
633 * before the hardware clean so we can use the hardware head in
634 * the progress routine. A hardware clean or SPC unfreeze will
635 * reset the hardware head.
636 *
637 * Process all retired requests. The progress routine will use the
638 * latest physical hardware head - we are not running so speed does
639 * not matter.
640 */
641 sdma_make_progress(sde, 0);
642
643 sdma_flush(sde);
644
645 /*
646 * Reset our notion of head and tail.
647 * Note that the HW registers have been reset via an earlier
648 * clean up.
649 */
650 sde->descq_tail = 0;
651 sde->descq_head = 0;
652 sde->desc_avail = sdma_descq_freecnt(sde);
653 *sde->head_dma = 0;
654
655 __sdma_process_event(sde, sdma_event_e40_sw_cleaned);
656
657 write_sequnlock(&sde->head_lock);
658 spin_unlock_irqrestore(&sde->tail_lock, flags);
659}
660
661static void sdma_sw_tear_down(struct sdma_engine *sde)
662{
663 struct sdma_state *ss = &sde->state;
664
665 /* Releasing this reference means the state machine has stopped. */
666 sdma_put(ss);
667
668 /* stop waiting for all unfreeze events to complete */
669 atomic_set(&sde->dd->sdma_unfreeze_count, -1);
670 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
671}
672
673static void sdma_start_hw_clean_up(struct sdma_engine *sde)
674{
675 tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
676}
677
678static void sdma_start_sw_clean_up(struct sdma_engine *sde)
679{
680 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
681}
682
683static void sdma_set_state(struct sdma_engine *sde,
684 enum sdma_states next_state)
685{
686 struct sdma_state *ss = &sde->state;
687 const struct sdma_set_state_action *action = sdma_action_table;
688 unsigned op = 0;
689
690 trace_hfi1_sdma_state(
691 sde,
692 sdma_state_names[ss->current_state],
693 sdma_state_names[next_state]);
694
695 /* debugging bookkeeping */
696 ss->previous_state = ss->current_state;
697 ss->previous_op = ss->current_op;
698 ss->current_state = next_state;
699
700 if (ss->previous_state != sdma_state_s99_running
701 && next_state == sdma_state_s99_running)
702 sdma_flush(sde);
703
704 if (action[next_state].op_enable)
705 op |= SDMA_SENDCTRL_OP_ENABLE;
706
707 if (action[next_state].op_intenable)
708 op |= SDMA_SENDCTRL_OP_INTENABLE;
709
710 if (action[next_state].op_halt)
711 op |= SDMA_SENDCTRL_OP_HALT;
712
713 if (action[next_state].op_cleanup)
714 op |= SDMA_SENDCTRL_OP_CLEANUP;
715
716 if (action[next_state].go_s99_running_tofalse)
717 ss->go_s99_running = 0;
718
719 if (action[next_state].go_s99_running_totrue)
720 ss->go_s99_running = 1;
721
722 ss->current_op = op;
723 sdma_sendctrl(sde, ss->current_op);
724}
725
726/**
727 * sdma_get_descq_cnt() - called when device probed
728 *
729 * Return a validated descq count.
730 *
731 * This is currently only used in the verbs initialization to build the tx
732 * list.
733 *
734 * This will probably be deleted in favor of a more scalable approach to
735 * alloc tx's.
736 *
737 */
738u16 sdma_get_descq_cnt(void)
739{
740 u16 count = sdma_descq_cnt;
741
742 if (!count)
743 return SDMA_DESCQ_CNT;
 744	/* count must be a power of 2 in the range 64..32768 (inclusive).
 745	 * Otherwise return the default.
 746	 */
747 if (!is_power_of_2(count))
748 return SDMA_DESCQ_CNT;
 749	if (count < 64 || count > 32768)
750 return SDMA_DESCQ_CNT;
751 return count;
752}
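/*
 * Illustrative sketch only, not part of the driver: the same validation
 * rule enforced by sdma_get_descq_cnt() above, written as a free-standing
 * helper so the accepted range is easy to see.  Anything that is not a
 * power of 2 in the range 64..32768 falls back to the 1024-entry default.
 * For example, 2048 is accepted, while 100 or 65536 yield 1024.
 */
static inline unsigned int demo_validate_descq_cnt(unsigned int count)
{
	/* power of 2: exactly one bit set */
	int pow2 = count && !(count & (count - 1));

	if (!pow2 || count < 64 || count > 32768)
		return 1024;	/* SDMA_DESCQ_CNT */
	return count;
}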
 753
754/**
755 * sdma_select_engine_vl() - select sdma engine
756 * @dd: devdata
757 * @selector: a spreading factor
758 * @vl: this vl
759 *
760 *
761 * This function returns an engine based on the selector and a vl. The
762 * mapping fields are protected by RCU.
763 */
764struct sdma_engine *sdma_select_engine_vl(
765 struct hfi1_devdata *dd,
766 u32 selector,
767 u8 vl)
768{
769 struct sdma_vl_map *m;
770 struct sdma_map_elem *e;
771 struct sdma_engine *rval;
772
773 if (WARN_ON(vl > 8))
774 return NULL;
775
776 rcu_read_lock();
777 m = rcu_dereference(dd->sdma_map);
778 if (unlikely(!m)) {
779 rcu_read_unlock();
780 return NULL;
781 }
782 e = m->map[vl & m->mask];
783 rval = e->sde[selector & e->mask];
784 rcu_read_unlock();
785
786 trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
787 return rval;
788}
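/*
 * Illustrative sketch only, not part of the driver: the two-level lookup
 * above reduces to two mask-and-index steps on power-of-2 sized tables.
 * With an element mask of 0x3 (four engines assigned to the VL), the
 * selector simply round-robins over them: 0, 1, 2, 3, 0, 1, ...
 */
static inline unsigned int demo_masked_select(u32 selector, unsigned int mask)
{
	/* mask is (power-of-2 table size) - 1, as built by sdma_map_init() */
	return selector & mask;
}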
789
790/**
791 * sdma_select_engine_sc() - select sdma engine
792 * @dd: devdata
793 * @selector: a spreading factor
794 * @sc5: the 5 bit sc
795 *
796 *
797 * This function returns an engine based on the selector and an sc.
798 */
799struct sdma_engine *sdma_select_engine_sc(
800 struct hfi1_devdata *dd,
801 u32 selector,
802 u8 sc5)
803{
804 u8 vl = sc_to_vlt(dd, sc5);
805
806 return sdma_select_engine_vl(dd, selector, vl);
807}
808
809/*
810 * Free the indicated map struct
811 */
812static void sdma_map_free(struct sdma_vl_map *m)
813{
814 int i;
815
816 for (i = 0; m && i < m->actual_vls; i++)
817 kfree(m->map[i]);
818 kfree(m);
819}
820
821/*
822 * Handle RCU callback
823 */
824static void sdma_map_rcu_callback(struct rcu_head *list)
825{
826 struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
827
828 sdma_map_free(m);
829}
830
831/**
832 * sdma_map_init - called when # vls change
833 * @dd: hfi1_devdata
834 * @port: port number
835 * @num_vls: number of vls
836 * @vl_engines: per vl engine mapping (optional)
837 *
838 * This routine changes the mapping based on the number of vls.
839 *
840 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
 841 * implies auto computing the loading and giving each VL a uniform
842 * distribution of engines per VL.
843 *
844 * The auto algorithm computes the sde_per_vl and the number of extra
845 * engines. Any extra engines are added from the last VL on down.
846 *
847 * rcu locking is used here to control access to the mapping fields.
848 *
849 * If either the num_vls or num_sdma are non-power of 2, the array sizes
850 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
851 * up to the next highest power of 2 and the first entry is reused
852 * in a round robin fashion.
853 *
854 * If an error occurs the map change is not done and the mapping is
855 * not changed.
856 *
857 */
858int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
859{
860 int i, j;
861 int extra, sde_per_vl;
862 int engine = 0;
863 u8 lvl_engines[OPA_MAX_VLS];
864 struct sdma_vl_map *oldmap, *newmap;
865
866 if (!(dd->flags & HFI1_HAS_SEND_DMA))
867 return 0;
868
869 if (!vl_engines) {
870 /* truncate divide */
871 sde_per_vl = dd->num_sdma / num_vls;
872 /* extras */
873 extra = dd->num_sdma % num_vls;
874 vl_engines = lvl_engines;
875 /* add extras from last vl down */
876 for (i = num_vls - 1; i >= 0; i--, extra--)
877 vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
878 }
879 /* build new map */
880 newmap = kzalloc(
881 sizeof(struct sdma_vl_map) +
882 roundup_pow_of_two(num_vls) *
883 sizeof(struct sdma_map_elem *),
884 GFP_KERNEL);
885 if (!newmap)
886 goto bail;
887 newmap->actual_vls = num_vls;
888 newmap->vls = roundup_pow_of_two(num_vls);
889 newmap->mask = (1 << ilog2(newmap->vls)) - 1;
890 for (i = 0; i < newmap->vls; i++) {
891 /* save for wrap around */
892 int first_engine = engine;
893
894 if (i < newmap->actual_vls) {
895 int sz = roundup_pow_of_two(vl_engines[i]);
896
897 /* only allocate once */
898 newmap->map[i] = kzalloc(
899 sizeof(struct sdma_map_elem) +
900 sz * sizeof(struct sdma_engine *),
901 GFP_KERNEL);
902 if (!newmap->map[i])
903 goto bail;
904 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
905 /* assign engines */
906 for (j = 0; j < sz; j++) {
907 newmap->map[i]->sde[j] =
908 &dd->per_sdma[engine];
909 if (++engine >= first_engine + vl_engines[i])
910 /* wrap back to first engine */
911 engine = first_engine;
912 }
913 } else {
914 /* just re-use entry without allocating */
915 newmap->map[i] = newmap->map[i % num_vls];
916 }
917 engine = first_engine + vl_engines[i];
918 }
919 /* newmap in hand, save old map */
920 spin_lock_irq(&dd->sde_map_lock);
921 oldmap = rcu_dereference_protected(dd->sdma_map,
922 lockdep_is_held(&dd->sde_map_lock));
923
924 /* publish newmap */
925 rcu_assign_pointer(dd->sdma_map, newmap);
926
927 spin_unlock_irq(&dd->sde_map_lock);
928 /* success, free any old map after grace period */
929 if (oldmap)
930 call_rcu(&oldmap->list, sdma_map_rcu_callback);
931 return 0;
932bail:
933 /* free any partial allocation */
934 sdma_map_free(newmap);
935 return -ENOMEM;
936}
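/*
 * Illustrative sketch only, not part of the driver: the "auto" engine
 * spreading performed above when vl_engines is NULL, isolated for
 * clarity.  Extra engines are handed out from the last VL downward;
 * e.g. 16 engines over 3 VLs gives vl_engines[] = { 5, 5, 6 }.
 */
static void demo_spread_engines(unsigned int num_sdma, unsigned int num_vls,
				unsigned int *vl_engines)
{
	unsigned int sde_per_vl = num_sdma / num_vls;	/* truncating divide */
	int extra = num_sdma % num_vls;			/* leftover engines */
	int i;

	for (i = num_vls - 1; i >= 0; i--, extra--)
		vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
}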
937
938/*
939 * Clean up allocated memory.
940 *
 941 * This routine can be called regardless of the success of sdma_init().
942 *
943 */
944static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
945{
946 size_t i;
947 struct sdma_engine *sde;
948
949 if (dd->sdma_pad_dma) {
950 dma_free_coherent(&dd->pcidev->dev, 4,
951 (void *)dd->sdma_pad_dma,
952 dd->sdma_pad_phys);
953 dd->sdma_pad_dma = NULL;
954 dd->sdma_pad_phys = 0;
955 }
956 if (dd->sdma_heads_dma) {
957 dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
958 (void *)dd->sdma_heads_dma,
959 dd->sdma_heads_phys);
960 dd->sdma_heads_dma = NULL;
961 dd->sdma_heads_phys = 0;
962 }
963 for (i = 0; dd->per_sdma && i < num_engines; ++i) {
964 sde = &dd->per_sdma[i];
965
966 sde->head_dma = NULL;
967 sde->head_phys = 0;
968
969 if (sde->descq) {
970 dma_free_coherent(
971 &dd->pcidev->dev,
972 sde->descq_cnt * sizeof(u64[2]),
973 sde->descq,
974 sde->descq_phys
975 );
976 sde->descq = NULL;
977 sde->descq_phys = 0;
978 }
 979		kvfree(sde->tx_ring);
980 sde->tx_ring = NULL;
981 }
982 spin_lock_irq(&dd->sde_map_lock);
983 kfree(rcu_access_pointer(dd->sdma_map));
984 RCU_INIT_POINTER(dd->sdma_map, NULL);
985 spin_unlock_irq(&dd->sde_map_lock);
986 synchronize_rcu();
987 kfree(dd->per_sdma);
988 dd->per_sdma = NULL;
989}
990
991/**
992 * sdma_init() - called when device probed
993 * @dd: hfi1_devdata
994 * @port: port number (currently only zero)
995 *
996 * sdma_init initializes the specified number of engines.
997 *
 998 * The code initializes each sde and its csrs. Interrupts
999 * are not required to be enabled.
1000 *
1001 * Returns:
1002 * 0 - success, -errno on failure
1003 */
1004int sdma_init(struct hfi1_devdata *dd, u8 port)
1005{
1006 unsigned this_idx;
1007 struct sdma_engine *sde;
1008 u16 descq_cnt;
1009 void *curr_head;
1010 struct hfi1_pportdata *ppd = dd->pport + port;
1011 u32 per_sdma_credits;
1012 uint idle_cnt = sdma_idle_cnt;
1013 size_t num_engines = dd->chip_sdma_engines;
1014
1015 if (!HFI1_CAP_IS_KSET(SDMA)) {
1016 HFI1_CAP_CLEAR(SDMA_AHG);
1017 return 0;
1018 }
1019 if (mod_num_sdma &&
1020 /* can't exceed chip support */
1021 mod_num_sdma <= dd->chip_sdma_engines &&
1022 /* count must be >= vls */
1023 mod_num_sdma >= num_vls)
1024 num_engines = mod_num_sdma;
1025
1026 dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
1027 dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
1028 dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
1029 dd->chip_sdma_mem_size);
1030
1031 per_sdma_credits =
1032 dd->chip_sdma_mem_size/(num_engines * SDMA_BLOCK_SIZE);
1033
1034 /* set up freeze waitqueue */
1035 init_waitqueue_head(&dd->sdma_unfreeze_wq);
1036 atomic_set(&dd->sdma_unfreeze_count, 0);
1037
1038 descq_cnt = sdma_get_descq_cnt();
1039 dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
1040 num_engines, descq_cnt);
1041
1042 /* alloc memory for array of send engines */
1043 dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
1044 if (!dd->per_sdma)
1045 return -ENOMEM;
1046
1047 idle_cnt = ns_to_cclock(dd, idle_cnt);
1048 /* Allocate memory for SendDMA descriptor FIFOs */
1049 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1050 sde = &dd->per_sdma[this_idx];
1051 sde->dd = dd;
1052 sde->ppd = ppd;
1053 sde->this_idx = this_idx;
1054 sde->descq_cnt = descq_cnt;
1055 sde->desc_avail = sdma_descq_freecnt(sde);
1056 sde->sdma_shift = ilog2(descq_cnt);
1057 sde->sdma_mask = (1 << sde->sdma_shift) - 1;
1058 sde->descq_full_count = 0;
1059
1060 /* Create a mask for all 3 chip interrupt sources */
1061 sde->imask = (u64)1 << (0*TXE_NUM_SDMA_ENGINES + this_idx)
1062 | (u64)1 << (1*TXE_NUM_SDMA_ENGINES + this_idx)
1063 | (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
1064 /* Create a mask specifically for sdma_idle */
1065 sde->idle_mask =
1066 (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
1067 /* Create a mask specifically for sdma_progress */
1068 sde->progress_mask =
1069 (u64)1 << (TXE_NUM_SDMA_ENGINES + this_idx);
1070 spin_lock_init(&sde->tail_lock);
1071 seqlock_init(&sde->head_lock);
1072 spin_lock_init(&sde->senddmactrl_lock);
1073 spin_lock_init(&sde->flushlist_lock);
 1074		/* ensure there is always a zero bit */
1075 sde->ahg_bits = 0xfffffffe00000000ULL;
1076
1077 sdma_set_state(sde, sdma_state_s00_hw_down);
1078
1079 /* set up reference counting */
1080 kref_init(&sde->state.kref);
1081 init_completion(&sde->state.comp);
1082
1083 INIT_LIST_HEAD(&sde->flushlist);
1084 INIT_LIST_HEAD(&sde->dmawait);
1085
1086 sde->tail_csr =
1087 get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
1088
1089 if (idle_cnt)
1090 dd->default_desc1 =
1091 SDMA_DESC1_HEAD_TO_HOST_FLAG;
1092 else
1093 dd->default_desc1 =
1094 SDMA_DESC1_INT_REQ_FLAG;
1095
1096 tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
1097 (unsigned long)sde);
1098
1099 tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
1100 (unsigned long)sde);
1101 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
1102 INIT_WORK(&sde->flush_worker, sdma_field_flush);
1103
1104 sde->progress_check_head = 0;
1105
1106 setup_timer(&sde->err_progress_check_timer,
1107 sdma_err_progress_check, (unsigned long)sde);
1108
1109 sde->descq = dma_zalloc_coherent(
1110 &dd->pcidev->dev,
1111 descq_cnt * sizeof(u64[2]),
1112 &sde->descq_phys,
1113 GFP_KERNEL
1114 );
1115 if (!sde->descq)
1116 goto bail;
1117 sde->tx_ring =
1118 kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
1119 GFP_KERNEL);
1120 if (!sde->tx_ring)
1121 sde->tx_ring =
1122 vzalloc(
1123 sizeof(struct sdma_txreq *) *
1124 descq_cnt);
1125 if (!sde->tx_ring)
1126 goto bail;
1127 }
1128
1129 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1130 /* Allocate memory for DMA of head registers to memory */
1131 dd->sdma_heads_dma = dma_zalloc_coherent(
1132 &dd->pcidev->dev,
1133 dd->sdma_heads_size,
1134 &dd->sdma_heads_phys,
1135 GFP_KERNEL
1136 );
1137 if (!dd->sdma_heads_dma) {
1138 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1139 goto bail;
1140 }
1141
1142 /* Allocate memory for pad */
1143 dd->sdma_pad_dma = dma_zalloc_coherent(
1144 &dd->pcidev->dev,
1145 sizeof(u32),
1146 &dd->sdma_pad_phys,
1147 GFP_KERNEL
1148 );
1149 if (!dd->sdma_pad_dma) {
1150 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1151 goto bail;
1152 }
1153
1154 /* assign each engine to different cacheline and init registers */
1155 curr_head = (void *)dd->sdma_heads_dma;
1156 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1157 unsigned long phys_offset;
1158
1159 sde = &dd->per_sdma[this_idx];
1160
1161 sde->head_dma = curr_head;
1162 curr_head += L1_CACHE_BYTES;
1163 phys_offset = (unsigned long)sde->head_dma -
1164 (unsigned long)dd->sdma_heads_dma;
1165 sde->head_phys = dd->sdma_heads_phys + phys_offset;
1166 init_sdma_regs(sde, per_sdma_credits, idle_cnt);
1167 }
1168 dd->flags |= HFI1_HAS_SEND_DMA;
1169 dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
1170 dd->num_sdma = num_engines;
1171 if (sdma_map_init(dd, port, ppd->vls_operational, NULL))
1172 goto bail;
1173 dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
1174 return 0;
1175
1176bail:
1177 sdma_clean(dd, num_engines);
1178 return -ENOMEM;
1179}
1180
1181/**
1182 * sdma_all_running() - called when the link goes up
1183 * @dd: hfi1_devdata
1184 *
1185 * This routine moves all engines to the running state.
1186 */
1187void sdma_all_running(struct hfi1_devdata *dd)
1188{
1189 struct sdma_engine *sde;
1190 unsigned int i;
1191
1192 /* move all engines to running */
1193 for (i = 0; i < dd->num_sdma; ++i) {
1194 sde = &dd->per_sdma[i];
1195 sdma_process_event(sde, sdma_event_e30_go_running);
1196 }
1197}
1198
1199/**
1200 * sdma_all_idle() - called when the link goes down
1201 * @dd: hfi1_devdata
1202 *
1203 * This routine moves all engines to the idle state.
1204 */
1205void sdma_all_idle(struct hfi1_devdata *dd)
1206{
1207 struct sdma_engine *sde;
1208 unsigned int i;
1209
1210 /* idle all engines */
1211 for (i = 0; i < dd->num_sdma; ++i) {
1212 sde = &dd->per_sdma[i];
1213 sdma_process_event(sde, sdma_event_e70_go_idle);
1214 }
1215}
1216
1217/**
1218 * sdma_start() - called to kick off state processing for all engines
1219 * @dd: hfi1_devdata
1220 *
1221 * This routine is for kicking off the state processing for all required
1222 * sdma engines. Interrupts need to be working at this point.
1223 *
1224 */
1225void sdma_start(struct hfi1_devdata *dd)
1226{
1227 unsigned i;
1228 struct sdma_engine *sde;
1229
1230 /* kick off the engines state processing */
1231 for (i = 0; i < dd->num_sdma; ++i) {
1232 sde = &dd->per_sdma[i];
1233 sdma_process_event(sde, sdma_event_e10_go_hw_start);
1234 }
1235}
1236
1237/**
1238 * sdma_exit() - used when module is removed
1239 * @dd: hfi1_devdata
1240 */
1241void sdma_exit(struct hfi1_devdata *dd)
1242{
1243 unsigned this_idx;
1244 struct sdma_engine *sde;
1245
1246 for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
1247 ++this_idx) {
1248
1249 sde = &dd->per_sdma[this_idx];
1250 if (!list_empty(&sde->dmawait))
1251 dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
1252 sde->this_idx);
1253 sdma_process_event(sde, sdma_event_e00_go_hw_down);
1254
1255 del_timer_sync(&sde->err_progress_check_timer);
1256
1257 /*
1258 * This waits for the state machine to exit so it is not
1259 * necessary to kill the sdma_sw_clean_up_task to make sure
1260 * it is not running.
1261 */
1262 sdma_finalput(&sde->state);
1263 }
1264 sdma_clean(dd, dd->num_sdma);
1265}
1266
1267/*
1268 * unmap the indicated descriptor
1269 */
1270static inline void sdma_unmap_desc(
1271 struct hfi1_devdata *dd,
1272 struct sdma_desc *descp)
1273{
1274 switch (sdma_mapping_type(descp)) {
1275 case SDMA_MAP_SINGLE:
1276 dma_unmap_single(
1277 &dd->pcidev->dev,
1278 sdma_mapping_addr(descp),
1279 sdma_mapping_len(descp),
1280 DMA_TO_DEVICE);
1281 break;
1282 case SDMA_MAP_PAGE:
1283 dma_unmap_page(
1284 &dd->pcidev->dev,
1285 sdma_mapping_addr(descp),
1286 sdma_mapping_len(descp),
1287 DMA_TO_DEVICE);
1288 break;
1289 }
1290}
1291
1292/*
1293 * return the mode as indicated by the first
1294 * descriptor in the tx.
1295 */
1296static inline u8 ahg_mode(struct sdma_txreq *tx)
1297{
1298 return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1299 >> SDMA_DESC1_HEADER_MODE_SHIFT;
1300}
1301
1302/**
1303 * sdma_txclean() - clean tx of mappings, descp *kmalloc's
1304 * @dd: hfi1_devdata for unmapping
1305 * @tx: tx request to clean
1306 *
1307 * This is used in the progress routine to clean the tx or
1308 * by the ULP to toss an in-process tx build.
1309 *
1310 * The code can be called multiple times without issue.
1311 *
1312 */
1313void sdma_txclean(
1314 struct hfi1_devdata *dd,
1315 struct sdma_txreq *tx)
1316{
1317 u16 i;
1318
1319 if (tx->num_desc) {
1320 u8 skip = 0, mode = ahg_mode(tx);
1321
1322 /* unmap first */
1323 sdma_unmap_desc(dd, &tx->descp[0]);
1324 /* determine number of AHG descriptors to skip */
1325 if (mode > SDMA_AHG_APPLY_UPDATE1)
1326 skip = mode >> 1;
1327 for (i = 1 + skip; i < tx->num_desc; i++)
1328 sdma_unmap_desc(dd, &tx->descp[i]);
1329 tx->num_desc = 0;
1330 }
1331 kfree(tx->coalesce_buf);
1332 tx->coalesce_buf = NULL;
1333 /* kmalloc'ed descp */
1334 if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
1335 tx->desc_limit = ARRAY_SIZE(tx->descs);
1336 kfree(tx->descp);
1337 }
1338}
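/*
 * Illustrative sketch only, not part of the driver: the unmap-skip rule
 * used by sdma_txclean() above.  Descriptor 0 is always unmapped; for
 * the larger AHG modes the next (mode >> 1) descriptors presumably hold
 * inline header-update words rather than DMA mappings, so unmapping
 * resumes after them.  The apply_update1 parameter stands in for the
 * driver's SDMA_AHG_APPLY_UPDATE1 constant.
 */
static inline unsigned int demo_first_unmap_index(u8 mode, u8 apply_update1)
{
	unsigned int skip = 0;

	if (mode > apply_update1)
		skip = mode >> 1;
	return 1 + skip;	/* index of the next descriptor to unmap */
}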
1339
1340static inline u16 sdma_gethead(struct sdma_engine *sde)
1341{
1342 struct hfi1_devdata *dd = sde->dd;
1343 int use_dmahead;
1344 u16 hwhead;
1345
1346#ifdef CONFIG_SDMA_VERBOSITY
1347 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1348 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1349#endif
1350
1351retry:
1352 use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
1353 (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
1354 hwhead = use_dmahead ?
1355 (u16) le64_to_cpu(*sde->head_dma) :
1356 (u16) read_sde_csr(sde, SD(HEAD));
1357
1358 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
1359 u16 cnt;
1360 u16 swtail;
1361 u16 swhead;
1362 int sane;
1363
1364 swhead = sde->descq_head & sde->sdma_mask;
1365 /* this code is really bad for cache line trading */
1366 swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1367 cnt = sde->descq_cnt;
1368
1369 if (swhead < swtail)
1370 /* not wrapped */
1371 sane = (hwhead >= swhead) & (hwhead <= swtail);
1372 else if (swhead > swtail)
1373 /* wrapped around */
1374 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
1375 (hwhead <= swtail);
1376 else
1377 /* empty */
1378 sane = (hwhead == swhead);
1379
1380 if (unlikely(!sane)) {
1381 dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1382 sde->this_idx,
1383 use_dmahead ? "dma" : "kreg",
1384 hwhead, swhead, swtail, cnt);
1385 if (use_dmahead) {
1386 /* try one more time, using csr */
1387 use_dmahead = 0;
1388 goto retry;
1389 }
1390 /* proceed as if no progress */
1391 hwhead = swhead;
1392 }
1393 }
1394 return hwhead;
1395}
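/*
 * Illustrative sketch only, not part of the driver: the wrap-aware
 * sanity predicate applied in sdma_gethead() above.  The hardware head
 * is only trusted when it lies between the software head and tail,
 * accounting for ring wrap-around; otherwise the caller falls back to
 * the CSR value or assumes no progress.
 */
static inline int demo_hwhead_sane(u16 hwhead, u16 swhead, u16 swtail, u16 cnt)
{
	if (swhead < swtail)		/* not wrapped */
		return hwhead >= swhead && hwhead <= swtail;
	if (swhead > swtail)		/* wrapped around */
		return (hwhead >= swhead && hwhead < cnt) ||
		       hwhead <= swtail;
	return hwhead == swhead;	/* ring empty */
}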
1396
1397/*
1398 * This is called when there are send DMA descriptors that might be
1399 * available.
1400 *
1401 * This is called with head_lock held.
1402 */
1403static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
1404{
1405 struct iowait *wait, *nw;
1406 struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
1407 unsigned i, n = 0, seq;
1408 struct sdma_txreq *stx;
1409 struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
1410
1411#ifdef CONFIG_SDMA_VERBOSITY
1412 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
1413 slashstrip(__FILE__), __LINE__, __func__);
1414 dd_dev_err(sde->dd, "avail: %u\n", avail);
1415#endif
1416
1417 do {
1418 seq = read_seqbegin(&dev->iowait_lock);
1419 if (!list_empty(&sde->dmawait)) {
1420 /* at least one item */
1421 write_seqlock(&dev->iowait_lock);
1422 /* Harvest waiters wanting DMA descriptors */
1423 list_for_each_entry_safe(
1424 wait,
1425 nw,
1426 &sde->dmawait,
1427 list) {
1428 u16 num_desc = 0;
1429
1430 if (!wait->wakeup)
1431 continue;
1432 if (n == ARRAY_SIZE(waits))
1433 break;
1434 if (!list_empty(&wait->tx_head)) {
1435 stx = list_first_entry(
1436 &wait->tx_head,
1437 struct sdma_txreq,
1438 list);
1439 num_desc = stx->num_desc;
1440 }
1441 if (num_desc > avail)
1442 break;
1443 avail -= num_desc;
1444 list_del_init(&wait->list);
1445 waits[n++] = wait;
1446 }
1447 write_sequnlock(&dev->iowait_lock);
1448 break;
1449 }
1450 } while (read_seqretry(&dev->iowait_lock, seq));
1451
1452 for (i = 0; i < n; i++)
1453 waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
1454}
1455
1456/* head_lock must be held */
1457static void sdma_make_progress(struct sdma_engine *sde, u64 status)
1458{
1459 struct sdma_txreq *txp = NULL;
1460 int progress = 0;
1461 u16 hwhead, swhead, swtail;
1462 int idle_check_done = 0;
1463
1464 hwhead = sdma_gethead(sde);
1465
1466 /* The reason for some of the complexity of this code is that
1467 * not all descriptors have corresponding txps. So, we have to
1468 * be able to skip over descs until we wander into the range of
1469 * the next txp on the list.
1470 */
1471
1472retry:
1473 txp = get_txhead(sde);
1474 swhead = sde->descq_head & sde->sdma_mask;
1475 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1476 while (swhead != hwhead) {
1477 /* advance head, wrap if needed */
1478 swhead = ++sde->descq_head & sde->sdma_mask;
1479
1480 /* if now past this txp's descs, do the callback */
1481 if (txp && txp->next_descq_idx == swhead) {
1482 int drained = 0;
1483 /* protect against complete modifying */
1484 struct iowait *wait = txp->wait;
1485
1486 /* remove from list */
1487 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
1488 if (wait)
1489 drained = atomic_dec_and_test(&wait->sdma_busy);
1490#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
1491 trace_hfi1_sdma_out_sn(sde, txp->sn);
1492 if (WARN_ON_ONCE(sde->head_sn != txp->sn))
1493 dd_dev_err(sde->dd, "expected %llu got %llu\n",
1494 sde->head_sn, txp->sn);
1495 sde->head_sn++;
1496#endif
1497 sdma_txclean(sde->dd, txp);
1498 if (txp->complete)
1499 (*txp->complete)(
1500 txp,
1501 SDMA_TXREQ_S_OK,
1502 drained);
1503 if (wait && drained)
1504 iowait_drain_wakeup(wait);
1505 /* see if there is another txp */
1506 txp = get_txhead(sde);
1507 }
1508 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1509 progress++;
1510 }
1511
1512 /*
1513 * The SDMA idle interrupt is not guaranteed to be ordered with respect
 1514	 * to updates to the dma_head location in host memory. The head
1515 * value read might not be fully up to date. If there are pending
1516 * descriptors and the SDMA idle interrupt fired then read from the
1517 * CSR SDMA head instead to get the latest value from the hardware.
1518 * The hardware SDMA head should be read at most once in this invocation
 1519	 * of sdma_make_progress(), which is ensured by the idle_check_done flag.
1520 */
1521 if ((status & sde->idle_mask) && !idle_check_done) {
1522 swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1523 if (swtail != hwhead) {
1524 hwhead = (u16)read_sde_csr(sde, SD(HEAD));
1525 idle_check_done = 1;
1526 goto retry;
1527 }
1528 }
1529
1530 sde->last_status = status;
1531 if (progress)
1532 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
1533}
1534
1535/*
1536 * sdma_engine_interrupt() - interrupt handler for engine
1537 * @sde: sdma engine
1538 * @status: sdma interrupt reason
1539 *
1540 * Status is a mask of the 3 possible interrupts for this engine. It will
1541 * contain bits _only_ for this SDMA engine. It will contain at least one
1542 * bit, it may contain more.
1543 */
1544void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
1545{
1546 trace_hfi1_sdma_engine_interrupt(sde, status);
1547 write_seqlock(&sde->head_lock);
1548 sdma_set_desc_cnt(sde, sde->descq_cnt / 2);
1549 sdma_make_progress(sde, status);
1550 write_sequnlock(&sde->head_lock);
1551}
1552
1553/**
1554 * sdma_engine_error() - error handler for engine
1555 * @sde: sdma engine
1556 * @status: sdma interrupt reason
1557 */
1558void sdma_engine_error(struct sdma_engine *sde, u64 status)
1559{
1560 unsigned long flags;
1561
1562#ifdef CONFIG_SDMA_VERBOSITY
1563 dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1564 sde->this_idx,
1565 (unsigned long long)status,
1566 sdma_state_names[sde->state.current_state]);
1567#endif
1568 spin_lock_irqsave(&sde->tail_lock, flags);
1569 write_seqlock(&sde->head_lock);
1570 if (status & ALL_SDMA_ENG_HALT_ERRS)
1571 __sdma_process_event(sde, sdma_event_e60_hw_halted);
1572 if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
1573 dd_dev_err(sde->dd,
1574 "SDMA (%u) engine error: 0x%llx state %s\n",
1575 sde->this_idx,
1576 (unsigned long long)status,
1577 sdma_state_names[sde->state.current_state]);
1578 dump_sdma_state(sde);
1579 }
1580 write_sequnlock(&sde->head_lock);
1581 spin_unlock_irqrestore(&sde->tail_lock, flags);
1582}
1583
1584static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
1585{
1586 u64 set_senddmactrl = 0;
1587 u64 clr_senddmactrl = 0;
1588 unsigned long flags;
1589
1590#ifdef CONFIG_SDMA_VERBOSITY
1591 dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1592 sde->this_idx,
1593 (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
1594 (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
1595 (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
1596 (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
1597#endif
1598
1599 if (op & SDMA_SENDCTRL_OP_ENABLE)
1600 set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1601 else
1602 clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1603
1604 if (op & SDMA_SENDCTRL_OP_INTENABLE)
1605 set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1606 else
1607 clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1608
1609 if (op & SDMA_SENDCTRL_OP_HALT)
1610 set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1611 else
1612 clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1613
1614 spin_lock_irqsave(&sde->senddmactrl_lock, flags);
1615
1616 sde->p_senddmactrl |= set_senddmactrl;
1617 sde->p_senddmactrl &= ~clr_senddmactrl;
1618
1619 if (op & SDMA_SENDCTRL_OP_CLEANUP)
1620 write_sde_csr(sde, SD(CTRL),
1621 sde->p_senddmactrl |
1622 SD(CTRL_SDMA_CLEANUP_SMASK));
1623 else
1624 write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
1625
1626 spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
1627
1628#ifdef CONFIG_SDMA_VERBOSITY
1629 sdma_dumpstate(sde);
1630#endif
1631}
1632
1633static void sdma_setlengen(struct sdma_engine *sde)
1634{
1635#ifdef CONFIG_SDMA_VERBOSITY
1636 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1637 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1638#endif
1639
1640 /*
1641 * Set SendDmaLenGen and clear-then-set the MSB of the generation
1642 * count to enable generation checking and load the internal
1643 * generation counter.
1644 */
1645 write_sde_csr(sde, SD(LEN_GEN),
1646 (sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT)
1647 );
1648 write_sde_csr(sde, SD(LEN_GEN),
1649 ((sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT))
1650 | (4ULL << SD(LEN_GEN_GENERATION_SHIFT))
1651 );
1652}
1653
1654static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
1655{
1656 /* Commit writes to memory and advance the tail on the chip */
1657 smp_wmb(); /* see get_txhead() */
1658 writeq(tail, sde->tail_csr);
1659}
1660
1661/*
1662 * This is called when changing to state s10_hw_start_up_halt_wait as
1663 * a result of send buffer errors or send DMA descriptor errors.
1664 */
1665static void sdma_hw_start_up(struct sdma_engine *sde)
1666{
1667 u64 reg;
1668
1669#ifdef CONFIG_SDMA_VERBOSITY
1670 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1671 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1672#endif
1673
1674 sdma_setlengen(sde);
1675 sdma_update_tail(sde, 0); /* Set SendDmaTail */
1676 *sde->head_dma = 0;
1677
1678 reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
1679 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
1680 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
1681}
1682
1683#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
1684(r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
1685
1686#define SET_STATIC_RATE_CONTROL_SMASK(r) \
1687(r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
1688/*
1689 * set_sdma_integrity
1690 *
1691 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
1692 */
1693static void set_sdma_integrity(struct sdma_engine *sde)
1694{
1695 struct hfi1_devdata *dd = sde->dd;
1696 u64 reg;
1697
1698 if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
1699 return;
1700
1701 reg = hfi1_pkt_base_sdma_integrity(dd);
1702
1703 if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
1704 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
1705 else
1706 SET_STATIC_RATE_CONTROL_SMASK(reg);
1707
1708 write_sde_csr(sde, SD(CHECK_ENABLE), reg);
1709}
1710
1711
1712static void init_sdma_regs(
1713 struct sdma_engine *sde,
1714 u32 credits,
1715 uint idle_cnt)
1716{
1717 u8 opval, opmask;
1718#ifdef CONFIG_SDMA_VERBOSITY
1719 struct hfi1_devdata *dd = sde->dd;
1720
1721 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1722 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1723#endif
1724
1725 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
1726 sdma_setlengen(sde);
1727 sdma_update_tail(sde, 0); /* Set SendDmaTail */
1728 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
1729 write_sde_csr(sde, SD(DESC_CNT), 0);
1730 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
1731 write_sde_csr(sde, SD(MEMORY),
1732 ((u64)credits <<
1733 SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
1734 ((u64)(credits * sde->this_idx) <<
1735 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
1736 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
1737 set_sdma_integrity(sde);
1738 opmask = OPCODE_CHECK_MASK_DISABLED;
1739 opval = OPCODE_CHECK_VAL_DISABLED;
1740 write_sde_csr(sde, SD(CHECK_OPCODE),
1741 (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
1742 (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
1743}
1744
1745#ifdef CONFIG_SDMA_VERBOSITY
1746
1747#define sdma_dumpstate_helper0(reg) do { \
1748 csr = read_csr(sde->dd, reg); \
1749 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
1750 } while (0)
1751
1752#define sdma_dumpstate_helper(reg) do { \
1753 csr = read_sde_csr(sde, reg); \
1754 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
1755 #reg, sde->this_idx, csr); \
1756 } while (0)
1757
1758#define sdma_dumpstate_helper2(reg) do { \
1759 csr = read_csr(sde->dd, reg + (8 * i)); \
1760 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
1761 #reg, i, csr); \
1762 } while (0)
1763
1764void sdma_dumpstate(struct sdma_engine *sde)
1765{
1766 u64 csr;
1767 unsigned i;
1768
1769 sdma_dumpstate_helper(SD(CTRL));
1770 sdma_dumpstate_helper(SD(STATUS));
1771 sdma_dumpstate_helper0(SD(ERR_STATUS));
1772 sdma_dumpstate_helper0(SD(ERR_MASK));
1773 sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
1774 sdma_dumpstate_helper(SD(ENG_ERR_MASK));
1775
1776 for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
 1777		sdma_dumpstate_helper2(CCE_INT_STATUS);
1778 sdma_dumpstate_helper2(CCE_INT_MASK);
1779 sdma_dumpstate_helper2(CCE_INT_BLOCKED);
1780 }
1781
1782 sdma_dumpstate_helper(SD(TAIL));
1783 sdma_dumpstate_helper(SD(HEAD));
1784 sdma_dumpstate_helper(SD(PRIORITY_THLD));
 1785	sdma_dumpstate_helper(SD(IDLE_CNT));
1786 sdma_dumpstate_helper(SD(RELOAD_CNT));
1787 sdma_dumpstate_helper(SD(DESC_CNT));
1788 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
1789 sdma_dumpstate_helper(SD(MEMORY));
1790 sdma_dumpstate_helper0(SD(ENGINES));
1791 sdma_dumpstate_helper0(SD(MEM_SIZE));
1792 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
1793 sdma_dumpstate_helper(SD(BASE_ADDR));
1794 sdma_dumpstate_helper(SD(LEN_GEN));
1795 sdma_dumpstate_helper(SD(HEAD_ADDR));
1796 sdma_dumpstate_helper(SD(CHECK_ENABLE));
1797 sdma_dumpstate_helper(SD(CHECK_VL));
1798 sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
1799 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
1800 sdma_dumpstate_helper(SD(CHECK_SLID));
1801 sdma_dumpstate_helper(SD(CHECK_OPCODE));
1802}
1803#endif
1804
1805static void dump_sdma_state(struct sdma_engine *sde)
1806{
1807 struct hw_sdma_desc *descq;
1808 struct hw_sdma_desc *descqp;
1809 u64 desc[2];
1810 u64 addr;
1811 u8 gen;
1812 u16 len;
1813 u16 head, tail, cnt;
1814
1815 head = sde->descq_head & sde->sdma_mask;
1816 tail = sde->descq_tail & sde->sdma_mask;
1817 cnt = sdma_descq_freecnt(sde);
1818 descq = sde->descq;
1819
1820 dd_dev_err(sde->dd,
1821 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
1822 sde->this_idx,
1823 head,
1824 tail,
1825 cnt,
1826 !list_empty(&sde->flushlist));
1827
1828 /* print info for each entry in the descriptor queue */
1829 while (head != tail) {
1830 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
1831
1832 descqp = &sde->descq[head];
1833 desc[0] = le64_to_cpu(descqp->qw[0]);
1834 desc[1] = le64_to_cpu(descqp->qw[1]);
1835 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
1836 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
1837 'H' : '-';
1838 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
1839 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
1840 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
1841 & SDMA_DESC0_PHY_ADDR_MASK;
1842 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
1843 & SDMA_DESC1_GENERATION_MASK;
1844 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
1845 & SDMA_DESC0_BYTE_COUNT_MASK;
1846 dd_dev_err(sde->dd,
1847 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1848 head, flags, addr, gen, len);
1849 dd_dev_err(sde->dd,
1850 "\tdesc0:0x%016llx desc1 0x%016llx\n",
1851 desc[0], desc[1]);
1852 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
1853 dd_dev_err(sde->dd,
1854 "\taidx: %u amode: %u alen: %u\n",
1855 (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
 1856			   >> SDMA_DESC1_HEADER_INDEX_SHIFT),
1857 (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1858 >> SDMA_DESC1_HEADER_MODE_SHIFT),
1859 (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
1860 >> SDMA_DESC1_HEADER_DWS_SHIFT));
1861 head++;
1862 head &= sde->sdma_mask;
1863 }
1864}
1865
1866#define SDE_FMT \
1867 "SDE %u STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
1868/**
1869 * sdma_seqfile_dump_sde() - debugfs dump of sde
1870 * @s: seq file
1871 * @sde: send dma engine to dump
1872 *
1873 * This routine dumps the sde to the indicated seq file.
1874 */
1875void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
1876{
1877 u16 head, tail;
1878 struct hw_sdma_desc *descqp;
1879 u64 desc[2];
1880 u64 addr;
1881 u8 gen;
1882 u16 len;
1883
1884 head = sde->descq_head & sde->sdma_mask;
1885 tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1886 seq_printf(s, SDE_FMT, sde->this_idx,
1887 sdma_state_name(sde->state.current_state),
1888 (unsigned long long)read_sde_csr(sde, SD(CTRL)),
1889 (unsigned long long)read_sde_csr(sde, SD(STATUS)),
1890 (unsigned long long)read_sde_csr(sde,
1891 SD(ENG_ERR_STATUS)),
1892 (unsigned long long)read_sde_csr(sde, SD(TAIL)),
1893 tail,
1894 (unsigned long long)read_sde_csr(sde, SD(HEAD)),
1895 head,
1896 (unsigned long long)le64_to_cpu(*sde->head_dma),
1897 (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
1898 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
1899 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
1900 (unsigned long long)sde->last_status,
1901 (unsigned long long)sde->ahg_bits,
1902 sde->tx_tail,
1903 sde->tx_head,
1904 sde->descq_tail,
1905 sde->descq_head,
1906 !list_empty(&sde->flushlist),
1907 sde->descq_full_count,
1908 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
1909
1910 /* print info for each entry in the descriptor queue */
1911 while (head != tail) {
1912 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
1913
1914 descqp = &sde->descq[head];
1915 desc[0] = le64_to_cpu(descqp->qw[0]);
1916 desc[1] = le64_to_cpu(descqp->qw[1]);
1917 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
1918 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
1919 'H' : '-';
1920 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
1921 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
1922 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
1923 & SDMA_DESC0_PHY_ADDR_MASK;
1924 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
1925 & SDMA_DESC1_GENERATION_MASK;
1926 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
1927 & SDMA_DESC0_BYTE_COUNT_MASK;
1928 seq_printf(s,
1929 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1930 head, flags, addr, gen, len);
1931 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
1932 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
1933 (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
1934 >> SDMA_DESC1_HEADER_INDEX_SHIFT),
1935 (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1936 >> SDMA_DESC1_HEADER_MODE_SHIFT));
1937 head = (head + 1) & sde->sdma_mask;
1938 }
1939}
1940
1941/*
1942 * add the generation number into
1943 * the qw1 and return
1944 */
1945static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
1946{
1947 u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
1948
1949 qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
1950 qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
1951 << SDMA_DESC1_GENERATION_SHIFT;
1952 return qw1;
1953}
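/*
 * Worked example for add_gen(), assuming sdma_shift is log2 of the
 * descriptor ring size (that is how it is used here): with a
 * 1024-entry ring, descq_tail values 0..1023 yield generation 0,
 * 1024..2047 yield 1, 2048..3071 yield 2, 3072..4095 yield 3, and the
 * 2-bit generation then wraps back to 0.  The generation bits let a
 * reader of qw1 distinguish freshly written ring entries from stale
 * ones left over from earlier passes around the ring.
 */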
1954
1955/*
1956 * This routine submits the indicated tx.
1957 *
1958 * Space has already been guaranteed and
1959 * the tail side of the ring is locked.
1960 *
1961 * The hardware tail update is done
1962 * in the caller and that is facilitated
1963 * by returning the new tail.
1964 *
1965 * There is special case logic for ahg
1966 * to not add the generation number for
1967 * up to 2 descriptors that follow the
1968 * first descriptor.
1969 *
1970 */
1971static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
1972{
1973 int i;
1974 u16 tail;
1975 struct sdma_desc *descp = tx->descp;
1976 u8 skip = 0, mode = ahg_mode(tx);
1977
1978 tail = sde->descq_tail & sde->sdma_mask;
1979 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
1980 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
1981 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
1982 tail, &sde->descq[tail]);
1983 tail = ++sde->descq_tail & sde->sdma_mask;
1984 descp++;
1985 if (mode > SDMA_AHG_APPLY_UPDATE1)
1986 skip = mode >> 1;
1987 for (i = 1; i < tx->num_desc; i++, descp++) {
1988 u64 qw1;
1989
1990 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
1991 if (skip) {
1992 /* edits don't have generation */
1993 qw1 = descp->qw[1];
1994 skip--;
1995 } else {
1996 /* replace generation with real one for non-edits */
1997 qw1 = add_gen(sde, descp->qw[1]);
1998 }
1999 sde->descq[tail].qw[1] = cpu_to_le64(qw1);
2000 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
2001 tail, &sde->descq[tail]);
2002 tail = ++sde->descq_tail & sde->sdma_mask;
2003 }
2004 tx->next_descq_idx = tail;
2005#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2006 tx->sn = sde->tail_sn++;
2007 trace_hfi1_sdma_in_sn(sde, tx->sn);
2008 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
2009#endif
2010 sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
2011 sde->desc_avail -= tx->num_desc;
2012 return tail;
2013}
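/*
 * Note on the skip computation above: for the multi-update AHG modes,
 * mode >> 1 appears to equal the number of descriptors after the first
 * that carry AHG update words rather than payload (at most 2, matching
 * the header comment and _sdma_txreq_ahgadd() below).  Those "edit"
 * descriptors keep their original qw1 and no generation is inserted.
 */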
2014
2015/*
2016 * Check for progress
2017 */
2018static int sdma_check_progress(
2019 struct sdma_engine *sde,
2020 struct iowait *wait,
2021 struct sdma_txreq *tx)
2022{
2023 int ret;
2024
2025 sde->desc_avail = sdma_descq_freecnt(sde);
2026 if (tx->num_desc <= sde->desc_avail)
2027 return -EAGAIN;
2028 /* pulse the head_lock */
2029 if (wait && wait->sleep) {
2030 unsigned seq;
2031
2032 seq = raw_seqcount_begin(
2033 (const seqcount_t *)&sde->head_lock.seqcount);
2034 ret = wait->sleep(sde, wait, tx, seq);
2035 if (ret == -EAGAIN)
2036 sde->desc_avail = sdma_descq_freecnt(sde);
2037 } else
2038 ret = -EBUSY;
2039 return ret;
2040}
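/*
 * Return convention for sdma_check_progress(): -EAGAIN tells the
 * caller to retry the submit, either because enough descriptors have
 * freed up or because the iowait sleep callback asked for a retry;
 * -EBUSY is returned when no iowait/sleep callback was supplied and
 * the ring is still full; anything else is whatever the sleep callback
 * returned (typically -EIOCBQUEUED once the tx is queued on the
 * iowait).
 */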
2041
2042/**
2043 * sdma_send_txreq() - submit a tx req to ring
2044 * @sde: sdma engine to use
2045 * @wait: wait structure to use when full (may be NULL)
2046 * @tx: sdma_txreq to submit
2047 *
2048 * The call submits the tx into the ring. If an iowait structure is non-NULL,
2049 * the packet will be queued to the list in wait.
2050 *
2051 * Return:
2052 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
2053 * ring (wait == NULL)
2054 * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
2055 */
2056int sdma_send_txreq(struct sdma_engine *sde,
2057 struct iowait *wait,
2058 struct sdma_txreq *tx)
2059{
2060 int ret = 0;
2061 u16 tail;
2062 unsigned long flags;
2063
2064 /* user should have supplied entire packet */
2065 if (unlikely(tx->tlen))
2066 return -EINVAL;
2067 tx->wait = wait;
2068 spin_lock_irqsave(&sde->tail_lock, flags);
2069retry:
2070 if (unlikely(!__sdma_running(sde)))
2071 goto unlock_noconn;
2072 if (unlikely(tx->num_desc > sde->desc_avail))
2073 goto nodesc;
2074 tail = submit_tx(sde, tx);
2075 if (wait)
2076 atomic_inc(&wait->sdma_busy);
2077 sdma_update_tail(sde, tail);
2078unlock:
2079 spin_unlock_irqrestore(&sde->tail_lock, flags);
2080 return ret;
2081unlock_noconn:
2082 if (wait)
2083 atomic_inc(&wait->sdma_busy);
2084 tx->next_descq_idx = 0;
2085#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2086 tx->sn = sde->tail_sn++;
2087 trace_hfi1_sdma_in_sn(sde, tx->sn);
2088#endif
2089 spin_lock(&sde->flushlist_lock);
2090 list_add_tail(&tx->list, &sde->flushlist);
2091 spin_unlock(&sde->flushlist_lock);
2092 if (wait) {
2093 wait->tx_count++;
2094 wait->count += tx->num_desc;
2095 }
2096 schedule_work(&sde->flush_worker);
2097 ret = -ECOMM;
2098 goto unlock;
2099nodesc:
2100 ret = sdma_check_progress(sde, wait, tx);
2101 if (ret == -EAGAIN) {
2102 ret = 0;
2103 goto retry;
2104 }
2105 sde->descq_full_count++;
2106 goto unlock;
2107}
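/*
 * Typical call pattern, as an illustrative sketch only -- "priv" and
 * its s_sde/s_iowait fields are hypothetical caller state, and the
 * txreq is assumed to have been fully built (tlen == 0) beforehand:
 *
 *	ret = sdma_send_txreq(priv->s_sde, &priv->s_iowait, &tx->txreq);
 *	if (ret == -EIOCBQUEUED)
 *		return ret;	// parked on the iowait, resubmitted later
 *	if (ret)
 *		goto bail_tx;	// -EINVAL, -EBUSY or -ECOMM
 *	// 0: descriptors are in the ring and the hardware tail is updated
 */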
2108
2109/**
2110 * sdma_send_txlist() - submit a list of tx req to ring
2111 * @sde: sdma engine to use
2112 * @wait: wait structure to use when full (may be NULL)
2113 * @tx_list: list of sdma_txreqs to submit
2114 *
2115 * The call submits the list into the ring.
2116 *
2117 * If the iowait structure is non-NULL and not equal to the iowait list,
2118 * the unprocessed part of the list will be appended to the list in wait.
2119 *
2120 * In all cases, the tx_list will be updated so the head of the tx_list is
2121 * the list of descriptors that have yet to be transmitted.
2122 *
2123 * The intent of this call is to provide a more efficient
2124 * way of submitting multiple packets to SDMA while taking the
2125 * tail-side lock only once for the whole list.
2126 *
2127 * Return:
2128 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring
2129 * (wait == NULL)
2130 * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
2131 */
2132int sdma_send_txlist(struct sdma_engine *sde,
2133 struct iowait *wait,
2134 struct list_head *tx_list)
2135{
2136 struct sdma_txreq *tx, *tx_next;
2137 int ret = 0;
2138 unsigned long flags;
2139 u16 tail = INVALID_TAIL;
2140 int count = 0;
2141
2142 spin_lock_irqsave(&sde->tail_lock, flags);
2143retry:
2144 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2145 tx->wait = wait;
2146 if (unlikely(!__sdma_running(sde)))
2147 goto unlock_noconn;
2148 if (unlikely(tx->num_desc > sde->desc_avail))
2149 goto nodesc;
2150 if (unlikely(tx->tlen)) {
2151 ret = -EINVAL;
2152 goto update_tail;
2153 }
2154 list_del_init(&tx->list);
2155 tail = submit_tx(sde, tx);
2156 count++;
2157 if (tail != INVALID_TAIL &&
2158 (count & SDMA_TAIL_UPDATE_THRESH) == 0) {
2159 sdma_update_tail(sde, tail);
2160 tail = INVALID_TAIL;
2161 }
2162 }
2163update_tail:
2164 if (wait)
2165 atomic_add(count, &wait->sdma_busy);
2166 if (tail != INVALID_TAIL)
2167 sdma_update_tail(sde, tail);
2168 spin_unlock_irqrestore(&sde->tail_lock, flags);
2169 return ret;
2170unlock_noconn:
2171 spin_lock(&sde->flushlist_lock);
2172 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2173 tx->wait = wait;
2174 list_del_init(&tx->list);
2175 if (wait)
2176 atomic_inc(&wait->sdma_busy);
2177 tx->next_descq_idx = 0;
2178#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2179 tx->sn = sde->tail_sn++;
2180 trace_hfi1_sdma_in_sn(sde, tx->sn);
2181#endif
2182 list_add_tail(&tx->list, &sde->flushlist);
2183 if (wait) {
2184 wait->tx_count++;
2185 wait->count += tx->num_desc;
2186 }
2187 }
2188 spin_unlock(&sde->flushlist_lock);
2189 schedule_work(&sde->flush_worker);
2190 ret = -ECOMM;
2191 goto update_tail;
2192nodesc:
2193 ret = sdma_check_progress(sde, wait, tx);
2194 if (ret == -EAGAIN) {
2195 ret = 0;
2196 goto retry;
2197 }
2198 sde->descq_full_count++;
2199 goto update_tail;
2200}
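/*
 * Illustrative sketch of batched submission ("priv" and "tx" are
 * hypothetical caller state; how the requests are built is up to the
 * caller):
 *
 *	LIST_HEAD(tx_list);
 *
 *	// chain several built sdma_txreqs via their .list member
 *	list_add_tail(&tx->txreq.list, &tx_list);
 *	...
 *	ret = sdma_send_txlist(priv->s_sde, &priv->s_iowait, &tx_list);
 *	// on return, tx_list holds only the requests not yet submitted
 */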
2201
2202static void sdma_process_event(struct sdma_engine *sde,
2203 enum sdma_events event)
2204{
2205 unsigned long flags;
2206
2207 spin_lock_irqsave(&sde->tail_lock, flags);
2208 write_seqlock(&sde->head_lock);
2209
2210 __sdma_process_event(sde, event);
2211
2212 if (sde->state.current_state == sdma_state_s99_running)
2213 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
2214
2215 write_sequnlock(&sde->head_lock);
2216 spin_unlock_irqrestore(&sde->tail_lock, flags);
2217}
2218
2219static void __sdma_process_event(struct sdma_engine *sde,
2220 enum sdma_events event)
2221{
2222 struct sdma_state *ss = &sde->state;
2223 int need_progress = 0;
2224
2225 /* CONFIG SDMA temporary */
2226#ifdef CONFIG_SDMA_VERBOSITY
2227 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
2228 sdma_state_names[ss->current_state],
2229 sdma_event_names[event]);
2230#endif
2231
2232 switch (ss->current_state) {
2233 case sdma_state_s00_hw_down:
2234 switch (event) {
2235 case sdma_event_e00_go_hw_down:
2236 break;
2237 case sdma_event_e30_go_running:
2238 /*
2239 * If down, but running is requested (usually the result
2240 * of a link up), then we need to start up.
2241 * This can happen when hw down is requested while
2242 * bringing the link up with traffic active on
2243 * 7220, e.g. */
2244 ss->go_s99_running = 1;
2245 /* fall through and start dma engine */
2246 case sdma_event_e10_go_hw_start:
2247 /* This reference means the state machine is started */
2248 sdma_get(&sde->state);
2249 sdma_set_state(sde,
2250 sdma_state_s10_hw_start_up_halt_wait);
2251 break;
2252 case sdma_event_e15_hw_halt_done:
2253 break;
2254 case sdma_event_e25_hw_clean_up_done:
2255 break;
2256 case sdma_event_e40_sw_cleaned:
2257 sdma_sw_tear_down(sde);
2258 break;
2259 case sdma_event_e50_hw_cleaned:
2260 break;
2261 case sdma_event_e60_hw_halted:
2262 break;
2263 case sdma_event_e70_go_idle:
2264 break;
2265 case sdma_event_e80_hw_freeze:
2266 break;
2267 case sdma_event_e81_hw_frozen:
2268 break;
2269 case sdma_event_e82_hw_unfreeze:
2270 break;
2271 case sdma_event_e85_link_down:
2272 break;
2273 case sdma_event_e90_sw_halted:
2274 break;
2275 }
2276 break;
2277
2278 case sdma_state_s10_hw_start_up_halt_wait:
2279 switch (event) {
2280 case sdma_event_e00_go_hw_down:
2281 sdma_set_state(sde, sdma_state_s00_hw_down);
2282 sdma_sw_tear_down(sde);
2283 break;
2284 case sdma_event_e10_go_hw_start:
2285 break;
2286 case sdma_event_e15_hw_halt_done:
2287 sdma_set_state(sde,
2288 sdma_state_s15_hw_start_up_clean_wait);
2289 sdma_start_hw_clean_up(sde);
2290 break;
2291 case sdma_event_e25_hw_clean_up_done:
2292 break;
2293 case sdma_event_e30_go_running:
2294 ss->go_s99_running = 1;
2295 break;
2296 case sdma_event_e40_sw_cleaned:
2297 break;
2298 case sdma_event_e50_hw_cleaned:
2299 break;
2300 case sdma_event_e60_hw_halted:
2301 sdma_start_err_halt_wait(sde);
2302 break;
2303 case sdma_event_e70_go_idle:
2304 ss->go_s99_running = 0;
2305 break;
2306 case sdma_event_e80_hw_freeze:
2307 break;
2308 case sdma_event_e81_hw_frozen:
2309 break;
2310 case sdma_event_e82_hw_unfreeze:
2311 break;
2312 case sdma_event_e85_link_down:
2313 break;
2314 case sdma_event_e90_sw_halted:
2315 break;
2316 }
2317 break;
2318
2319 case sdma_state_s15_hw_start_up_clean_wait:
2320 switch (event) {
2321 case sdma_event_e00_go_hw_down:
2322 sdma_set_state(sde, sdma_state_s00_hw_down);
2323 sdma_sw_tear_down(sde);
2324 break;
2325 case sdma_event_e10_go_hw_start:
2326 break;
2327 case sdma_event_e15_hw_halt_done:
2328 break;
2329 case sdma_event_e25_hw_clean_up_done:
2330 sdma_hw_start_up(sde);
2331 sdma_set_state(sde, ss->go_s99_running ?
2332 sdma_state_s99_running :
2333 sdma_state_s20_idle);
2334 break;
2335 case sdma_event_e30_go_running:
2336 ss->go_s99_running = 1;
2337 break;
2338 case sdma_event_e40_sw_cleaned:
2339 break;
2340 case sdma_event_e50_hw_cleaned:
2341 break;
2342 case sdma_event_e60_hw_halted:
2343 break;
2344 case sdma_event_e70_go_idle:
2345 ss->go_s99_running = 0;
2346 break;
2347 case sdma_event_e80_hw_freeze:
2348 break;
2349 case sdma_event_e81_hw_frozen:
2350 break;
2351 case sdma_event_e82_hw_unfreeze:
2352 break;
2353 case sdma_event_e85_link_down:
2354 break;
2355 case sdma_event_e90_sw_halted:
2356 break;
2357 }
2358 break;
2359
2360 case sdma_state_s20_idle:
2361 switch (event) {
2362 case sdma_event_e00_go_hw_down:
2363 sdma_set_state(sde, sdma_state_s00_hw_down);
2364 sdma_sw_tear_down(sde);
2365 break;
2366 case sdma_event_e10_go_hw_start:
2367 break;
2368 case sdma_event_e15_hw_halt_done:
2369 break;
2370 case sdma_event_e25_hw_clean_up_done:
2371 break;
2372 case sdma_event_e30_go_running:
2373 sdma_set_state(sde, sdma_state_s99_running);
2374 ss->go_s99_running = 1;
2375 break;
2376 case sdma_event_e40_sw_cleaned:
2377 break;
2378 case sdma_event_e50_hw_cleaned:
2379 break;
2380 case sdma_event_e60_hw_halted:
2381 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2382 sdma_start_err_halt_wait(sde);
2383 break;
2384 case sdma_event_e70_go_idle:
2385 break;
2386 case sdma_event_e85_link_down:
2387 /* fall through */
2388 case sdma_event_e80_hw_freeze:
2389 sdma_set_state(sde, sdma_state_s80_hw_freeze);
2390 atomic_dec(&sde->dd->sdma_unfreeze_count);
2391 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2392 break;
2393 case sdma_event_e81_hw_frozen:
2394 break;
2395 case sdma_event_e82_hw_unfreeze:
2396 break;
2397 case sdma_event_e90_sw_halted:
2398 break;
2399 }
2400 break;
2401
2402 case sdma_state_s30_sw_clean_up_wait:
2403 switch (event) {
2404 case sdma_event_e00_go_hw_down:
2405 sdma_set_state(sde, sdma_state_s00_hw_down);
2406 break;
2407 case sdma_event_e10_go_hw_start:
2408 break;
2409 case sdma_event_e15_hw_halt_done:
2410 break;
2411 case sdma_event_e25_hw_clean_up_done:
2412 break;
2413 case sdma_event_e30_go_running:
2414 ss->go_s99_running = 1;
2415 break;
2416 case sdma_event_e40_sw_cleaned:
2417 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
2418 sdma_start_hw_clean_up(sde);
2419 break;
2420 case sdma_event_e50_hw_cleaned:
2421 break;
2422 case sdma_event_e60_hw_halted:
2423 break;
2424 case sdma_event_e70_go_idle:
2425 ss->go_s99_running = 0;
2426 break;
2427 case sdma_event_e80_hw_freeze:
2428 break;
2429 case sdma_event_e81_hw_frozen:
2430 break;
2431 case sdma_event_e82_hw_unfreeze:
2432 break;
2433 case sdma_event_e85_link_down:
2434 ss->go_s99_running = 0;
2435 break;
2436 case sdma_event_e90_sw_halted:
2437 break;
2438 }
2439 break;
2440
2441 case sdma_state_s40_hw_clean_up_wait:
2442 switch (event) {
2443 case sdma_event_e00_go_hw_down:
2444 sdma_set_state(sde, sdma_state_s00_hw_down);
2445 sdma_start_sw_clean_up(sde);
2446 break;
2447 case sdma_event_e10_go_hw_start:
2448 break;
2449 case sdma_event_e15_hw_halt_done:
2450 break;
2451 case sdma_event_e25_hw_clean_up_done:
2452 sdma_hw_start_up(sde);
2453 sdma_set_state(sde, ss->go_s99_running ?
2454 sdma_state_s99_running :
2455 sdma_state_s20_idle);
2456 break;
2457 case sdma_event_e30_go_running:
2458 ss->go_s99_running = 1;
2459 break;
2460 case sdma_event_e40_sw_cleaned:
2461 break;
2462 case sdma_event_e50_hw_cleaned:
2463 break;
2464 case sdma_event_e60_hw_halted:
2465 break;
2466 case sdma_event_e70_go_idle:
2467 ss->go_s99_running = 0;
2468 break;
2469 case sdma_event_e80_hw_freeze:
2470 break;
2471 case sdma_event_e81_hw_frozen:
2472 break;
2473 case sdma_event_e82_hw_unfreeze:
2474 break;
2475 case sdma_event_e85_link_down:
2476 ss->go_s99_running = 0;
2477 break;
2478 case sdma_event_e90_sw_halted:
2479 break;
2480 }
2481 break;
2482
2483 case sdma_state_s50_hw_halt_wait:
2484 switch (event) {
2485 case sdma_event_e00_go_hw_down:
2486 sdma_set_state(sde, sdma_state_s00_hw_down);
2487 sdma_start_sw_clean_up(sde);
2488 break;
2489 case sdma_event_e10_go_hw_start:
2490 break;
2491 case sdma_event_e15_hw_halt_done:
2492 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2493 sdma_start_sw_clean_up(sde);
2494 break;
2495 case sdma_event_e25_hw_clean_up_done:
2496 break;
2497 case sdma_event_e30_go_running:
2498 ss->go_s99_running = 1;
2499 break;
2500 case sdma_event_e40_sw_cleaned:
2501 break;
2502 case sdma_event_e50_hw_cleaned:
2503 break;
2504 case sdma_event_e60_hw_halted:
2505 sdma_start_err_halt_wait(sde);
2506 break;
2507 case sdma_event_e70_go_idle:
2508 ss->go_s99_running = 0;
2509 break;
2510 case sdma_event_e80_hw_freeze:
2511 break;
2512 case sdma_event_e81_hw_frozen:
2513 break;
2514 case sdma_event_e82_hw_unfreeze:
2515 break;
2516 case sdma_event_e85_link_down:
2517 ss->go_s99_running = 0;
2518 break;
2519 case sdma_event_e90_sw_halted:
2520 break;
2521 }
2522 break;
2523
2524 case sdma_state_s60_idle_halt_wait:
2525 switch (event) {
2526 case sdma_event_e00_go_hw_down:
2527 sdma_set_state(sde, sdma_state_s00_hw_down);
2528 sdma_start_sw_clean_up(sde);
2529 break;
2530 case sdma_event_e10_go_hw_start:
2531 break;
2532 case sdma_event_e15_hw_halt_done:
2533 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2534 sdma_start_sw_clean_up(sde);
2535 break;
2536 case sdma_event_e25_hw_clean_up_done:
2537 break;
2538 case sdma_event_e30_go_running:
2539 ss->go_s99_running = 1;
2540 break;
2541 case sdma_event_e40_sw_cleaned:
2542 break;
2543 case sdma_event_e50_hw_cleaned:
2544 break;
2545 case sdma_event_e60_hw_halted:
2546 sdma_start_err_halt_wait(sde);
2547 break;
2548 case sdma_event_e70_go_idle:
2549 ss->go_s99_running = 0;
2550 break;
2551 case sdma_event_e80_hw_freeze:
2552 break;
2553 case sdma_event_e81_hw_frozen:
2554 break;
2555 case sdma_event_e82_hw_unfreeze:
2556 break;
2557 case sdma_event_e85_link_down:
2558 break;
2559 case sdma_event_e90_sw_halted:
2560 break;
2561 }
2562 break;
2563
2564 case sdma_state_s80_hw_freeze:
2565 switch (event) {
2566 case sdma_event_e00_go_hw_down:
2567 sdma_set_state(sde, sdma_state_s00_hw_down);
2568 sdma_start_sw_clean_up(sde);
2569 break;
2570 case sdma_event_e10_go_hw_start:
2571 break;
2572 case sdma_event_e15_hw_halt_done:
2573 break;
2574 case sdma_event_e25_hw_clean_up_done:
2575 break;
2576 case sdma_event_e30_go_running:
2577 ss->go_s99_running = 1;
2578 break;
2579 case sdma_event_e40_sw_cleaned:
2580 break;
2581 case sdma_event_e50_hw_cleaned:
2582 break;
2583 case sdma_event_e60_hw_halted:
2584 break;
2585 case sdma_event_e70_go_idle:
2586 ss->go_s99_running = 0;
2587 break;
2588 case sdma_event_e80_hw_freeze:
2589 break;
2590 case sdma_event_e81_hw_frozen:
2591 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
2592 sdma_start_sw_clean_up(sde);
2593 break;
2594 case sdma_event_e82_hw_unfreeze:
2595 break;
2596 case sdma_event_e85_link_down:
2597 break;
2598 case sdma_event_e90_sw_halted:
2599 break;
2600 }
2601 break;
2602
2603 case sdma_state_s82_freeze_sw_clean:
2604 switch (event) {
2605 case sdma_event_e00_go_hw_down:
2606 sdma_set_state(sde, sdma_state_s00_hw_down);
2607 sdma_start_sw_clean_up(sde);
2608 break;
2609 case sdma_event_e10_go_hw_start:
2610 break;
2611 case sdma_event_e15_hw_halt_done:
2612 break;
2613 case sdma_event_e25_hw_clean_up_done:
2614 break;
2615 case sdma_event_e30_go_running:
2616 ss->go_s99_running = 1;
2617 break;
2618 case sdma_event_e40_sw_cleaned:
2619 /* notify caller this engine is done cleaning */
2620 atomic_dec(&sde->dd->sdma_unfreeze_count);
2621 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2622 break;
2623 case sdma_event_e50_hw_cleaned:
2624 break;
2625 case sdma_event_e60_hw_halted:
2626 break;
2627 case sdma_event_e70_go_idle:
2628 ss->go_s99_running = 0;
2629 break;
2630 case sdma_event_e80_hw_freeze:
2631 break;
2632 case sdma_event_e81_hw_frozen:
2633 break;
2634 case sdma_event_e82_hw_unfreeze:
2635 sdma_hw_start_up(sde);
2636 sdma_set_state(sde, ss->go_s99_running ?
2637 sdma_state_s99_running :
2638 sdma_state_s20_idle);
2639 break;
2640 case sdma_event_e85_link_down:
2641 break;
2642 case sdma_event_e90_sw_halted:
2643 break;
2644 }
2645 break;
2646
2647 case sdma_state_s99_running:
2648 switch (event) {
2649 case sdma_event_e00_go_hw_down:
2650 sdma_set_state(sde, sdma_state_s00_hw_down);
2651 sdma_start_sw_clean_up(sde);
2652 break;
2653 case sdma_event_e10_go_hw_start:
2654 break;
2655 case sdma_event_e15_hw_halt_done:
2656 break;
2657 case sdma_event_e25_hw_clean_up_done:
2658 break;
2659 case sdma_event_e30_go_running:
2660 break;
2661 case sdma_event_e40_sw_cleaned:
2662 break;
2663 case sdma_event_e50_hw_cleaned:
2664 break;
2665 case sdma_event_e60_hw_halted:
2666 need_progress = 1;
2667 sdma_err_progress_check_schedule(sde);
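			/* fall through */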
2668 case sdma_event_e90_sw_halted:
2669 /*
2670 * SW initiated halt does not perform engines
2671 * progress check
2672 */
2673 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2674 sdma_start_err_halt_wait(sde);
2675 break;
2676 case sdma_event_e70_go_idle:
2677 sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
2678 break;
2679 case sdma_event_e85_link_down:
2680 ss->go_s99_running = 0;
2681 /* fall through */
2682 case sdma_event_e80_hw_freeze:
2683 sdma_set_state(sde, sdma_state_s80_hw_freeze);
2684 atomic_dec(&sde->dd->sdma_unfreeze_count);
2685 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2686 break;
2687 case sdma_event_e81_hw_frozen:
2688 break;
2689 case sdma_event_e82_hw_unfreeze:
2690 break;
2691 }
2692 break;
2693 }
2694
2695 ss->last_event = event;
2696 if (need_progress)
2697 sdma_make_progress(sde, 0);
2698}
2699
2700/*
2701 * _extend_sdma_tx_descs() - helper to extend txreq
2702 *
2703 * This is called once the initial nominal allocation
2704 * of descriptors in the sdma_txreq is exhausted.
2705 *
2706 * The code will bump the allocation up to the max
2707 * of MAX_DESC (64) descriptors. There doesn't seem
2708 * much point in an interim step.
2709 *
2710 */
2711int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
2712{
2713 int i;
2714
2715 tx->descp = kmalloc_array(
2716 MAX_DESC,
2717 sizeof(struct sdma_desc),
2718 GFP_ATOMIC);
2719 if (!tx->descp)
2720 return -ENOMEM;
2721 tx->desc_limit = MAX_DESC;
2722 /* copy ones already built */
2723 for (i = 0; i < tx->num_desc; i++)
2724 tx->descp[i] = tx->descs[i];
2725 return 0;
2726}
2727
2728/* Update sdes when the lmc changes */
2729void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
2730{
2731 struct sdma_engine *sde;
2732 int i;
2733 u64 sreg;
2734
2735 sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
2736 SD(CHECK_SLID_MASK_SHIFT)) |
2737 (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
2738 SD(CHECK_SLID_VALUE_SHIFT));
2739
2740 for (i = 0; i < dd->num_sdma; i++) {
2741 hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
2742 i, (u32)sreg);
2743 sde = &dd->per_sdma[i];
2744 write_sde_csr(sde, SD(CHECK_SLID), sreg);
2745 }
2746}
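/*
 * Worked example, assuming the caller derives mask from the LMC as
 * ~((1 << lmc) - 1) (that convention is not visible in this file):
 * with lid = 0x1001 and lmc = 2, the mask covers all but the low two
 * LID bits, so CHECK_SLID gets that mask in its MASK field and
 * lid & mask = 0x1000 in its VALUE field; the engine can then verify
 * that the SLID of packets it sends matches the value under the mask.
 */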
2747
2748/* tx not dword sized - pad */
2749int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
2750{
2751 int rval = 0;
2752
2753 if (unlikely(tx->num_desc == tx->desc_limit)) {
2754 rval = _extend_sdma_tx_descs(dd, tx);
2755 if (rval)
2756 return rval;
2757 }
2758 /* finish the one just added */
2759 tx->num_desc++;
2760 make_tx_sdma_desc(
2761 tx,
2762 SDMA_MAP_NONE,
2763 dd->sdma_pad_phys,
2764 sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
2765 _sdma_close_tx(dd, tx);
2766 return rval;
2767}
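/*
 * Padding arithmetic, for reference: the pad descriptor points at the
 * driver's pad buffer (dd->sdma_pad_phys) and its length is
 * sizeof(u32) - (packet_len & 3), so a packet_len of 0x41 bytes gets
 * 3 pad bytes and 0x43 gets 1, rounding the total up to a dword
 * boundary.  Per the comment above, this helper is only used when
 * packet_len is not already dword sized.
 */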
2768
2769/*
2770 * Add ahg to the sdma_txreq
2771 *
2772 * The logic will consume up to 3
2773 * descriptors at the beginning of
2774 * sdma_txreq.
2775 */
2776void _sdma_txreq_ahgadd(
2777 struct sdma_txreq *tx,
2778 u8 num_ahg,
2779 u8 ahg_entry,
2780 u32 *ahg,
2781 u8 ahg_hlen)
2782{
2783 u32 i, shift = 0, desc = 0;
2784 u8 mode;
2785
2786 WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
2787 /* compute mode */
2788 if (num_ahg == 1)
2789 mode = SDMA_AHG_APPLY_UPDATE1;
2790 else if (num_ahg <= 5)
2791 mode = SDMA_AHG_APPLY_UPDATE2;
2792 else
2793 mode = SDMA_AHG_APPLY_UPDATE3;
2794 tx->num_desc++;
2795 /* initialize the consumed descriptors to zero */
2796 switch (mode) {
2797 case SDMA_AHG_APPLY_UPDATE3:
2798 tx->num_desc++;
2799 tx->descs[2].qw[0] = 0;
2800 tx->descs[2].qw[1] = 0;
2801 /* FALLTHROUGH */
2802 case SDMA_AHG_APPLY_UPDATE2:
2803 tx->num_desc++;
2804 tx->descs[1].qw[0] = 0;
2805 tx->descs[1].qw[1] = 0;
2806 break;
2807 }
2808 ahg_hlen >>= 2;
2809 tx->descs[0].qw[1] |=
2810 (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
2811 << SDMA_DESC1_HEADER_INDEX_SHIFT) |
2812 (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
2813 << SDMA_DESC1_HEADER_DWS_SHIFT) |
2814 (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
2815 << SDMA_DESC1_HEADER_MODE_SHIFT) |
2816 (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
2817 << SDMA_DESC1_HEADER_UPDATE1_SHIFT);
2818 for (i = 0; i < (num_ahg - 1); i++) {
2819 if (!shift && !(i & 2))
2820 desc++;
2821 tx->descs[desc].qw[!!(i & 2)] |=
2822 (((u64)ahg[i + 1])
2823 << shift);
2824 shift = (shift + 32) & 63;
2825 }
2826}
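/*
 * Resulting AHG layout, traced from the loop above: ahg[0] lands in
 * the UPDATE1 field of descs[0].qw[1]; ahg[1]/ahg[2] fill the low/high
 * halves of descs[1].qw[0], ahg[3]/ahg[4] fill descs[1].qw[1],
 * ahg[5]/ahg[6] fill descs[2].qw[0] and ahg[7]/ahg[8] fill
 * descs[2].qw[1].  That is why num_ahg is capped at 9 and why at most
 * two extra descriptors are consumed.
 */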
2827
2828/**
2829 * sdma_ahg_alloc - allocate an AHG entry
2830 * @sde: engine to allocate from
2831 *
2832 * Return:
2833 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
2834 * -ENOSPC if an entry is not available
2835 */
2836int sdma_ahg_alloc(struct sdma_engine *sde)
2837{
2838 int nr;
2839 int oldbit;
2840
2841 if (!sde) {
2842 trace_hfi1_ahg_allocate(sde, -EINVAL);
2843 return -EINVAL;
2844 }
2845 while (1) {
2846 nr = ffz(ACCESS_ONCE(sde->ahg_bits));
2847 if (nr > 31) {
2848 trace_hfi1_ahg_allocate(sde, -ENOSPC);
2849 return -ENOSPC;
2850 }
2851 oldbit = test_and_set_bit(nr, &sde->ahg_bits);
2852 if (!oldbit)
2853 break;
2854 cpu_relax();
2855 }
2856 trace_hfi1_ahg_allocate(sde, nr);
2857 return nr;
2858}
2859
2860/**
2861 * sdma_ahg_free - free an AHG entry
2862 * @sde: engine to return AHG entry
2863 * @ahg_index: index to free
2864 *
2865 * This routine frees the indicated AHG entry.
2866 */
2867void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
2868{
2869 if (!sde)
2870 return;
2871 trace_hfi1_ahg_deallocate(sde, ahg_index);
2872 if (ahg_index < 0 || ahg_index > 31)
2873 return;
2874 clear_bit(ahg_index, &sde->ahg_bits);
2875}
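/*
 * Allocation and use typically pair up as below (sketch only; error
 * handling trimmed, and "priv", "tx", num_ahg, ahg and ahg_hlen are
 * caller-provided):
 *
 *	int ahg_index = sdma_ahg_alloc(priv->s_sde);
 *
 *	if (ahg_index >= 0)
 *		_sdma_txreq_ahgadd(&tx->txreq, num_ahg, ahg_index,
 *				   ahg, ahg_hlen);
 *	...
 *	// when the AHG entry is no longer needed:
 *	sdma_ahg_free(priv->s_sde, ahg_index);
 */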
2876
2877/*
2878 * SPC freeze handling for SDMA engines. Called when the driver knows
2879 * the SPC is going into a freeze but before the freeze is fully
2880 * settled. Generally an error interrupt.
2881 *
2882 * This event will pull the engine out of running so no more entries can be
2883 * added to the engine's queue.
2884 */
2885void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
2886{
2887 int i;
2888 enum sdma_events event = link_down ? sdma_event_e85_link_down :
2889 sdma_event_e80_hw_freeze;
2890
2891 /* set up the wait but do not wait here */
2892 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
2893
2894 /* tell all engines to stop running and wait */
2895 for (i = 0; i < dd->num_sdma; i++)
2896 sdma_process_event(&dd->per_sdma[i], event);
2897
2898 /* sdma_freeze() will wait for all engines to have stopped */
2899}
2900
2901/*
2902 * SPC freeze handling for SDMA engines. Called when the driver knows
2903 * the SPC is fully frozen.
2904 */
2905void sdma_freeze(struct hfi1_devdata *dd)
2906{
2907 int i;
2908 int ret;
2909
2910 /*
2911 * Make sure all engines have moved out of the running state before
2912 * continuing.
2913 */
2914 ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
2915 atomic_read(&dd->sdma_unfreeze_count) <= 0);
2916 /* interrupted or count is negative, then unloading - just exit */
2917 if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
2918 return;
2919
2920 /* set up the count for the next wait */
2921 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
2922
2923 /* tell all engines that the SPC is frozen, they can start cleaning */
2924 for (i = 0; i < dd->num_sdma; i++)
2925 sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
2926
2927 /*
2928 * Wait for everyone to finish software clean before exiting. The
2929 * software clean will read engine CSRs, so must be completed before
2930 * the next step, which will clear the engine CSRs.
2931 */
2932 (void) wait_event_interruptible(dd->sdma_unfreeze_wq,
2933 atomic_read(&dd->sdma_unfreeze_count) <= 0);
2934 /* no need to check results - done no matter what */
2935}
2936
2937/*
2938 * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
2939 *
2940 * The SPC freeze acts like a SDMA halt and a hardware clean combined. All
2941 * that is left is a software clean. We could do it after the SPC is fully
2942 * frozen, but then we'd have to add another state to wait for the unfreeze.
2943 * Instead, just defer the software clean until the unfreeze step.
2944 */
2945void sdma_unfreeze(struct hfi1_devdata *dd)
2946{
2947 int i;
2948
2949 /* tell all engines to start freeze clean up */
2950 for (i = 0; i < dd->num_sdma; i++)
2951 sdma_process_event(&dd->per_sdma[i],
2952 sdma_event_e82_hw_unfreeze);
2953}
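/*
 * Putting the three freeze hooks together, the expected sequence from
 * the rest of the driver looks roughly like this (the surrounding SPC
 * freeze/unfreeze handling lives outside this file):
 *
 *	sdma_freeze_notify(dd, link_down);  // pull engines out of running
 *	// ... SPC freeze settles ...
 *	sdma_freeze(dd);                    // engines halted, sw clean runs
 *	// ... SPC is unfrozen by the chip code ...
 *	sdma_unfreeze(dd);                  // engines clean up and restart
 */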
2954
2955/**
2956 * _sdma_engine_progress_schedule() - schedule progress on engine
2957 * @sde: sdma_engine to schedule progress
2958 *
2959 */
2960void _sdma_engine_progress_schedule(
2961 struct sdma_engine *sde)
2962{
2963 trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
2964 /* assume we have selected a good cpu */
2965 write_csr(sde->dd,
2966 CCE_INT_FORCE + (8*(IS_SDMA_START/64)), sde->progress_mask);
2967}
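/*
 * The write above sets the engine's interrupt source bit in the
 * appropriate CCE interrupt-force CSR (progress_mask selects the
 * engine, IS_SDMA_START/64 selects which 64-bit CSR in the bank), so
 * the normal SDMA interrupt handler runs and makes progress on the
 * ring even though the hardware did not raise the interrupt itself.
 */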