/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>

/* local includes restored (assumed from the driver's layout) */
#include "hfi.h"
#include "common.h"
#include "qp.h"
#include "sdma.h"
#include "iowait.h"
#include "trace.h"
/* must be a power of 2 >= 64 <= 32768 */
#define SDMA_DESCQ_CNT 1024
#define INVALID_TAIL 0xffff

static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
module_param(sdma_descq_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

static uint sdma_idle_cnt = 250;
module_param(sdma_idle_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)");

uint mod_num_sdma;
module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");
#define SDMA_WAIT_BATCH_SIZE 20
/* max wait time for a SDMA engine to indicate it has halted */
#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
/* all SDMA engine errors that cause a halt */

#define SD(name) SEND_DMA_##name
#define ALL_SDMA_ENG_HALT_ERRS \
	(SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
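
/*
 * Illustrative expansion of the helper above:
 * SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) token-pastes to
 * SEND_DMA_ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK, so ALL_SDMA_ENG_HALT_ERRS
 * is simply the OR of every SEND_DMA engine error status bit that halts
 * the engine.
 */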
/* sdma_sendctrl operations */
#define SDMA_SENDCTRL_OP_ENABLE    (1U << 0)
#define SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
#define SDMA_SENDCTRL_OP_HALT      (1U << 2)
#define SDMA_SENDCTRL_OP_CLEANUP   (1U << 3)

/* handle long defines */
#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
static const char * const sdma_state_names[] = {
	[sdma_state_s00_hw_down]                = "s00_HwDown",
	[sdma_state_s10_hw_start_up_halt_wait]  = "s10_HwStartUpHaltWait",
	[sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
	[sdma_state_s20_idle]                   = "s20_Idle",
	[sdma_state_s30_sw_clean_up_wait]       = "s30_SwCleanUpWait",
	[sdma_state_s40_hw_clean_up_wait]       = "s40_HwCleanUpWait",
	[sdma_state_s50_hw_halt_wait]           = "s50_HwHaltWait",
	[sdma_state_s60_idle_halt_wait]         = "s60_IdleHaltWait",
	[sdma_state_s80_hw_freeze]              = "s80_HwFreeze",
	[sdma_state_s82_freeze_sw_clean]        = "s82_FreezeSwClean",
	[sdma_state_s99_running]                = "s99_Running",
};
static const char * const sdma_event_names[] = {
	[sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
	[sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
	[sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
	[sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
	[sdma_event_e30_go_running]   = "e30_GoRunning",
	[sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
	[sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
	[sdma_event_e60_hw_halted]    = "e60_HwHalted",
	[sdma_event_e70_go_idle]      = "e70_GoIdle",
	[sdma_event_e80_hw_freeze]    = "e80_HwFreeze",
	[sdma_event_e81_hw_frozen]    = "e81_HwFrozen",
	[sdma_event_e82_hw_unfreeze]  = "e82_HwUnfreeze",
	[sdma_event_e85_link_down]    = "e85_LinkDown",
	[sdma_event_e90_sw_halted]    = "e90_SwHalted",
};
static const struct sdma_set_state_action sdma_action_table[] = {
	[sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s10_hw_start_up_halt_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 1,
		.op_cleanup = 0,
	},
	[sdma_state_s15_hw_start_up_clean_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 1,
	},
	[sdma_state_s20_idle] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 1,
	},
	[sdma_state_s50_hw_halt_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s60_idle_halt_wait] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 1,
		.op_cleanup = 0,
	},
	[sdma_state_s80_hw_freeze] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s82_freeze_sw_clean] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 0,
		.go_s99_running_totrue = 1,
	},
};
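
/*
 * Walk-through of the table above (assuming the .op_* values shown): on a
 * transition into sdma_state_s99_running, sdma_set_state() builds
 * op = SDMA_SENDCTRL_OP_ENABLE | SDMA_SENDCTRL_OP_INTENABLE from
 * sdma_action_table[sdma_state_s99_running], sets ss->go_s99_running,
 * and hands op to sdma_sendctrl() to program the SendDmaCtrl CSR.
 */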
#define SDMA_TAIL_UPDATE_THRESH 0x1F
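/*
 * 0x1F batches tail updates in sdma_send_txlist(): the tail CSR is
 * written once every 32 submitted requests ((count & 0x1F) == 0) and
 * once more at the end, instead of once per request.
 */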
/* declare all statics here rather than keep sorting */
static void sdma_complete(struct kref *);
static void sdma_finalput(struct sdma_state *);
static void sdma_get(struct sdma_state *);
static void sdma_hw_clean_up_task(unsigned long);
static void sdma_put(struct sdma_state *);
static void sdma_set_state(struct sdma_engine *, enum sdma_states);
static void sdma_start_hw_clean_up(struct sdma_engine *);
static void sdma_start_sw_clean_up(struct sdma_engine *);
static void sdma_sw_clean_up_task(unsigned long);
static void sdma_sendctrl(struct sdma_engine *, unsigned);
static void init_sdma_regs(struct sdma_engine *, u32, uint);
static void sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void __sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void dump_sdma_state(struct sdma_engine *sde);
static void sdma_make_progress(struct sdma_engine *sde, u64 status);
static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
static void sdma_flush_descq(struct sdma_engine *sde);
/**
 * sdma_state_name() - return state string from enum
 * @state: state
 */
static const char *sdma_state_name(enum sdma_states state)
{
	return sdma_state_names[state];
}
static void sdma_get(struct sdma_state *ss)
{
	kref_get(&ss->kref);
}
static void sdma_complete(struct kref *kref)
{
	struct sdma_state *ss =
		container_of(kref, struct sdma_state, kref);

	complete(&ss->comp);
}
static void sdma_put(struct sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}
static inline void write_sde_csr(
	struct sdma_engine *sde,
	u32 offset0,
	u64 value)
{
	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
}

static inline u64 read_sde_csr(
	struct sdma_engine *sde,
	u32 offset0)
{
	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
}
/*
 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
 * sdma engine 'sde' to drop to 0.
 */
static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
					int pause)
{
	u64 off = 8 * sde->this_idx;
	struct hfi1_devdata *dd = sde->dd;
	int lcnt = 0;

	while (1) {
		u64 reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);

		reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
		reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
		/* done when the egress FIFO occupancy drops to 0 */
		if (reg == 0)
			break;
		/* give up after a bounded number of polls */
		if (lcnt++ > 100) {
			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u\n",
				   __func__, sde->this_idx, (u32)reg);
			break;
		}
		udelay(1);
	}
}
/**
 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
 * and pause for credit return.
 */
void sdma_wait(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->num_sdma; i++) {
		struct sdma_engine *sde = &dd->per_sdma[i];

		sdma_wait_for_packet_egress(sde, 0);
	}
}
static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
{
	u64 reg;

	if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
		return;
	reg = cnt;
	reg &= SD(DESC_CNT_CNT_MASK);
	reg <<= SD(DESC_CNT_CNT_SHIFT);
	write_sde_csr(sde, SD(DESC_CNT), reg);
}
/*
 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
 *
 * Depending on timing there can be txreqs in two places:
 * - in the descq ring
 * - in the flush list
 *
 * To avoid ordering issues the descq ring needs to be flushed
 * first followed by the flush list.
 *
 * This routine is called from two places
 * - From a work queue item
 * - Directly from the state machine just before setting the
 *   state to running
 *
 * Must be called with head_lock held
 *
 */
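/*
 * Ordering example: a txreq still in the descq ring was submitted before
 * any txreq that was diverted to the flush list, so flushing the ring
 * first and the flush list second keeps the ->complete() callbacks in
 * submission order.
 */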
static void sdma_flush(struct sdma_engine *sde)
{
	struct sdma_txreq *txp, *txp_next;
	LIST_HEAD(flushlist);

	/* flush from head to tail */
	sdma_flush_descq(sde);
	spin_lock(&sde->flushlist_lock);
	/* copy flush list */
	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
		list_del_init(&txp->list);
		list_add_tail(&txp->list, &flushlist);
	}
	spin_unlock(&sde->flushlist_lock);
	/* flush from flush list */
	list_for_each_entry_safe(txp, txp_next, &flushlist, list) {
		int drained = 0;
		/* protect against complete modifying */
		struct iowait *wait = txp->wait;

		list_del_init(&txp->list);
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
		trace_hfi1_sdma_out_sn(sde, txp->sn);
		if (WARN_ON_ONCE(sde->head_sn != txp->sn))
			dd_dev_err(sde->dd, "expected %llu got %llu\n",
				sde->head_sn, txp->sn);
		sde->head_sn++;
#endif
		sdma_txclean(sde->dd, txp);
		if (wait)
			drained = atomic_dec_and_test(&wait->sdma_busy);
		if (txp->complete)
			(*txp->complete)(txp, SDMA_TXREQ_S_ABORTED, drained);
		if (wait && drained)
			iowait_drain_wakeup(wait);
	}
}
/*
 * Fields a work request for flushing the descq ring
 * and the flush list
 *
 * If the engine has been brought to running during
 * the scheduling delay, the flush is ignored, assuming
 * that the process of bringing the engine to running
 * would have done this flush prior to going to running.
 *
 */
static void sdma_field_flush(struct work_struct *work)
{
	unsigned long flags;
	struct sdma_engine *sde =
		container_of(work, struct sdma_engine, flush_worker);

	write_seqlock_irqsave(&sde->head_lock, flags);
	if (!__sdma_running(sde))
		sdma_flush(sde);
	write_sequnlock_irqrestore(&sde->head_lock, flags);
}
static void sdma_err_halt_wait(struct work_struct *work)
{
	struct sdma_engine *sde = container_of(work, struct sdma_engine,
						err_halt_worker);
	u64 statuscsr;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
	while (1) {
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
		if (statuscsr)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(sde->dd,
				"SDMA engine %d - timeout waiting for engine to halt\n",
				sde->this_idx);
			/*
			 * Continue anyway.  This could happen if there was
			 * an uncorrectable error in the wrong spot.
			 */
			break;
		}
		usleep_range(80, 120);
	}

	sdma_process_event(sde, sdma_event_e15_hw_halt_done);
}
static void sdma_start_err_halt_wait(struct sdma_engine *sde)
{
	schedule_work(&sde->err_halt_worker);
}
static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
{
	if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
		unsigned index;
		struct hfi1_devdata *dd = sde->dd;

		for (index = 0; index < dd->num_sdma; index++) {
			struct sdma_engine *curr_sdma = &dd->per_sdma[index];

			if (curr_sdma != sde)
				curr_sdma->progress_check_head =
					curr_sdma->descq_head;
		}
		dd_dev_err(sde->dd,
			"SDMA engine %d - check scheduled\n",
			sde->this_idx);
		mod_timer(&sde->err_progress_check_timer, jiffies + 10);
	}
}
static void sdma_err_progress_check(unsigned long data)
{
	unsigned index;
	struct sdma_engine *sde = (struct sdma_engine *)data;

	dd_dev_err(sde->dd, "SDE progress check event\n");
	for (index = 0; index < sde->dd->num_sdma; index++) {
		struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
		unsigned long flags;

		/* check progress on each engine except the current one */
		if (curr_sde == sde)
			continue;
		/*
		 * We must lock interrupts when acquiring sde->lock,
		 * to avoid a deadlock if interrupt triggers and spins on
		 * the same lock on same CPU
		 */
		spin_lock_irqsave(&curr_sde->tail_lock, flags);
		write_seqlock(&curr_sde->head_lock);

		/* skip non-running queues */
		if (curr_sde->state.current_state != sdma_state_s99_running) {
			write_sequnlock(&curr_sde->head_lock);
			spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
			continue;
		}

		if ((curr_sde->descq_head != curr_sde->descq_tail) &&
		    (curr_sde->descq_head ==
		     curr_sde->progress_check_head))
			__sdma_process_event(curr_sde,
					     sdma_event_e90_sw_halted);
		write_sequnlock(&curr_sde->head_lock);
		spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
	}
	schedule_work(&sde->err_halt_worker);
}
static void sdma_hw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	u64 statuscsr;

	while (1) {
#ifdef CONFIG_SDMA_VERBOSITY
		dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
			sde->this_idx, slashstrip(__FILE__), __LINE__,
			__func__);
#endif
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
		if (statuscsr)
			break;
		udelay(10);
	}

	sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
}
static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
	smp_read_barrier_depends(); /* see sdma_update_tail() */
	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}
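
/*
 * Ring indexing sketch: descq_cnt is a power of 2 and sdma_mask is
 * descq_cnt - 1, so "index & sde->sdma_mask" wraps for free; e.g. with
 * descq_cnt == 1024, tx_head == 1025 selects ring slot 1.
 */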
/*
 * flush ring for recovery
 */
static void sdma_flush_descq(struct sdma_engine *sde)
{
	u16 head, tail;
	int progress = 0;
	struct sdma_txreq *txp = get_txhead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */
	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	while (head != tail) {
		/* advance head, wrap if needed */
		head = ++sde->descq_head & sde->sdma_mask;
		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == head) {
			int drained = 0;
			/* protect against complete modifying */
			struct iowait *wait = txp->wait;

			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			if (wait)
				drained = atomic_dec_and_test(&wait->sdma_busy);
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
			trace_hfi1_sdma_out_sn(sde, txp->sn);
			if (WARN_ON_ONCE(sde->head_sn != txp->sn))
				dd_dev_err(sde->dd, "expected %llu got %llu\n",
					sde->head_sn, txp->sn);
			sde->head_sn++;
#endif
			sdma_txclean(sde->dd, txp);
			trace_hfi1_sdma_progress(sde, head, tail, txp);
			if (txp->complete)
				(*txp->complete)(
					txp,
					SDMA_TXREQ_S_ABORTED,
					drained);
			if (wait && drained)
				iowait_drain_wakeup(wait);
			/* see if there is another txp */
			txp = get_txhead(sde);
		}
		progress++;
	}
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}
static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/*
	 * In the error clean up sequence, software clean must be called
	 * before the hardware clean so we can use the hardware head in
	 * the progress routine.  A hardware clean or SPC unfreeze will
	 * reset the hardware head.
	 *
	 * Process all retired requests. The progress routine will use the
	 * latest physical hardware head - we are not running so speed does
	 * not matter.
	 */
	sdma_make_progress(sde, 0);

	sdma_flush(sde);

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers have been reset via an earlier
	 * clean up.
	 */
	sde->descq_tail = 0;
	sde->descq_head = 0;
	sde->desc_avail = sdma_descq_freecnt(sde);
	*sde->head_dma = 0;

	__sdma_process_event(sde, sdma_event_e40_sw_cleaned);

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}
static void sdma_sw_tear_down(struct sdma_engine *sde)
{
	struct sdma_state *ss = &sde->state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);

	/* stop waiting for all unfreeze events to complete */
	atomic_set(&sde->dd->sdma_unfreeze_count, -1);
	wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
}
static void sdma_start_hw_clean_up(struct sdma_engine *sde)
{
	tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
}

static void sdma_start_sw_clean_up(struct sdma_engine *sde)
{
	tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
}
static void sdma_set_state(struct sdma_engine *sde,
	enum sdma_states next_state)
{
	struct sdma_state *ss = &sde->state;
	const struct sdma_set_state_action *action = sdma_action_table;
	unsigned op = 0;

	trace_hfi1_sdma_state(
		sde,
		sdma_state_names[ss->current_state],
		sdma_state_names[next_state]);

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;
	ss->current_state = next_state;

	if (ss->previous_state != sdma_state_s99_running
		&& next_state == sdma_state_s99_running)
		sdma_flush(sde);

	if (action[next_state].op_enable)
		op |= SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_cleanup)
		op |= SDMA_SENDCTRL_OP_CLEANUP;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;
	sdma_sendctrl(sde, ss->current_op);
}
/**
 * sdma_get_descq_cnt() - called when device probed
 *
 * Return a validated descq count.
 *
 * This is currently only used in the verbs initialization to build the tx
 * ring.
 *
 * This will probably be deleted in favor of a more scalable approach to
 * sizing the ring.
 */
u16 sdma_get_descq_cnt(void)
{
	u16 count = sdma_descq_cnt;

	if (!count)
		return SDMA_DESCQ_CNT;
	/* count must be a power of 2 greater than 64 and less than
	 * 32768.   Otherwise return default.
	 */
	if (!is_power_of_2(count))
		return SDMA_DESCQ_CNT;
	if (count < 64 || count > 32768)
		return SDMA_DESCQ_CNT;
	return count;
}
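
/*
 * Validation examples: sdma_descq_cnt=2048 is accepted as-is, while
 * sdma_descq_cnt=3000 (not a power of 2) or sdma_descq_cnt=32 (below 64)
 * fall back to the SDMA_DESCQ_CNT default of 1024.
 */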
/**
 * sdma_select_engine_vl() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns an engine based on the selector and a vl.  The
 * mapping fields are protected by RCU.
 */
struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl)
{
	struct sdma_vl_map *m;
	struct sdma_map_elem *e;
	struct sdma_engine *rval;

	if (WARN_ON(vl > 8))
		return NULL;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return NULL;
	}
	e = m->map[vl & m->mask];
	rval = e->sde[selector & e->mask];
	rcu_read_unlock();

	trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
	return rval;
}
/**
 * sdma_select_engine_sc() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @sc5: the 5 bit sc
 *
 * This function returns an engine based on the selector and an sc.
 */
struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5)
{
	u8 vl = sc_to_vlt(dd, sc5);

	return sdma_select_engine_vl(dd, selector, vl);
}
/*
 * Free the indicated map struct
 */
static void sdma_map_free(struct sdma_vl_map *m)
{
	int i;

	for (i = 0; m && i < m->actual_vls; i++)
		kfree(m->map[i]);
	kfree(m);
}

/*
 * Handle RCU callback
 */
static void sdma_map_rcu_callback(struct rcu_head *list)
{
	struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);

	sdma_map_free(m);
}
/**
 * sdma_map_init - called when # vls change
 * @dd: hfi1_devdata
 * @port: port number
 * @num_vls: number of vls
 * @vl_engines: per vl engine mapping (optional)
 *
 * This routine changes the mapping based on the number of vls.
 *
 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
 * implies auto computing the loading and giving each VL a uniform
 * distribution of engines per VL.
 *
 * The auto algorithm computes the sde_per_vl and the number of extra
 * engines.  Any extra engines are added from the last VL on down.
 *
 * rcu locking is used here to control access to the mapping fields.
 *
 * If either the num_vls or num_sdma are non-power of 2, the array sizes
 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
 * up to the next highest power of 2 and the first entry is reused
 * in a round robin fashion.
 *
 * If an error occurs the map change is not done and the mapping is
 * not changed.
 */
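/*
 * Worked example (hypothetical configuration): num_sdma = 16 and
 * num_vls = 8 gives sde_per_vl = 2 with no extras, so VL0 uses engines
 * 0-1, VL1 engines 2-3, and so on.  With num_sdma = 16 and num_vls = 5,
 * sde_per_vl = 3 and extra = 1, so the last VL picks up the extra:
 * VLs 0-3 get 3 engines each and VL4 gets 4.
 */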
int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
{
	int i, j;
	int extra, sde_per_vl;
	int engine = 0;
	u8 lvl_engines[OPA_MAX_VLS];
	struct sdma_vl_map *oldmap, *newmap;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return 0;

	if (!vl_engines) {
		/* truncate divide */
		sde_per_vl = dd->num_sdma / num_vls;
		/* extras */
		extra = dd->num_sdma % num_vls;
		vl_engines = lvl_engines;
		/* add extras from last vl down */
		for (i = num_vls - 1; i >= 0; i--, extra--)
			vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
	}
	/* build new map */
	newmap = kzalloc(
		sizeof(struct sdma_vl_map) +
			roundup_pow_of_two(num_vls) *
			sizeof(struct sdma_map_elem *),
		GFP_KERNEL);
	if (!newmap)
		goto bail;
	newmap->actual_vls = num_vls;
	newmap->vls = roundup_pow_of_two(num_vls);
	newmap->mask = (1 << ilog2(newmap->vls)) - 1;
	for (i = 0; i < newmap->vls; i++) {
		/* save for wrap around */
		int first_engine = engine;

		if (i < newmap->actual_vls) {
			int sz = roundup_pow_of_two(vl_engines[i]);

			/* only allocate once */
			newmap->map[i] = kzalloc(
				sizeof(struct sdma_map_elem) +
					sz * sizeof(struct sdma_engine *),
				GFP_KERNEL);
			if (!newmap->map[i])
				goto bail;
			newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
			/* assign engines to the map entry */
			for (j = 0; j < sz; j++) {
				newmap->map[i]->sde[j] =
					&dd->per_sdma[engine];
				if (++engine >= first_engine + vl_engines[i])
					/* wrap back to first engine */
					engine = first_engine;
			}
		} else {
			/* just re-use entry without allocating */
			newmap->map[i] = newmap->map[i % num_vls];
		}
		engine = first_engine + vl_engines[i];
	}
	/* newmap in hand, save old map */
	spin_lock_irq(&dd->sde_map_lock);
	oldmap = rcu_dereference_protected(dd->sdma_map,
			lockdep_is_held(&dd->sde_map_lock));

	/* publish newmap */
	rcu_assign_pointer(dd->sdma_map, newmap);

	spin_unlock_irq(&dd->sde_map_lock);
	/* success, free any old map after grace period */
	if (oldmap)
		call_rcu(&oldmap->list, sdma_map_rcu_callback);
	return 0;
bail:
	/* free any partial allocation */
	sdma_map_free(newmap);
	return -ENOMEM;
}
/*
 * Clean up allocated memory.
 *
 * This routine can be called regardless of the success of sdma_init()
 *
 */
static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
{
	size_t i;
	struct sdma_engine *sde;

	if (dd->sdma_pad_dma) {
		dma_free_coherent(&dd->pcidev->dev, 4,
				  (void *)dd->sdma_pad_dma,
				  dd->sdma_pad_phys);
		dd->sdma_pad_dma = NULL;
		dd->sdma_pad_phys = 0;
	}
	if (dd->sdma_heads_dma) {
		dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
				  (void *)dd->sdma_heads_dma,
				  dd->sdma_heads_phys);
		dd->sdma_heads_dma = NULL;
		dd->sdma_heads_phys = 0;
	}
	for (i = 0; dd->per_sdma && i < num_engines; ++i) {
		sde = &dd->per_sdma[i];

		sde->head_dma = NULL;
		sde->head_phys = 0;

		if (sde->descq) {
			dma_free_coherent(
				&dd->pcidev->dev,
				sde->descq_cnt * sizeof(u64[2]),
				sde->descq,
				sde->descq_phys
			);
			sde->descq = NULL;
			sde->descq_phys = 0;
		}
		if (is_vmalloc_addr(sde->tx_ring))
			vfree(sde->tx_ring);
		else
			kfree(sde->tx_ring);
		sde->tx_ring = NULL;
	}
	spin_lock_irq(&dd->sde_map_lock);
	kfree(rcu_access_pointer(dd->sdma_map));
	RCU_INIT_POINTER(dd->sdma_map, NULL);
	spin_unlock_irq(&dd->sde_map_lock);
	synchronize_rcu();
	kfree(dd->per_sdma);
	dd->per_sdma = NULL;
}
/**
 * sdma_init() - called when device probed
 * @dd: hfi1_devdata
 * @port: port number (currently only zero)
 *
 * sdma_init initializes the specified number of engines.
 *
 * The code initializes each sde, its csrs.  Interrupts
 * are not required to be enabled.
 *
 * Returns:
 * 0 - success, -errno on failure
 */
int sdma_init(struct hfi1_devdata *dd, u8 port)
{
	unsigned this_idx;
	struct sdma_engine *sde;
	u16 descq_cnt;
	void *curr_head;
	struct hfi1_pportdata *ppd = dd->pport + port;
	u32 per_sdma_credits;
	uint idle_cnt = sdma_idle_cnt;
	size_t num_engines = dd->chip_sdma_engines;

	if (!HFI1_CAP_IS_KSET(SDMA)) {
		HFI1_CAP_CLEAR(SDMA_AHG);
		return 0;
	}
	if (mod_num_sdma &&
		/* can't exceed chip support */
		mod_num_sdma <= dd->chip_sdma_engines &&
		/* count must be >= vls */
		mod_num_sdma >= num_vls)
		num_engines = mod_num_sdma;

	dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
	dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
	dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
		dd->chip_sdma_mem_size);

	per_sdma_credits =
		dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);
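
	/*
	 * Carve-up example (illustrative numbers only): with 16 engines
	 * and a chip_sdma_mem_size equal to 16384 SDMA blocks, each engine
	 * gets per_sdma_credits = 16384 / 16 = 1024 credits, and
	 * init_sdma_regs() below points engine i at its private slice
	 * starting at credit index per_sdma_credits * i.
	 */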

	/* set up freeze waitqueue */
	init_waitqueue_head(&dd->sdma_unfreeze_wq);
	atomic_set(&dd->sdma_unfreeze_count, 0);

	descq_cnt = sdma_get_descq_cnt();
	dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
		num_engines, descq_cnt);

	/* alloc memory for array of send engines */
	dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
	if (!dd->per_sdma)
		return -ENOMEM;

	idle_cnt = ns_to_cclock(dd, idle_cnt);
	/* Allocate memory for SendDMA descriptor FIFOs */
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		sde = &dd->per_sdma[this_idx];
		sde->dd = dd;
		sde->ppd = ppd;
		sde->this_idx = this_idx;
		sde->descq_cnt = descq_cnt;
		sde->desc_avail = sdma_descq_freecnt(sde);
		sde->sdma_shift = ilog2(descq_cnt);
		sde->sdma_mask = (1 << sde->sdma_shift) - 1;
		sde->descq_full_count = 0;

		/* Create a mask for all 3 chip interrupt sources */
		sde->imask = (u64)1 << (0*TXE_NUM_SDMA_ENGINES + this_idx)
			| (u64)1 << (1*TXE_NUM_SDMA_ENGINES + this_idx)
			| (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
		/* Create a mask specifically for sdma_idle */
		sde->idle_mask =
			(u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
		/* Create a mask specifically for sdma_progress */
		sde->progress_mask =
			(u64)1 << (TXE_NUM_SDMA_ENGINES + this_idx);
		spin_lock_init(&sde->tail_lock);
		seqlock_init(&sde->head_lock);
		spin_lock_init(&sde->senddmactrl_lock);
		spin_lock_init(&sde->flushlist_lock);
		/* insure there is always a zero bit */
		sde->ahg_bits = 0xfffffffe00000000ULL;

		sdma_set_state(sde, sdma_state_s00_hw_down);

		/* set up reference counting */
		kref_init(&sde->state.kref);
		init_completion(&sde->state.comp);

		INIT_LIST_HEAD(&sde->flushlist);
		INIT_LIST_HEAD(&sde->dmawait);

		sde->tail_csr =
			get_kctxt_csr_addr(dd, this_idx, SD(TAIL));

		if (idle_cnt)
			dd->default_desc1 =
				SDMA_DESC1_HEAD_TO_HOST_FLAG;
		else
			dd->default_desc1 =
				SDMA_DESC1_INT_REQ_FLAG;

		tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
			(unsigned long)sde);

		tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
			(unsigned long)sde);
		INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
		INIT_WORK(&sde->flush_worker, sdma_field_flush);

		sde->progress_check_head = 0;

		init_timer(&sde->err_progress_check_timer);
		sde->err_progress_check_timer.function =
			sdma_err_progress_check;
		sde->err_progress_check_timer.data = (unsigned long)sde;

		sde->descq = dma_zalloc_coherent(
			&dd->pcidev->dev,
			descq_cnt * sizeof(u64[2]),
			&sde->descq_phys,
			GFP_KERNEL
		);
		if (!sde->descq)
			goto bail;
		sde->tx_ring =
			kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
				GFP_KERNEL);
		if (!sde->tx_ring)
			sde->tx_ring =
				vzalloc(
					sizeof(struct sdma_txreq *) *
					descq_cnt);
		if (!sde->tx_ring)
			goto bail;
	}

	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
	/* Allocate memory for DMA of head registers to memory */
	dd->sdma_heads_dma = dma_zalloc_coherent(
		&dd->pcidev->dev,
		dd->sdma_heads_size,
		&dd->sdma_heads_phys,
		GFP_KERNEL
	);
	if (!dd->sdma_heads_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
		goto bail;
	}

	/* Allocate memory for pad */
	dd->sdma_pad_dma = dma_zalloc_coherent(
		&dd->pcidev->dev,
		4,
		&dd->sdma_pad_phys,
		GFP_KERNEL
	);
	if (!dd->sdma_pad_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
		goto bail;
	}

	/* assign each engine to different cacheline and init registers */
	curr_head = (void *)dd->sdma_heads_dma;
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		unsigned long phys_offset;

		sde = &dd->per_sdma[this_idx];

		sde->head_dma = curr_head;
		curr_head += L1_CACHE_BYTES;
		phys_offset = (unsigned long)sde->head_dma -
			      (unsigned long)dd->sdma_heads_dma;
		sde->head_phys = dd->sdma_heads_phys + phys_offset;
		init_sdma_regs(sde, per_sdma_credits, idle_cnt);
	}
	dd->flags |= HFI1_HAS_SEND_DMA;
	dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
	dd->num_sdma = num_engines;
	if (sdma_map_init(dd, port, ppd->vls_operational, NULL))
		goto bail;
	dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
	return 0;

bail:
	sdma_clean(dd, num_engines);
	return -ENOMEM;
}
/**
 * sdma_all_running() - called when the link goes up
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the running state.
 */
void sdma_all_running(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* move all engines to running */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e30_go_running);
	}
}
/**
 * sdma_all_idle() - called when the link goes down
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the idle state.
 */
void sdma_all_idle(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* idle all engines */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e70_go_idle);
	}
}
/**
 * sdma_start() - called to kick off state processing for all engines
 * @dd: hfi1_devdata
 *
 * This routine is for kicking off the state processing for all required
 * sdma engines.  Interrupts need to be working at this point.
 */
void sdma_start(struct hfi1_devdata *dd)
{
	unsigned i;
	struct sdma_engine *sde;

	/* kick off the engines state processing */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e10_go_hw_start);
	}
}
/**
 * sdma_exit() - used when module is removed
 * @dd: hfi1_devdata
 */
void sdma_exit(struct hfi1_devdata *dd)
{
	unsigned this_idx;
	struct sdma_engine *sde;

	for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
			++this_idx) {
		sde = &dd->per_sdma[this_idx];
		if (!list_empty(&sde->dmawait))
			dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
				sde->this_idx);
		sdma_process_event(sde, sdma_event_e00_go_hw_down);

		del_timer_sync(&sde->err_progress_check_timer);

		/*
		 * This waits for the state machine to exit so it is not
		 * necessary to kill the sdma_sw_clean_up_task to make sure
		 * it is not running.
		 */
		sdma_finalput(&sde->state);
	}
	sdma_clean(dd, dd->num_sdma);
}
/*
 * unmap the indicated descriptor
 */
static inline void sdma_unmap_desc(
	struct hfi1_devdata *dd,
	struct sdma_desc *descp)
{
	switch (sdma_mapping_type(descp)) {
	case SDMA_MAP_SINGLE:
		dma_unmap_single(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	case SDMA_MAP_PAGE:
		dma_unmap_page(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	}
}
/*
 * return the mode as indicated by the first
 * descriptor in the tx.
 */
static inline u8 ahg_mode(struct sdma_txreq *tx)
{
	return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
		>> SDMA_DESC1_HEADER_MODE_SHIFT;
}
/**
 * sdma_txclean() - clean tx of mappings, descp *kmalloc's
 * @dd: hfi1_devdata for unmapping
 * @tx: tx request to clean
 *
 * This is used in the progress routine to clean the tx or
 * by the ULP to toss an in-process tx build.
 *
 * The code can be called multiple times without issue.
 *
 */
void sdma_txclean(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx)
{
	u16 i;

	if (tx->num_desc) {
		u8 skip = 0, mode = ahg_mode(tx);

		/* unmap first descriptor */
		sdma_unmap_desc(dd, &tx->descp[0]);
		/* determine number of AHG descriptors to skip */
		if (mode > SDMA_AHG_APPLY_UPDATE1)
			skip = mode >> 1;
		for (i = 1 + skip; i < tx->num_desc; i++)
			sdma_unmap_desc(dd, &tx->descp[i]);
		tx->num_desc = 0;
	}
	kfree(tx->coalesce_buf);
	tx->coalesce_buf = NULL;
	/* kmalloc'ed descp */
	if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
		tx->desc_limit = ARRAY_SIZE(tx->descs);
		kfree(tx->descp);
	}
}
static inline u16 sdma_gethead(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	int use_dmahead;
	u16 hwhead;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

retry:
	use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
		      (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
	hwhead = use_dmahead ?
		(u16) le64_to_cpu(*sde->head_dma) :
		(u16) read_sde_csr(sde, SD(HEAD));

	if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
		u16 cnt;
		u16 swtail;
		u16 swhead;
		int sane;

		swhead = sde->descq_head & sde->sdma_mask;
		/* this code is really bad for cache line trading */
		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
		cnt = sde->descq_cnt;

		if (swhead < swtail)
			/* not wrapped */
			sane = (hwhead >= swhead) & (hwhead <= swtail);
		else if (swhead > swtail)
			/* wrapped around */
			sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
				(hwhead <= swtail);
		else
			/* empty */
			sane = (hwhead == swhead);

		if (unlikely(!sane)) {
			dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
				sde->this_idx,
				use_dmahead ? "dma" : "kreg",
				hwhead, swhead, swtail, cnt);
			if (use_dmahead) {
				/* try one more time, using csr */
				use_dmahead = 0;
				goto retry;
			}
			/* proceed as if no progress */
			hwhead = swhead;
		}
	}
	return hwhead;
}
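
/*
 * Sanity-check example for sdma_gethead() above: with descq_cnt = 1024,
 * swhead = 100 and swtail = 900 (not wrapped), any hwhead in [100, 900]
 * is sane; with swhead = 900 and swtail = 100 (wrapped), hwhead must lie
 * in [900, 1023] or [0, 100].  An insane DMA'ed head is retried once from
 * the CSR before the code proceeds as if no progress was made.
 */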
/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with head_lock held.
 */
static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
{
	struct iowait *wait, *nw;
	struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
	unsigned i, n = 0, seq;
	struct sdma_txreq *stx;
	struct hfi1_ibdev *dev = &sde->dd->verbs_dev;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "avail: %u\n", avail);
#endif

	do {
		seq = read_seqbegin(&dev->iowait_lock);
		if (!list_empty(&sde->dmawait)) {
			/* at least one item */
			write_seqlock(&dev->iowait_lock);
			/* Harvest waiters wanting DMA descriptors */
			list_for_each_entry_safe(
					wait,
					nw,
					&sde->dmawait,
					list) {
				u16 num_desc = 0;

				if (n == ARRAY_SIZE(waits))
					break;
				if (!list_empty(&wait->tx_head)) {
					stx = list_first_entry(
						&wait->tx_head,
						struct sdma_txreq,
						list);
					num_desc = stx->num_desc;
				}
				if (num_desc > avail)
					break;
				avail -= num_desc;
				list_del_init(&wait->list);
				waits[n++] = wait;
			}
			write_sequnlock(&dev->iowait_lock);
			break;
		}
	} while (read_seqretry(&dev->iowait_lock, seq));

	for (i = 0; i < n; i++)
		waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
}
/* head_lock must be held */
static void sdma_make_progress(struct sdma_engine *sde, u64 status)
{
	struct sdma_txreq *txp = NULL;
	int progress = 0;
	u16 hwhead, swhead, swtail;
	int idle_check_done = 0;

	hwhead = sdma_gethead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

retry:
	txp = get_txhead(sde);
	swhead = sde->descq_head & sde->sdma_mask;
	trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
	while (swhead != hwhead) {
		/* advance head, wrap if needed */
		swhead = ++sde->descq_head & sde->sdma_mask;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == swhead) {
			int drained = 0;
			/* protect against complete modifying */
			struct iowait *wait = txp->wait;

			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			if (wait)
				drained = atomic_dec_and_test(&wait->sdma_busy);
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
			trace_hfi1_sdma_out_sn(sde, txp->sn);
			if (WARN_ON_ONCE(sde->head_sn != txp->sn))
				dd_dev_err(sde->dd, "expected %llu got %llu\n",
					sde->head_sn, txp->sn);
			sde->head_sn++;
#endif
			sdma_txclean(sde->dd, txp);
			if (txp->complete)
				(*txp->complete)(
					txp,
					SDMA_TXREQ_S_OK,
					drained);
			if (wait && drained)
				iowait_drain_wakeup(wait);
			/* see if there is another txp */
			txp = get_txhead(sde);
		}
		trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
		progress++;
	}

	/*
	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
	 * to updates to the dma_head location in host memory. The head
	 * value read might not be fully up to date. If there are pending
	 * descriptors and the SDMA idle interrupt fired then read from the
	 * CSR SDMA head instead to get the latest value from the hardware.
	 * The hardware SDMA head should be read at most once in this invocation
	 * of sdma_make_progress(..) which is ensured by idle_check_done flag
	 */
	if ((status & sde->idle_mask) && !idle_check_done) {
		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
		if (swtail != hwhead) {
			hwhead = (u16)read_sde_csr(sde, SD(HEAD));
			idle_check_done = 1;
			goto retry;
		}
	}

	sde->last_status = status;
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}
/**
 * sdma_engine_interrupt() - interrupt handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 *
 * Status is a mask of the 3 possible interrupts for this engine.  It will
 * contain bits _only_ for this SDMA engine.  It will contain at least one
 * bit, it may contain more.
 */
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
{
	trace_hfi1_sdma_engine_interrupt(sde, status);
	write_seqlock(&sde->head_lock);
	sdma_set_desc_cnt(sde, sde->descq_cnt / 2);
	sdma_make_progress(sde, status);
	write_sequnlock(&sde->head_lock);
}
/**
 * sdma_engine_error() - error handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 */
void sdma_engine_error(struct sdma_engine *sde, u64 status)
{
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
		sde->this_idx,
		(unsigned long long)status,
		sdma_state_names[sde->state.current_state]);
#endif
	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);
	if (status & ALL_SDMA_ENG_HALT_ERRS)
		__sdma_process_event(sde, sdma_event_e60_hw_halted);
	if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
		dd_dev_err(sde->dd,
			"SDMA (%u) engine error: 0x%llx state %s\n",
			sde->this_idx,
			(unsigned long long)status,
			sdma_state_names[sde->state.current_state]);
		dump_sdma_state(sde);
	}
	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}
static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
{
	u64 set_senddmactrl = 0;
	u64 clr_senddmactrl = 0;
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
		sde->this_idx,
		(op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
		(op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
		(op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
		(op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
#endif

	if (op & SDMA_SENDCTRL_OP_ENABLE)
		set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);

	if (op & SDMA_SENDCTRL_OP_INTENABLE)
		set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);

	if (op & SDMA_SENDCTRL_OP_HALT)
		set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);

	spin_lock_irqsave(&sde->senddmactrl_lock, flags);

	sde->p_senddmactrl |= set_senddmactrl;
	sde->p_senddmactrl &= ~clr_senddmactrl;

	if (op & SDMA_SENDCTRL_OP_CLEANUP)
		write_sde_csr(sde, SD(CTRL),
			sde->p_senddmactrl |
			SD(CTRL_SDMA_CLEANUP_SMASK));
	else
		write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);

	spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);

#ifdef CONFIG_SDMA_VERBOSITY
	sdma_dumpstate(sde);
#endif
}
static void sdma_setlengen(struct sdma_engine *sde)
{
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	/*
	 * Set SendDmaLenGen and clear-then-set the MSB of the generation
	 * count to enable generation checking and load the internal
	 * generation counter.
	 */
	write_sde_csr(sde, SD(LEN_GEN),
		(sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT)
	);
	write_sde_csr(sde, SD(LEN_GEN),
		((sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT))
		| (4ULL << SD(LEN_GEN_GENERATION_SHIFT))
	);
}
static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	smp_wmb(); /* see get_txhead() */
	writeq(tail, sde->tail_csr);
}
/*
 * This is called when changing to state s10_hw_start_up_halt_wait as
 * a result of send buffer errors or send DMA descriptor errors.
 */
static void sdma_hw_start_up(struct sdma_engine *sde)
{
	u64 reg;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	sdma_setlengen(sde);
	sdma_update_tail(sde, 0); /* Set SendDmaTail */
	*sde->head_dma = 0;

	reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
	      SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
	write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
}
#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
(r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

#define SET_STATIC_RATE_CONTROL_SMASK(r) \
(r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
/*
 * set_sdma_integrity
 *
 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
 */
static void set_sdma_integrity(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	u64 reg;

	if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
		return;

	reg = hfi1_pkt_base_sdma_integrity(dd);

	if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
		CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
	else
		SET_STATIC_RATE_CONTROL_SMASK(reg);

	write_sde_csr(sde, SD(CHECK_ENABLE), reg);
}
static void init_sdma_regs(
	struct sdma_engine *sde,
	u32 credits,
	uint idle_cnt)
{
	u8 opval, opmask;
#ifdef CONFIG_SDMA_VERBOSITY
	struct hfi1_devdata *dd = sde->dd;

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
	sdma_setlengen(sde);
	sdma_update_tail(sde, 0); /* Set SendDmaTail */
	write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
	write_sde_csr(sde, SD(DESC_CNT), 0);
	write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
	write_sde_csr(sde, SD(MEMORY),
		((u64)credits <<
			SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
		((u64)(credits * sde->this_idx) <<
			SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
	write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
	set_sdma_integrity(sde);
	opmask = OPCODE_CHECK_MASK_DISABLED;
	opval = OPCODE_CHECK_VAL_DISABLED;
	write_sde_csr(sde, SD(CHECK_OPCODE),
		(opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
		(opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
}
#ifdef CONFIG_SDMA_VERBOSITY

#define sdma_dumpstate_helper0(reg) do { \
		csr = read_csr(sde->dd, reg); \
		dd_dev_err(sde->dd, "%36s     0x%016llx\n", #reg, csr); \
	} while (0)

#define sdma_dumpstate_helper(reg) do { \
		csr = read_sde_csr(sde, reg); \
		dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
			#reg, sde->this_idx, csr); \
	} while (0)

#define sdma_dumpstate_helper2(reg) do { \
		csr = read_csr(sde->dd, reg + (8 * i)); \
		dd_dev_err(sde->dd, "%33s_%02u     0x%016llx\n", \
			#reg, i, csr); \
	} while (0)
void sdma_dumpstate(struct sdma_engine *sde)
{
	u64 csr;
	unsigned i;

	sdma_dumpstate_helper(SD(CTRL));
	sdma_dumpstate_helper(SD(STATUS));
	sdma_dumpstate_helper0(SD(ERR_STATUS));
	sdma_dumpstate_helper0(SD(ERR_MASK));
	sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
	sdma_dumpstate_helper(SD(ENG_ERR_MASK));

	for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
		sdma_dumpstate_helper2(CCE_INT_STATUS);
		sdma_dumpstate_helper2(CCE_INT_MASK);
		sdma_dumpstate_helper2(CCE_INT_BLOCKED);
	}

	sdma_dumpstate_helper(SD(TAIL));
	sdma_dumpstate_helper(SD(HEAD));
	sdma_dumpstate_helper(SD(PRIORITY_THLD));
	sdma_dumpstate_helper(SD(IDLE_CNT));
	sdma_dumpstate_helper(SD(RELOAD_CNT));
	sdma_dumpstate_helper(SD(DESC_CNT));
	sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
	sdma_dumpstate_helper(SD(MEMORY));
	sdma_dumpstate_helper0(SD(ENGINES));
	sdma_dumpstate_helper0(SD(MEM_SIZE));
	/* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
	sdma_dumpstate_helper(SD(BASE_ADDR));
	sdma_dumpstate_helper(SD(LEN_GEN));
	sdma_dumpstate_helper(SD(HEAD_ADDR));
	sdma_dumpstate_helper(SD(CHECK_ENABLE));
	sdma_dumpstate_helper(SD(CHECK_VL));
	sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
	sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
	sdma_dumpstate_helper(SD(CHECK_SLID));
	sdma_dumpstate_helper(SD(CHECK_OPCODE));
}
#endif
static void dump_sdma_state(struct sdma_engine *sde)
{
	struct hw_sdma_desc *descq;
	struct hw_sdma_desc *descqp;
	u64 desc[2];
	u64 addr;
	u8 gen;
	u16 len;
	u16 head, tail, cnt;

	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	cnt = sdma_descq_freecnt(sde);
	descq = sde->descq;

	dd_dev_err(sde->dd,
		"SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
		sde->this_idx,
		head,
		tail,
		cnt,
		!list_empty(&sde->flushlist));

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 0 };

		descqp = &sde->descq[head];
		desc[0] = le64_to_cpu(descqp->qw[0]);
		desc[1] = le64_to_cpu(descqp->qw[1]);
		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
				'H' : '-';
		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
			& SDMA_DESC0_PHY_ADDR_MASK;
		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
			& SDMA_DESC1_GENERATION_MASK;
		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
			& SDMA_DESC0_BYTE_COUNT_MASK;
		dd_dev_err(sde->dd,
			"SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
			head, flags, addr, gen, len);
		dd_dev_err(sde->dd,
			"\tdesc0:0x%016llx desc1 0x%016llx\n",
			desc[0], desc[1]);
		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
			dd_dev_err(sde->dd,
				"\taidx: %u amode: %u alen: %u\n",
				(u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
					>> SDMA_DESC1_HEADER_INDEX_SHIFT),
				(u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
					>> SDMA_DESC1_HEADER_MODE_SHIFT),
				(u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
					>> SDMA_DESC1_HEADER_DWS_SHIFT));
		head++;
		head &= sde->sdma_mask;
	}
}
#define SDE_FMT \
	"SDE %u STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
/**
 * sdma_seqfile_dump_sde() - debugfs dump of sde
 * @s: seq file
 * @sde: send dma engine to dump
 *
 * This routine dumps the sde to the indicated seq file.
 */
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
{
	u16 head, tail;
	struct hw_sdma_desc *descqp;
	u64 desc[2];
	u64 addr;
	u8 gen;
	u16 len;

	head = sde->descq_head & sde->sdma_mask;
	tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
	seq_printf(s, SDE_FMT, sde->this_idx,
		sdma_state_name(sde->state.current_state),
		(unsigned long long)read_sde_csr(sde, SD(CTRL)),
		(unsigned long long)read_sde_csr(sde, SD(STATUS)),
		(unsigned long long)read_sde_csr(sde,
			SD(ENG_ERR_STATUS)),
		(unsigned long long)read_sde_csr(sde, SD(TAIL)),
		tail,
		(unsigned long long)read_sde_csr(sde, SD(HEAD)),
		head,
		(unsigned long long)le64_to_cpu(*sde->head_dma),
		(unsigned long long)read_sde_csr(sde, SD(MEMORY)),
		(unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
		(unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
		(unsigned long long)sde->last_status,
		(unsigned long long)sde->ahg_bits,
		sde->tx_tail,
		sde->tx_head,
		sde->descq_tail,
		sde->descq_head,
		!list_empty(&sde->flushlist),
		sde->descq_full_count,
		(unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 0 };

		descqp = &sde->descq[head];
		desc[0] = le64_to_cpu(descqp->qw[0]);
		desc[1] = le64_to_cpu(descqp->qw[1]);
		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
				'H' : '-';
		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
			& SDMA_DESC0_PHY_ADDR_MASK;
		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
			& SDMA_DESC1_GENERATION_MASK;
		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
			& SDMA_DESC0_BYTE_COUNT_MASK;
		seq_printf(s,
			"\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
			head, flags, addr, gen, len);
		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
			seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
				(u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
					>> SDMA_DESC1_HEADER_INDEX_SHIFT),
				(u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
					>> SDMA_DESC1_HEADER_MODE_SHIFT));
		head = (head + 1) & sde->sdma_mask;
	}
}
/*
 * add the generation number into
 * the qw1 and return
 */
static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
{
	u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;

	qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
	qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
		<< SDMA_DESC1_GENERATION_SHIFT;
	return qw1;
}
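
/*
 * Generation example: with sdma_shift = 10 (descq_cnt = 1024), the 2-bit
 * generation is (descq_tail >> 10) & 3; it advances each time the tail
 * wraps the ring and cycles 0,1,2,3,0,... so the engine can detect stale
 * descriptors.
 */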
/*
 * This routine submits the indicated tx
 *
 * Space has already been guaranteed and
 * tail side of ring is locked.
 *
 * The hardware tail update is done
 * in the caller and that is facilitated
 * by returning the new tail.
 *
 * There is special case logic for ahg
 * to not add the generation number for
 * up to 2 descriptors that follow the
 * first descriptor.
 *
 */
static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
{
	int i;
	u16 tail;
	struct sdma_desc *descp = tx->descp;
	u8 skip = 0, mode = ahg_mode(tx);

	tail = sde->descq_tail & sde->sdma_mask;
	sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
	sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
	trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
				   tail, &sde->descq[tail]);
	tail = ++sde->descq_tail & sde->sdma_mask;
	descp++;
	if (mode > SDMA_AHG_APPLY_UPDATE1)
		skip = mode >> 1;
	for (i = 1; i < tx->num_desc; i++, descp++) {
		u64 qw1;

		sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
		if (skip) {
			/* edits don't have generation */
			qw1 = descp->qw[1];
			skip--;
		} else {
			/* replace generation with real one for non-edits */
			qw1 = add_gen(sde, descp->qw[1]);
		}
		sde->descq[tail].qw[1] = cpu_to_le64(qw1);
		trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
			tail, &sde->descq[tail]);
		tail = ++sde->descq_tail & sde->sdma_mask;
	}
	tx->next_descq_idx = tail;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
	WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
#endif
	sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
	sde->desc_avail -= tx->num_desc;
	return tail;
}
/*
 * Check for progress
 */
static int sdma_check_progress(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *tx)
{
	int ret;

	sde->desc_avail = sdma_descq_freecnt(sde);
	if (tx->num_desc <= sde->desc_avail)
		return -EAGAIN;
	/* pulse the head_lock */
	if (wait && wait->sleep) {
		unsigned seq;

		seq = raw_seqcount_begin(
			(const seqcount_t *)&sde->head_lock.seqcount);
		ret = wait->sleep(sde, wait, tx, seq);
		if (ret == -EAGAIN)
			sde->desc_avail = sdma_descq_freecnt(sde);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
/**
 * sdma_send_txreq() - submit a tx req to ring
 * @sde: sdma engine to use
 * @wait: wait structure to use when full (may be NULL)
 * @tx: sdma_txreq to submit
 *
 * The call submits the tx into the ring.  If a iowait structure is non-NULL
 * the packet will be queued to the list in wait.
 *
 * Return:
 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
 * ring (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait *wait,
		    struct sdma_txreq *tx)
{
	int ret = 0;
	u16 tail;
	unsigned long flags;

	/* user should have supplied entire packet */
	if (unlikely(tx->tlen))
		return -EINVAL;
	tx->wait = wait;
	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	if (unlikely(!__sdma_running(sde)))
		goto unlock_noconn;
	if (unlikely(tx->num_desc > sde->desc_avail))
		goto nodesc;
	tail = submit_tx(sde, tx);
	if (wait)
		atomic_inc(&wait->sdma_busy);
	sdma_update_tail(sde, tail);
unlock:
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	return ret;
unlock_noconn:
	if (wait)
		atomic_inc(&wait->sdma_busy);
	tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
	spin_lock(&sde->flushlist_lock);
	list_add_tail(&tx->list, &sde->flushlist);
	spin_unlock(&sde->flushlist_lock);
	if (wait) {
		wait->tx_count++;
		wait->count += tx->num_desc;
	}
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto unlock;
nodesc:
	ret = sdma_check_progress(sde, wait, tx);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto unlock;
}
2105 * sdma_send_txlist() - submit a list of tx req to ring
2106 * @sde: sdma engine to use
2107 * @wait: wait structure to use when full (may be NULL)
2108 * @tx_list: list of sdma_txreqs to submit
2110 * The call submits the list into the ring.
2112 * If the iowait structure is non-NULL and not equal to the iowait list
2113 * the unprocessed part of the list will be appended to the list in wait.
2115 * In all cases, the tx_list will be updated so the head of the tx_list is
2116 * the list of descriptors that have yet to be transmitted.
2118 * The intent of this call is to provide a more efficient
2119 * way of submitting multiple packets to SDMA while holding the tail
2123 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring
2125 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
int sdma_send_txlist(struct sdma_engine *sde,
		     struct iowait *wait,
		     struct list_head *tx_list)
{
	struct sdma_txreq *tx, *tx_next;
	int ret = 0;
	unsigned long flags;
	u16 tail = INVALID_TAIL;
	int count = 0;

	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		if (unlikely(!__sdma_running(sde)))
			goto unlock_noconn;
		if (unlikely(tx->num_desc > sde->desc_avail))
			goto nodesc;
		if (unlikely(tx->tlen)) {
			ret = -EINVAL;
			goto update_tail;
		}
		list_del_init(&tx->list);
		tail = submit_tx(sde, tx);
		count++;
		if (tail != INVALID_TAIL &&
		    (count & SDMA_TAIL_UPDATE_THRESH) == 0) {
			sdma_update_tail(sde, tail);
			tail = INVALID_TAIL;
		}
	}
update_tail:
	if (wait)
		atomic_add(count, &wait->sdma_busy);
	if (tail != INVALID_TAIL)
		sdma_update_tail(sde, tail);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	return ret;
unlock_noconn:
	spin_lock(&sde->flushlist_lock);
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		list_del_init(&tx->list);
		if (wait)
			atomic_inc(&wait->sdma_busy);
		tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
		tx->sn = sde->tail_sn++;
		trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
		list_add_tail(&tx->list, &sde->flushlist);
		if (wait)
			wait->count += tx->num_desc;
	}
	spin_unlock(&sde->flushlist_lock);
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto update_tail;
nodesc:
	ret = sdma_check_progress(sde, wait, tx);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto update_tail;
}
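
/*
 * Usage sketch for sdma_send_txlist() (illustrative only; tx1/tx2 are
 * hypothetical caller-built, fully constructed requests):
 *
 *	LIST_HEAD(txlist);
 *
 *	list_add_tail(&tx1.list, &txlist);
 *	list_add_tail(&tx2.list, &txlist);
 *	ret = sdma_send_txlist(sde, &wait, &txlist);
 *
 * Every submitted request is removed from txlist; on failure the head
 * of txlist is the first unsubmitted request.  The tail CSR is written
 * only when the running count wraps the SDMA_TAIL_UPDATE_THRESH mask
 * and once at the end, which is the efficiency win over per-packet
 * sdma_send_txreq() calls.
 */
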
static void sdma_process_event(struct sdma_engine *sde,
			       enum sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	__sdma_process_event(sde, event);

	if (sde->state.current_state == sdma_state_s99_running)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}

static void __sdma_process_event(struct sdma_engine *sde,
				 enum sdma_events event)
{
	struct sdma_state *ss = &sde->state;
	int need_progress = 0;

	/* CONFIG SDMA temporary */
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
		   sdma_state_names[ss->current_state],
		   sdma_event_names[event]);
#endif

	switch (ss->current_state) {
	case sdma_state_s00_hw_down:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			break;
		case sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually the
			 * result of a link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active on
			 * the 8051.
			 */
			ss->go_s99_running = 1;
			/* fall through and start dma engine */
		case sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&sde->state);
			sdma_set_state(sde,
				       sdma_state_s10_hw_start_up_halt_wait);
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s10_hw_start_up_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde,
				       sdma_state_s15_hw_start_up_clean_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			sdma_start_err_halt_wait(sde);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s15_hw_start_up_clean_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s20_idle:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			sdma_set_state(sde, sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			sdma_start_err_halt_wait(sde);
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e85_link_down:
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s50_hw_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(sde);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			sdma_start_err_halt_wait(sde);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s60_idle_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(sde);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			sdma_start_err_halt_wait(sde);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s80_hw_freeze:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
			sdma_start_sw_clean_up(sde);
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s82_freeze_sw_clean:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			/* notify caller this engine is done cleaning */
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s99_running:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			need_progress = 1;
			sdma_err_progress_check_schedule(sde);
			/* fall through */
		case sdma_event_e90_sw_halted:
			/*
			 * SW initiated halt does not perform engines
			 * progress check
			 */
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			sdma_start_err_halt_wait(sde);
			break;
		case sdma_event_e70_go_idle:
			sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		}
		break;
	}

	ss->last_event = event;
	if (need_progress)
		sdma_make_progress(sde, 0);
}
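
/*
 * Illustrative event flow through __sdma_process_event() (a summary of
 * the cases above, not additional code): a normal bring-up walks
 *
 *	s00_hw_down --e10_go_hw_start-->      s10_hw_start_up_halt_wait
 *	            --e15_hw_halt_done-->     s15_hw_start_up_clean_wait
 *	            --e25_hw_clean_up_done--> s20_idle or s99_running
 *
 * where the final state is chosen by ss->go_s99_running, which latches
 * any e30_go_running event seen while the engine was still coming up.
 */
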
/*
 * _extend_sdma_tx_descs() - helper to extend txreq
 *
 * This is called once the initial nominal allocation
 * of descriptors in the sdma_txreq is exhausted.
 *
 * The code will bump the allocation up to the max
 * of MAX_DESC (64) descriptors.  There doesn't seem to be
 * much point in an interim step.
 */
int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int i;

	tx->descp = kmalloc_array(
			MAX_DESC,
			sizeof(struct sdma_desc),
			GFP_ATOMIC);
	if (!tx->descp)
		return -ENOMEM;
	tx->desc_limit = MAX_DESC;
	/* copy ones already built */
	for (i = 0; i < tx->num_desc; i++)
		tx->descp[i] = tx->descs[i];
	return 0;
}

/* Update sdes when the lmc changes */
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
{
	struct sdma_engine *sde;
	int i;
	u64 sreg;

	sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
		SD(CHECK_SLID_MASK_SHIFT)) |
	       (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
		SD(CHECK_SLID_VALUE_SHIFT));

	for (i = 0; i < dd->num_sdma; i++) {
		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		sde = &dd->per_sdma[i];
		write_sde_csr(sde, SD(CHECK_SLID), sreg);
	}
}
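
/*
 * Worked example for the SLID check programmed above (values are
 * illustrative): with lid = 0x1234 and mask = ~0x3ull (LMC = 2),
 * CHECK_SLID_MASK is loaded with the mask and CHECK_SLID_VALUE with
 * lid & mask = 0x1234, so the engines accept source LIDs 0x1234
 * through 0x1237 and reject all others.
 */
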
/* tx not dword sized - pad */
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int rval = 0;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval)
			return rval;
	}
	/* finish the one just added */
	tx->num_desc++;
	make_tx_sdma_desc(
		tx,
		SDMA_MAP_NONE,
		dd->sdma_pad_phys,
		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
	_sdma_close_tx(dd, tx);
	return rval;
}
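
/*
 * Worked example of the pad computation above: the descriptor added
 * here carries sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1))
 * bytes from the dword-aligned pad buffer, e.g.
 *
 *	packet_len == 0x41  ->  pad = 4 - (0x41 & 3) = 3 bytes
 *
 * bringing the packet up to a dword boundary.  Callers only invoke this
 * helper when packet_len is not already dword sized.
 */
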
/*
 * Add ahg to the sdma_txreq
 *
 * The logic will consume up to 3
 * descriptors at the beginning of
 * the packet.
 */
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen)
{
	u32 i, shift = 0, desc = 0;
	u8 mode;

	WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
	/* compute mode */
	if (num_ahg == 1)
		mode = SDMA_AHG_APPLY_UPDATE1;
	else if (num_ahg <= 5)
		mode = SDMA_AHG_APPLY_UPDATE2;
	else
		mode = SDMA_AHG_APPLY_UPDATE3;
	tx->num_desc++;
	/* initialize the consumed descriptors to zero */
	switch (mode) {
	case SDMA_AHG_APPLY_UPDATE3:
		tx->num_desc++;
		tx->descs[2].qw[0] = 0;
		tx->descs[2].qw[1] = 0;
		/* fall through */
	case SDMA_AHG_APPLY_UPDATE2:
		tx->num_desc++;
		tx->descs[1].qw[0] = 0;
		tx->descs[1].qw[1] = 0;
	}
	ahg_hlen >>= 2;
	tx->descs[0].qw[1] |=
		(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
			<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
		(((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
			<< SDMA_DESC1_HEADER_DWS_SHIFT) |
		(((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
			<< SDMA_DESC1_HEADER_MODE_SHIFT) |
		(((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
			<< SDMA_DESC1_HEADER_UPDATE1_SHIFT);
	for (i = 0; i < (num_ahg - 1); i++) {
		if (!shift && !(i & 2))
			desc++;
		tx->descs[desc].qw[!!(i & 2)] |=
			(((u64)ahg[i + 1]) << shift);
		shift = (shift + 32) & 63;
	}
}
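
/*
 * Illustrative mapping of num_ahg to the mode chosen above (a summary,
 * not additional driver code):
 *
 *	num_ahg == 1   ->  SDMA_AHG_APPLY_UPDATE1, 1 descriptor consumed
 *	num_ahg 2..5   ->  SDMA_AHG_APPLY_UPDATE2, 2 descriptors consumed
 *	num_ahg 6..9   ->  SDMA_AHG_APPLY_UPDATE3, 3 descriptors consumed
 *
 * ahg[0] is packed into descriptor 0 alongside the index, header dwords,
 * and mode; the loop then packs the remaining 32-bit update words two
 * per qword into the extra descriptors.
 */
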
/**
 * sdma_ahg_alloc - allocate an AHG entry
 * @sde: engine to allocate from
 *
 * Return:
 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
 * -ENOSPC if an entry is not available
 */
int sdma_ahg_alloc(struct sdma_engine *sde)
{
	int nr;
	int oldbit;

	if (!sde) {
		trace_hfi1_ahg_allocate(sde, -EINVAL);
		return -EINVAL;
	}
	while (1) {
		nr = ffz(ACCESS_ONCE(sde->ahg_bits));
		if (nr > 31) {
			trace_hfi1_ahg_allocate(sde, -ENOSPC);
			return -ENOSPC;
		}
		oldbit = test_and_set_bit(nr, &sde->ahg_bits);
		if (!oldbit)
			break;
		cpu_relax();
	}
	trace_hfi1_ahg_allocate(sde, nr);
	return nr;
}

/**
 * sdma_ahg_free - free an AHG entry
 * @sde: engine to return AHG entry
 * @ahg_index: index to free
 *
 * This routine frees the indicated AHG entry.
 */
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
{
	if (!sde)
		return;
	trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
	clear_bit(ahg_index, &sde->ahg_bits);
}
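
/*
 * Usage sketch for the AHG allocator pair above (illustrative only;
 * build_ahg_header() is a hypothetical consumer of the entry):
 *
 *	int ahg_index = sdma_ahg_alloc(sde);
 *
 *	if (ahg_index >= 0) {
 *		build_ahg_header(qp, ahg_index);
 *		sdma_ahg_free(sde, ahg_index);
 *	}
 *
 * Allocation is lock free: ffz() proposes a clear bit and
 * test_and_set_bit() claims it, looping if another CPU wins the race.
 */
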
/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is going into a freeze but before the freeze is fully
 * settled.  Generally an error interrupt.
 *
 * This event will pull the engine out of running so no more entries can be
 * added to the engine's queue.
 */
void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
{
	int i;
	enum sdma_events event = link_down ? sdma_event_e85_link_down :
					     sdma_event_e80_hw_freeze;

	/* set up the wait but do not wait here */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines to stop running and wait */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], event);

	/* sdma_freeze() will wait for all engines to have stopped */
}

/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is fully frozen.
 */
void sdma_freeze(struct hfi1_devdata *dd)
{
	int i;
	int ret;

	/*
	 * Make sure all engines have moved out of the running state before
	 * continuing.
	 */
	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count) <=
				       0);
	/* interrupted or count is negative, then unloading - just exit */
	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
		return;

	/* set up the count for the next wait */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines that the SPC is frozen, they can start cleaning */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);

	/*
	 * Wait for everyone to finish software clean before exiting.  The
	 * software clean will read engine CSRs, so must be completed before
	 * the next step, which will clear the engine CSRs.
	 */
	(void) wait_event_interruptible(dd->sdma_unfreeze_wq,
			atomic_read(&dd->sdma_unfreeze_count) <= 0);
	/* no need to check results - done no matter what */
}

/*
 * SPC freeze handling for the SDMA engines.  Called after the SPC is unfrozen.
 *
 * The SPC freeze acts like an SDMA halt and a hardware clean combined.  All
 * that is left is a software clean.  We could do it after the SPC is fully
 * frozen, but then we'd have to add another state to wait for the unfreeze.
 * Instead, just defer the software clean until the unfreeze step.
 */
void sdma_unfreeze(struct hfi1_devdata *dd)
{
	int i;

	/* tell all engines to start freeze clean up */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i],
				   sdma_event_e82_hw_unfreeze);
}
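
/*
 * Illustrative SPC freeze sequence as driven by the device error
 * handling (a summary of the three entry points above, not additional
 * code):
 *
 *	sdma_freeze_notify(dd, link_down);    freeze (or link down) seen
 *	sdma_freeze(dd);                      SPC now fully frozen
 *	sdma_unfreeze(dd);                    SPC unfrozen again
 *
 * The steps feed e80/e85, e81, and e82 events respectively into every
 * engine's state machine, using dd->sdma_unfreeze_count and
 * dd->sdma_unfreeze_wq to wait for all engines between steps.
 */
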
/**
 * _sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 */
void _sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
	/* assume we have selected a good cpu */
	write_csr(sde->dd,
		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
		  sde->progress_mask);