/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
/* must be a power of 2, >= 64 and <= 32768 */
#define SDMA_DESCQ_CNT 2048
#define SDMA_DESC_INTR 64
#define INVALID_TAIL 0xffff
static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
module_param(sdma_descq_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

static uint sdma_idle_cnt = 250;
module_param(sdma_idle_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");

static uint mod_num_sdma;
module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");

static uint sdma_desct_intr = SDMA_DESC_INTR;
module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
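
/*
 * The read-only parameters above are evaluated at module load, so they are
 * normally set on the load command line or in modprobe.d, for example
 * (illustrative values only): "modprobe hfi1 sdma_descq_cnt=4096 num_sdma=8".
 * desct_intr is additionally writable at runtime (S_IWUSR), typically via
 * /sys/module/hfi1/parameters/desct_intr.
 */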
#define SDMA_WAIT_BATCH_SIZE 20
/* max wait time for an SDMA engine to indicate it has halted */
#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
/* all SDMA engine errors that cause a halt */
#define SD(name) SEND_DMA_##name
#define ALL_SDMA_ENG_HALT_ERRS \
	(SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
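
/*
 * sdma_engine_error() treats any of the ALL_SDMA_ENG_HALT_ERRS bits in the
 * per-engine error status as a halt: it feeds e60_hw_halted into the state
 * machine, while other error bits are only logged and the state dumped.
 */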
/* sdma_sendctrl operations */
#define SDMA_SENDCTRL_OP_ENABLE    BIT(0)
#define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
#define SDMA_SENDCTRL_OP_HALT      BIT(2)
#define SDMA_SENDCTRL_OP_CLEANUP   BIT(3)

/* handle long defines */
#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
	SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
	SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
static const char * const sdma_state_names[] = {
	[sdma_state_s00_hw_down]                = "s00_HwDown",
	[sdma_state_s10_hw_start_up_halt_wait]  = "s10_HwStartUpHaltWait",
	[sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
	[sdma_state_s20_idle]                   = "s20_Idle",
	[sdma_state_s30_sw_clean_up_wait]       = "s30_SwCleanUpWait",
	[sdma_state_s40_hw_clean_up_wait]       = "s40_HwCleanUpWait",
	[sdma_state_s50_hw_halt_wait]           = "s50_HwHaltWait",
	[sdma_state_s60_idle_halt_wait]         = "s60_IdleHaltWait",
	[sdma_state_s80_hw_freeze]              = "s80_HwFreeze",
	[sdma_state_s82_freeze_sw_clean]        = "s82_FreezeSwClean",
	[sdma_state_s99_running]                = "s99_Running",
};
#ifdef CONFIG_SDMA_VERBOSITY
static const char * const sdma_event_names[] = {
	[sdma_event_e00_go_hw_down]       = "e00_GoHwDown",
	[sdma_event_e10_go_hw_start]      = "e10_GoHwStart",
	[sdma_event_e15_hw_halt_done]     = "e15_HwHaltDone",
	[sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
	[sdma_event_e30_go_running]       = "e30_GoRunning",
	[sdma_event_e40_sw_cleaned]       = "e40_SwCleaned",
	[sdma_event_e50_hw_cleaned]       = "e50_HwCleaned",
	[sdma_event_e60_hw_halted]        = "e60_HwHalted",
	[sdma_event_e70_go_idle]          = "e70_GoIdle",
	[sdma_event_e80_hw_freeze]        = "e80_HwFreeze",
	[sdma_event_e81_hw_frozen]        = "e81_HwFrozen",
	[sdma_event_e82_hw_unfreeze]      = "e82_HwUnfreeze",
	[sdma_event_e85_link_down]        = "e85_LinkDown",
	[sdma_event_e90_sw_halted]        = "e90_SwHalted",
};
#endif
static const struct sdma_set_state_action sdma_action_table[] = {
	[sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
	[sdma_state_s10_hw_start_up_halt_wait] = {
	[sdma_state_s15_hw_start_up_clean_wait] = {
	[sdma_state_s20_idle] = {
	[sdma_state_s30_sw_clean_up_wait] = {
	[sdma_state_s40_hw_clean_up_wait] = {
	[sdma_state_s50_hw_halt_wait] = {
	[sdma_state_s60_idle_halt_wait] = {
		.go_s99_running_tofalse = 1,
	[sdma_state_s80_hw_freeze] = {
	[sdma_state_s82_freeze_sw_clean] = {
	[sdma_state_s99_running] = {
		.go_s99_running_totrue = 1,
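
/*
 * sdma_set_state() indexes this table by the state being entered: each
 * entry's op_* flags are OR'd into a SDMA_SENDCTRL_OP_* mask handed to
 * sdma_sendctrl(), and go_s99_running_tofalse/totrue latch whether the
 * engine should (re)enter s99_running later.
 */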
#define SDMA_TAIL_UPDATE_THRESH 0x1F

/* declare all statics here rather than keep sorting */
static void sdma_complete(struct kref *);
static void sdma_finalput(struct sdma_state *);
static void sdma_get(struct sdma_state *);
static void sdma_hw_clean_up_task(unsigned long);
static void sdma_put(struct sdma_state *);
static void sdma_set_state(struct sdma_engine *, enum sdma_states);
static void sdma_start_hw_clean_up(struct sdma_engine *);
static void sdma_sw_clean_up_task(unsigned long);
static void sdma_sendctrl(struct sdma_engine *, unsigned);
static void init_sdma_regs(struct sdma_engine *, u32, uint);
static void sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void __sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void dump_sdma_state(struct sdma_engine *sde);
static void sdma_make_progress(struct sdma_engine *sde, u64 status);
static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
static void sdma_flush_descq(struct sdma_engine *sde);
/*
 * sdma_state_name() - return state string from enum
 */
static const char *sdma_state_name(enum sdma_states state)
{
	return sdma_state_names[state];
}
static void sdma_get(struct sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct sdma_state *ss =
		container_of(kref, struct sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}
static inline void write_sde_csr(
	struct sdma_engine *sde,
	u32 offset0,
	u64 value)
{
	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
}

static inline u64 read_sde_csr(
	struct sdma_engine *sde,
	u32 offset0)
{
	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
}
301 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
302 * sdma engine 'sde' to drop to 0.
304 static void sdma_wait_for_packet_egress(struct sdma_engine
*sde
,
307 u64 off
= 8 * sde
->this_idx
;
308 struct hfi1_devdata
*dd
= sde
->dd
;
315 reg
= read_csr(dd
, off
+ SEND_EGRESS_SEND_DMA_STATUS
);
317 reg
&= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
;
318 reg
>>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
;
		/* counter is reset if occupancy count changes */
325 /* timed out - bounce the link */
326 dd_dev_err(dd
, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
327 __func__
, sde
->this_idx
, (u32
)reg
);
328 queue_work(dd
->pport
->link_wq
,
329 &dd
->pport
->link_bounce_work
);
337 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
338 * and pause for credit return.
340 void sdma_wait(struct hfi1_devdata
*dd
)
344 for (i
= 0; i
< dd
->num_sdma
; i
++) {
345 struct sdma_engine
*sde
= &dd
->per_sdma
[i
];
347 sdma_wait_for_packet_egress(sde
, 0);
351 static inline void sdma_set_desc_cnt(struct sdma_engine
*sde
, unsigned cnt
)
355 if (!(sde
->dd
->flags
& HFI1_HAS_SDMA_TIMEOUT
))
358 reg
&= SD(DESC_CNT_CNT_MASK
);
359 reg
<<= SD(DESC_CNT_CNT_SHIFT
);
360 write_sde_csr(sde
, SD(DESC_CNT
), reg
);
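
/*
 * DESC_CNT sets how many descriptors the engine processes before raising a
 * descriptor-count interrupt (only used when HFI1_HAS_SDMA_TIMEOUT is set);
 * sdma_engine_interrupt() rewrites it with the desct_intr module parameter
 * on every engine interrupt.
 */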
363 static inline void complete_tx(struct sdma_engine
*sde
,
364 struct sdma_txreq
*tx
,
367 /* protect against complete modifying */
368 struct iowait
*wait
= tx
->wait
;
369 callback_t complete
= tx
->complete
;
371 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
372 trace_hfi1_sdma_out_sn(sde
, tx
->sn
);
373 if (WARN_ON_ONCE(sde
->head_sn
!= tx
->sn
))
374 dd_dev_err(sde
->dd
, "expected %llu got %llu\n",
375 sde
->head_sn
, tx
->sn
);
378 __sdma_txclean(sde
->dd
, tx
);
380 (*complete
)(tx
, res
);
381 if (iowait_sdma_dec(wait
))
382 iowait_drain_wakeup(wait
);
386 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
388 * Depending on timing there can be txreqs in two places:
389 * - in the descq ring
390 * - in the flush list
392 * To avoid ordering issues the descq ring needs to be flushed
393 * first followed by the flush list.
395 * This routine is called from two places
396 * - From a work queue item
397 * - Directly from the state machine just before setting the
400 * Must be called with head_lock held
403 static void sdma_flush(struct sdma_engine
*sde
)
405 struct sdma_txreq
*txp
, *txp_next
;
406 LIST_HEAD(flushlist
);
409 /* flush from head to tail */
410 sdma_flush_descq(sde
);
411 spin_lock_irqsave(&sde
->flushlist_lock
, flags
);
412 /* copy flush list */
413 list_for_each_entry_safe(txp
, txp_next
, &sde
->flushlist
, list
) {
414 list_del_init(&txp
->list
);
415 list_add_tail(&txp
->list
, &flushlist
);
417 spin_unlock_irqrestore(&sde
->flushlist_lock
, flags
);
418 /* flush from flush list */
419 list_for_each_entry_safe(txp
, txp_next
, &flushlist
, list
)
420 complete_tx(sde
, txp
, SDMA_TXREQ_S_ABORTED
);
424 * Fields a work request for flushing the descq ring
427 * If the engine has been brought to running during
428 * the scheduling delay, the flush is ignored, assuming
429 * that the process of bringing the engine to running
430 * would have done this flush prior to going to running.
433 static void sdma_field_flush(struct work_struct
*work
)
436 struct sdma_engine
*sde
=
437 container_of(work
, struct sdma_engine
, flush_worker
);
439 write_seqlock_irqsave(&sde
->head_lock
, flags
);
440 if (!__sdma_running(sde
))
442 write_sequnlock_irqrestore(&sde
->head_lock
, flags
);
445 static void sdma_err_halt_wait(struct work_struct
*work
)
447 struct sdma_engine
*sde
= container_of(work
, struct sdma_engine
,
450 unsigned long timeout
;
452 timeout
= jiffies
+ msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT
);
454 statuscsr
= read_sde_csr(sde
, SD(STATUS
));
455 statuscsr
&= SD(STATUS_ENG_HALTED_SMASK
);
458 if (time_after(jiffies
, timeout
)) {
460 "SDMA engine %d - timeout waiting for engine to halt\n",
463 * Continue anyway. This could happen if there was
464 * an uncorrectable error in the wrong spot.
468 usleep_range(80, 120);
471 sdma_process_event(sde
, sdma_event_e15_hw_halt_done
);
474 static void sdma_err_progress_check_schedule(struct sdma_engine
*sde
)
476 if (!is_bx(sde
->dd
) && HFI1_CAP_IS_KSET(SDMA_AHG
)) {
478 struct hfi1_devdata
*dd
= sde
->dd
;
480 for (index
= 0; index
< dd
->num_sdma
; index
++) {
481 struct sdma_engine
*curr_sdma
= &dd
->per_sdma
[index
];
483 if (curr_sdma
!= sde
)
484 curr_sdma
->progress_check_head
=
485 curr_sdma
->descq_head
;
488 "SDMA engine %d - check scheduled\n",
490 mod_timer(&sde
->err_progress_check_timer
, jiffies
+ 10);
494 static void sdma_err_progress_check(struct timer_list
*t
)
497 struct sdma_engine
*sde
= from_timer(sde
, t
, err_progress_check_timer
);
499 dd_dev_err(sde
->dd
, "SDE progress check event\n");
500 for (index
= 0; index
< sde
->dd
->num_sdma
; index
++) {
501 struct sdma_engine
*curr_sde
= &sde
->dd
->per_sdma
[index
];
504 /* check progress on each engine except the current one */
508 * We must lock interrupts when acquiring sde->lock,
509 * to avoid a deadlock if interrupt triggers and spins on
510 * the same lock on same CPU
512 spin_lock_irqsave(&curr_sde
->tail_lock
, flags
);
513 write_seqlock(&curr_sde
->head_lock
);
515 /* skip non-running queues */
516 if (curr_sde
->state
.current_state
!= sdma_state_s99_running
) {
517 write_sequnlock(&curr_sde
->head_lock
);
518 spin_unlock_irqrestore(&curr_sde
->tail_lock
, flags
);
522 if ((curr_sde
->descq_head
!= curr_sde
->descq_tail
) &&
523 (curr_sde
->descq_head
==
524 curr_sde
->progress_check_head
))
525 __sdma_process_event(curr_sde
,
526 sdma_event_e90_sw_halted
);
527 write_sequnlock(&curr_sde
->head_lock
);
528 spin_unlock_irqrestore(&curr_sde
->tail_lock
, flags
);
530 schedule_work(&sde
->err_halt_worker
);
533 static void sdma_hw_clean_up_task(unsigned long opaque
)
535 struct sdma_engine
*sde
= (struct sdma_engine
*)opaque
;
539 #ifdef CONFIG_SDMA_VERBOSITY
540 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
541 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
,
544 statuscsr
= read_sde_csr(sde
, SD(STATUS
));
545 statuscsr
&= SD(STATUS_ENG_CLEANED_UP_SMASK
);
551 sdma_process_event(sde
, sdma_event_e25_hw_clean_up_done
);
554 static inline struct sdma_txreq
*get_txhead(struct sdma_engine
*sde
)
556 return sde
->tx_ring
[sde
->tx_head
& sde
->sdma_mask
];
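
/*
 * tx_ring shadows the descriptor queue: tx_head/tx_tail advance one slot
 * per txreq (not per descriptor) and are masked with sdma_mask, which is
 * safe because every txreq consumes at least one descriptor slot.
 */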
560 * flush ring for recovery
562 static void sdma_flush_descq(struct sdma_engine
*sde
)
566 struct sdma_txreq
*txp
= get_txhead(sde
);
568 /* The reason for some of the complexity of this code is that
569 * not all descriptors have corresponding txps. So, we have to
570 * be able to skip over descs until we wander into the range of
571 * the next txp on the list.
573 head
= sde
->descq_head
& sde
->sdma_mask
;
574 tail
= sde
->descq_tail
& sde
->sdma_mask
;
575 while (head
!= tail
) {
576 /* advance head, wrap if needed */
577 head
= ++sde
->descq_head
& sde
->sdma_mask
;
578 /* if now past this txp's descs, do the callback */
579 if (txp
&& txp
->next_descq_idx
== head
) {
580 /* remove from list */
581 sde
->tx_ring
[sde
->tx_head
++ & sde
->sdma_mask
] = NULL
;
582 complete_tx(sde
, txp
, SDMA_TXREQ_S_ABORTED
);
583 trace_hfi1_sdma_progress(sde
, head
, tail
, txp
);
584 txp
= get_txhead(sde
);
589 sdma_desc_avail(sde
, sdma_descq_freecnt(sde
));
592 static void sdma_sw_clean_up_task(unsigned long opaque
)
594 struct sdma_engine
*sde
= (struct sdma_engine
*)opaque
;
597 spin_lock_irqsave(&sde
->tail_lock
, flags
);
598 write_seqlock(&sde
->head_lock
);
601 * At this point, the following should always be true:
602 * - We are halted, so no more descriptors are getting retired.
603 * - We are not running, so no one is submitting new work.
604 * - Only we can send the e40_sw_cleaned, so we can't start
605 * running again until we say so. So, the active list and
606 * descq are ours to play with.
610 * In the error clean up sequence, software clean must be called
611 * before the hardware clean so we can use the hardware head in
612 * the progress routine. A hardware clean or SPC unfreeze will
613 * reset the hardware head.
615 * Process all retired requests. The progress routine will use the
616 * latest physical hardware head - we are not running so speed does
619 sdma_make_progress(sde
, 0);
624 * Reset our notion of head and tail.
625 * Note that the HW registers have been reset via an earlier
630 sde
->desc_avail
= sdma_descq_freecnt(sde
);
633 __sdma_process_event(sde
, sdma_event_e40_sw_cleaned
);
635 write_sequnlock(&sde
->head_lock
);
636 spin_unlock_irqrestore(&sde
->tail_lock
, flags
);
639 static void sdma_sw_tear_down(struct sdma_engine
*sde
)
641 struct sdma_state
*ss
= &sde
->state
;
643 /* Releasing this reference means the state machine has stopped. */
646 /* stop waiting for all unfreeze events to complete */
647 atomic_set(&sde
->dd
->sdma_unfreeze_count
, -1);
648 wake_up_interruptible(&sde
->dd
->sdma_unfreeze_wq
);
651 static void sdma_start_hw_clean_up(struct sdma_engine
*sde
)
653 tasklet_hi_schedule(&sde
->sdma_hw_clean_up_task
);
656 static void sdma_set_state(struct sdma_engine
*sde
,
657 enum sdma_states next_state
)
659 struct sdma_state
*ss
= &sde
->state
;
660 const struct sdma_set_state_action
*action
= sdma_action_table
;
663 trace_hfi1_sdma_state(
665 sdma_state_names
[ss
->current_state
],
666 sdma_state_names
[next_state
]);
668 /* debugging bookkeeping */
669 ss
->previous_state
= ss
->current_state
;
670 ss
->previous_op
= ss
->current_op
;
671 ss
->current_state
= next_state
;
673 if (ss
->previous_state
!= sdma_state_s99_running
&&
674 next_state
== sdma_state_s99_running
)
677 if (action
[next_state
].op_enable
)
678 op
|= SDMA_SENDCTRL_OP_ENABLE
;
680 if (action
[next_state
].op_intenable
)
681 op
|= SDMA_SENDCTRL_OP_INTENABLE
;
683 if (action
[next_state
].op_halt
)
684 op
|= SDMA_SENDCTRL_OP_HALT
;
686 if (action
[next_state
].op_cleanup
)
687 op
|= SDMA_SENDCTRL_OP_CLEANUP
;
689 if (action
[next_state
].go_s99_running_tofalse
)
690 ss
->go_s99_running
= 0;
692 if (action
[next_state
].go_s99_running_totrue
)
693 ss
->go_s99_running
= 1;
696 sdma_sendctrl(sde
, ss
->current_op
);
700 * sdma_get_descq_cnt() - called when device probed
702 * Return a validated descq count.
704 * This is currently only used in the verbs initialization to build the tx
707 * This will probably be deleted in favor of a more scalable approach to
u16 sdma_get_descq_cnt(void)
{
	u16 count = sdma_descq_cnt;

	if (!count)
		return SDMA_DESCQ_CNT;
	/* count must be a power of 2 between 64 and 32768, inclusive.
	 * Otherwise return the default.
	 */
	if (!is_power_of_2(count))
		return SDMA_DESCQ_CNT;
	if (count < 64 || count > 32768)
		return SDMA_DESCQ_CNT;
	return count;
}
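
/*
 * Example: sdma_descq_cnt=3000 fails the power-of-2 check and falls back to
 * SDMA_DESCQ_CNT (2048), while 4096 passes both checks and is used as given.
 */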
728 * sdma_engine_get_vl() - return vl for a given sdma engine
731 * This function returns the vl mapped to a given engine, or an error if
732 * the mapping can't be found. The mapping fields are protected by RCU.
734 int sdma_engine_get_vl(struct sdma_engine
*sde
)
736 struct hfi1_devdata
*dd
= sde
->dd
;
737 struct sdma_vl_map
*m
;
740 if (sde
->this_idx
>= TXE_NUM_SDMA_ENGINES
)
744 m
= rcu_dereference(dd
->sdma_map
);
749 vl
= m
->engine_to_vl
[sde
->this_idx
];
756 * sdma_select_engine_vl() - select sdma engine
758 * @selector: a spreading factor
762 * This function returns an engine based on the selector and a vl. The
763 * mapping fields are protected by RCU.
765 struct sdma_engine
*sdma_select_engine_vl(
766 struct hfi1_devdata
*dd
,
770 struct sdma_vl_map
*m
;
771 struct sdma_map_elem
*e
;
772 struct sdma_engine
*rval
;
774 /* NOTE This should only happen if SC->VL changed after the initial
775 * checks on the QP/AH
776 * Default will return engine 0 below
784 m
= rcu_dereference(dd
->sdma_map
);
787 return &dd
->per_sdma
[0];
789 e
= m
->map
[vl
& m
->mask
];
790 rval
= e
->sde
[selector
& e
->mask
];
794 rval
= !rval
? &dd
->per_sdma
[0] : rval
;
795 trace_hfi1_sdma_engine_select(dd
, selector
, vl
, rval
->this_idx
);
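
/*
 * Selection is two masked lookups: vl & m->mask picks the per-VL element,
 * selector & e->mask picks one of that VL's engines, and a NULL result
 * falls back to per_sdma[0].
 */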
800 * sdma_select_engine_sc() - select sdma engine
802 * @selector: a spreading factor
806 * This function returns an engine based on the selector and an sc.
808 struct sdma_engine
*sdma_select_engine_sc(
809 struct hfi1_devdata
*dd
,
813 u8 vl
= sc_to_vlt(dd
, sc5
);
815 return sdma_select_engine_vl(dd
, selector
, vl
);
818 struct sdma_rht_map_elem
{
821 struct sdma_engine
*sde
[0];
824 struct sdma_rht_node
{
825 unsigned long cpu_id
;
826 struct sdma_rht_map_elem
*map
[HFI1_MAX_VLS_SUPPORTED
];
827 struct rhash_head node
;
830 #define NR_CPUS_HINT 192
832 static const struct rhashtable_params sdma_rht_params
= {
833 .nelem_hint
= NR_CPUS_HINT
,
834 .head_offset
= offsetof(struct sdma_rht_node
, node
),
835 .key_offset
= offsetof(struct sdma_rht_node
, cpu_id
),
836 .key_len
= FIELD_SIZEOF(struct sdma_rht_node
, cpu_id
),
839 .automatic_shrinking
= true,
843 * sdma_select_user_engine() - select sdma engine based on user setup
845 * @selector: a spreading factor
848 * This function returns an sdma engine for a user sdma request.
849 * User defined sdma engine affinity setting is honored when applicable,
850 * otherwise system default sdma engine mapping is used. To ensure correct
851 * ordering, the mapping from <selector, vl> to sde must remain unchanged.
853 struct sdma_engine
*sdma_select_user_engine(struct hfi1_devdata
*dd
,
856 struct sdma_rht_node
*rht_node
;
857 struct sdma_engine
*sde
= NULL
;
858 const struct cpumask
*current_mask
= ¤t
->cpus_allowed
;
859 unsigned long cpu_id
;
	 * To ensure that the same sdma engine(s) are always selected,
	 * make sure the process is pinned to this CPU only.
865 if (cpumask_weight(current_mask
) != 1)
868 cpu_id
= smp_processor_id();
870 rht_node
= rhashtable_lookup_fast(dd
->sdma_rht
, &cpu_id
,
873 if (rht_node
&& rht_node
->map
[vl
]) {
874 struct sdma_rht_map_elem
*map
= rht_node
->map
[vl
];
876 sde
= map
->sde
[selector
& map
->mask
];
884 return sdma_select_engine_vl(dd
, selector
, vl
);
887 static void sdma_populate_sde_map(struct sdma_rht_map_elem
*map
)
891 for (i
= 0; i
< roundup_pow_of_two(map
->ctr
? : 1) - map
->ctr
; i
++)
892 map
->sde
[map
->ctr
+ i
] = map
->sde
[i
];
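
/*
 * Example: with ctr == 3 populated entries, roundup_pow_of_two(3) == 4, so
 * sde[3] is filled with a copy of sde[0]; lookups can then use
 * "selector & (4 - 1)" instead of a modulo by 3.
 */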
895 static void sdma_cleanup_sde_map(struct sdma_rht_map_elem
*map
,
896 struct sdma_engine
*sde
)
900 /* only need to check the first ctr entries for a match */
901 for (i
= 0; i
< map
->ctr
; i
++) {
902 if (map
->sde
[i
] == sde
) {
903 memmove(&map
->sde
[i
], &map
->sde
[i
+ 1],
904 (map
->ctr
- i
- 1) * sizeof(map
->sde
[0]));
906 pow
= roundup_pow_of_two(map
->ctr
? : 1);
908 sdma_populate_sde_map(map
);
915 * Prevents concurrent reads and writes of the sdma engine cpu_mask
917 static DEFINE_MUTEX(process_to_sde_mutex
);
919 ssize_t
sdma_set_cpu_to_sde_map(struct sdma_engine
*sde
, const char *buf
,
922 struct hfi1_devdata
*dd
= sde
->dd
;
923 cpumask_var_t mask
, new_mask
;
926 struct sdma_rht_node
*rht_node
;
928 vl
= sdma_engine_get_vl(sde
);
929 if (unlikely(vl
< 0 || vl
>= ARRAY_SIZE(rht_node
->map
)))
932 ret
= zalloc_cpumask_var(&mask
, GFP_KERNEL
);
936 ret
= zalloc_cpumask_var(&new_mask
, GFP_KERNEL
);
938 free_cpumask_var(mask
);
941 ret
= cpulist_parse(buf
, mask
);
945 if (!cpumask_subset(mask
, cpu_online_mask
)) {
946 dd_dev_warn(sde
->dd
, "Invalid CPU mask\n");
951 sz
= sizeof(struct sdma_rht_map_elem
) +
952 (TXE_NUM_SDMA_ENGINES
* sizeof(struct sdma_engine
*));
954 mutex_lock(&process_to_sde_mutex
);
956 for_each_cpu(cpu
, mask
) {
957 /* Check if we have this already mapped */
958 if (cpumask_test_cpu(cpu
, &sde
->cpu_mask
)) {
959 cpumask_set_cpu(cpu
, new_mask
);
963 rht_node
= rhashtable_lookup_fast(dd
->sdma_rht
, &cpu
,
966 rht_node
= kzalloc(sizeof(*rht_node
), GFP_KERNEL
);
972 rht_node
->map
[vl
] = kzalloc(sz
, GFP_KERNEL
);
973 if (!rht_node
->map
[vl
]) {
978 rht_node
->cpu_id
= cpu
;
979 rht_node
->map
[vl
]->mask
= 0;
980 rht_node
->map
[vl
]->ctr
= 1;
981 rht_node
->map
[vl
]->sde
[0] = sde
;
983 ret
= rhashtable_insert_fast(dd
->sdma_rht
,
987 kfree(rht_node
->map
[vl
]);
989 dd_dev_err(sde
->dd
, "Failed to set process to sde affinity for cpu %lu\n",
997 /* Add new user mappings */
998 if (!rht_node
->map
[vl
])
999 rht_node
->map
[vl
] = kzalloc(sz
, GFP_KERNEL
);
1001 if (!rht_node
->map
[vl
]) {
1006 rht_node
->map
[vl
]->ctr
++;
1007 ctr
= rht_node
->map
[vl
]->ctr
;
1008 rht_node
->map
[vl
]->sde
[ctr
- 1] = sde
;
1009 pow
= roundup_pow_of_two(ctr
);
1010 rht_node
->map
[vl
]->mask
= pow
- 1;
1012 /* Populate the sde map table */
1013 sdma_populate_sde_map(rht_node
->map
[vl
]);
1015 cpumask_set_cpu(cpu
, new_mask
);
1018 /* Clean up old mappings */
1019 for_each_cpu(cpu
, cpu_online_mask
) {
1020 struct sdma_rht_node
*rht_node
;
1022 /* Don't cleanup sdes that are set in the new mask */
1023 if (cpumask_test_cpu(cpu
, mask
))
1026 rht_node
= rhashtable_lookup_fast(dd
->sdma_rht
, &cpu
,
1032 /* Remove mappings for old sde */
1033 for (i
= 0; i
< HFI1_MAX_VLS_SUPPORTED
; i
++)
1034 if (rht_node
->map
[i
])
1035 sdma_cleanup_sde_map(rht_node
->map
[i
],
1038 /* Free empty hash table entries */
1039 for (i
= 0; i
< HFI1_MAX_VLS_SUPPORTED
; i
++) {
1040 if (!rht_node
->map
[i
])
1043 if (rht_node
->map
[i
]->ctr
) {
1050 ret
= rhashtable_remove_fast(dd
->sdma_rht
,
1055 for (i
= 0; i
< HFI1_MAX_VLS_SUPPORTED
; i
++)
1056 kfree(rht_node
->map
[i
]);
1063 cpumask_copy(&sde
->cpu_mask
, new_mask
);
1065 mutex_unlock(&process_to_sde_mutex
);
1067 free_cpumask_var(mask
);
1068 free_cpumask_var(new_mask
);
1069 return ret
? : strnlen(buf
, PAGE_SIZE
);
1072 ssize_t
sdma_get_cpu_to_sde_map(struct sdma_engine
*sde
, char *buf
)
1074 mutex_lock(&process_to_sde_mutex
);
1075 if (cpumask_empty(&sde
->cpu_mask
))
1076 snprintf(buf
, PAGE_SIZE
, "%s\n", "empty");
1078 cpumap_print_to_pagebuf(true, buf
, &sde
->cpu_mask
);
1079 mutex_unlock(&process_to_sde_mutex
);
1080 return strnlen(buf
, PAGE_SIZE
);
1083 static void sdma_rht_free(void *ptr
, void *arg
)
1085 struct sdma_rht_node
*rht_node
= ptr
;
1088 for (i
= 0; i
< HFI1_MAX_VLS_SUPPORTED
; i
++)
1089 kfree(rht_node
->map
[i
]);
1095 * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
1100 * This routine dumps the process to sde mappings per cpu
1102 void sdma_seqfile_dump_cpu_list(struct seq_file
*s
,
1103 struct hfi1_devdata
*dd
,
1104 unsigned long cpuid
)
1106 struct sdma_rht_node
*rht_node
;
1109 rht_node
= rhashtable_lookup_fast(dd
->sdma_rht
, &cpuid
,
1114 seq_printf(s
, "cpu%3lu: ", cpuid
);
1115 for (i
= 0; i
< HFI1_MAX_VLS_SUPPORTED
; i
++) {
1116 if (!rht_node
->map
[i
] || !rht_node
->map
[i
]->ctr
)
1119 seq_printf(s
, " vl%d: [", i
);
1121 for (j
= 0; j
< rht_node
->map
[i
]->ctr
; j
++) {
1122 if (!rht_node
->map
[i
]->sde
[j
])
1128 seq_printf(s
, " sdma%2d",
1129 rht_node
->map
[i
]->sde
[j
]->this_idx
);
1138 * Free the indicated map struct
1140 static void sdma_map_free(struct sdma_vl_map
*m
)
1144 for (i
= 0; m
&& i
< m
->actual_vls
; i
++)
1150 * Handle RCU callback
1152 static void sdma_map_rcu_callback(struct rcu_head
*list
)
1154 struct sdma_vl_map
*m
= container_of(list
, struct sdma_vl_map
, list
);
1160 * sdma_map_init - called when # vls change
1162 * @port: port number
1163 * @num_vls: number of vls
1164 * @vl_engines: per vl engine mapping (optional)
1166 * This routine changes the mapping based on the number of vls.
1168 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
1169 * implies auto computing the loading and giving each VLs a uniform
1170 * distribution of engines per VL.
1172 * The auto algorithm computes the sde_per_vl and the number of extra
1173 * engines. Any extra engines are added from the last VL on down.
1175 * rcu locking is used here to control access to the mapping fields.
1177 * If either the num_vls or num_sdma are non-power of 2, the array sizes
1178 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
1179 * up to the next highest power of 2 and the first entry is reused
1180 * in a round robin fashion.
1182 * If an error occurs the map change is not done and the mapping is
1186 int sdma_map_init(struct hfi1_devdata
*dd
, u8 port
, u8 num_vls
, u8
*vl_engines
)
1189 int extra
, sde_per_vl
;
1191 u8 lvl_engines
[OPA_MAX_VLS
];
1192 struct sdma_vl_map
*oldmap
, *newmap
;
1194 if (!(dd
->flags
& HFI1_HAS_SEND_DMA
))
1198 /* truncate divide */
1199 sde_per_vl
= dd
->num_sdma
/ num_vls
;
1201 extra
= dd
->num_sdma
% num_vls
;
1202 vl_engines
= lvl_engines
;
1203 /* add extras from last vl down */
1204 for (i
= num_vls
- 1; i
>= 0; i
--, extra
--)
1205 vl_engines
[i
] = sde_per_vl
+ (extra
> 0 ? 1 : 0);
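
		/*
		 * Example: 16 engines over 5 VLs gives sde_per_vl = 3 and
		 * extra = 1, so vl_engines[] becomes {3, 3, 3, 3, 4}; the
		 * extra engines are handed out starting from the last VL.
		 */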
1209 sizeof(struct sdma_vl_map
) +
1210 roundup_pow_of_two(num_vls
) *
1211 sizeof(struct sdma_map_elem
*),
1215 newmap
->actual_vls
= num_vls
;
1216 newmap
->vls
= roundup_pow_of_two(num_vls
);
1217 newmap
->mask
= (1 << ilog2(newmap
->vls
)) - 1;
1218 /* initialize back-map */
1219 for (i
= 0; i
< TXE_NUM_SDMA_ENGINES
; i
++)
1220 newmap
->engine_to_vl
[i
] = -1;
1221 for (i
= 0; i
< newmap
->vls
; i
++) {
1222 /* save for wrap around */
1223 int first_engine
= engine
;
1225 if (i
< newmap
->actual_vls
) {
1226 int sz
= roundup_pow_of_two(vl_engines
[i
]);
1228 /* only allocate once */
1229 newmap
->map
[i
] = kzalloc(
1230 sizeof(struct sdma_map_elem
) +
1231 sz
* sizeof(struct sdma_engine
*),
1233 if (!newmap
->map
[i
])
1235 newmap
->map
[i
]->mask
= (1 << ilog2(sz
)) - 1;
1236 /* assign engines */
1237 for (j
= 0; j
< sz
; j
++) {
1238 newmap
->map
[i
]->sde
[j
] =
1239 &dd
->per_sdma
[engine
];
1240 if (++engine
>= first_engine
+ vl_engines
[i
])
1241 /* wrap back to first engine */
1242 engine
= first_engine
;
1244 /* assign back-map */
1245 for (j
= 0; j
< vl_engines
[i
]; j
++)
1246 newmap
->engine_to_vl
[first_engine
+ j
] = i
;
1248 /* just re-use entry without allocating */
1249 newmap
->map
[i
] = newmap
->map
[i
% num_vls
];
1251 engine
= first_engine
+ vl_engines
[i
];
1253 /* newmap in hand, save old map */
1254 spin_lock_irq(&dd
->sde_map_lock
);
1255 oldmap
= rcu_dereference_protected(dd
->sdma_map
,
1256 lockdep_is_held(&dd
->sde_map_lock
));
1258 /* publish newmap */
1259 rcu_assign_pointer(dd
->sdma_map
, newmap
);
1261 spin_unlock_irq(&dd
->sde_map_lock
);
1262 /* success, free any old map after grace period */
1264 call_rcu(&oldmap
->list
, sdma_map_rcu_callback
);
1267 /* free any partial allocation */
1268 sdma_map_free(newmap
);
1273 * sdma_clean() Clean up allocated memory
1274 * @dd: struct hfi1_devdata
1275 * @num_engines: num sdma engines
1277 * This routine can be called regardless of the success of
1280 void sdma_clean(struct hfi1_devdata
*dd
, size_t num_engines
)
1283 struct sdma_engine
*sde
;
1285 if (dd
->sdma_pad_dma
) {
1286 dma_free_coherent(&dd
->pcidev
->dev
, 4,
1287 (void *)dd
->sdma_pad_dma
,
1289 dd
->sdma_pad_dma
= NULL
;
1290 dd
->sdma_pad_phys
= 0;
1292 if (dd
->sdma_heads_dma
) {
1293 dma_free_coherent(&dd
->pcidev
->dev
, dd
->sdma_heads_size
,
1294 (void *)dd
->sdma_heads_dma
,
1295 dd
->sdma_heads_phys
);
1296 dd
->sdma_heads_dma
= NULL
;
1297 dd
->sdma_heads_phys
= 0;
1299 for (i
= 0; dd
->per_sdma
&& i
< num_engines
; ++i
) {
1300 sde
= &dd
->per_sdma
[i
];
1302 sde
->head_dma
= NULL
;
1308 sde
->descq_cnt
* sizeof(u64
[2]),
1313 sde
->descq_phys
= 0;
1315 kvfree(sde
->tx_ring
);
1316 sde
->tx_ring
= NULL
;
1318 spin_lock_irq(&dd
->sde_map_lock
);
1319 sdma_map_free(rcu_access_pointer(dd
->sdma_map
));
1320 RCU_INIT_POINTER(dd
->sdma_map
, NULL
);
1321 spin_unlock_irq(&dd
->sde_map_lock
);
1323 kfree(dd
->per_sdma
);
1324 dd
->per_sdma
= NULL
;
1327 rhashtable_free_and_destroy(dd
->sdma_rht
, sdma_rht_free
, NULL
);
1328 kfree(dd
->sdma_rht
);
1329 dd
->sdma_rht
= NULL
;
1334 * sdma_init() - called when device probed
1336 * @port: port number (currently only zero)
1338 * Initializes each sde and its csrs.
1339 * Interrupts are not required to be enabled.
1342 * 0 - success, -errno on failure
1344 int sdma_init(struct hfi1_devdata
*dd
, u8 port
)
1347 struct sdma_engine
*sde
;
1348 struct rhashtable
*tmp_sdma_rht
;
1351 struct hfi1_pportdata
*ppd
= dd
->pport
+ port
;
1352 u32 per_sdma_credits
;
1353 uint idle_cnt
= sdma_idle_cnt
;
1354 size_t num_engines
= chip_sdma_engines(dd
);
1357 if (!HFI1_CAP_IS_KSET(SDMA
)) {
1358 HFI1_CAP_CLEAR(SDMA_AHG
);
1362 /* can't exceed chip support */
1363 mod_num_sdma
<= chip_sdma_engines(dd
) &&
1364 /* count must be >= vls */
1365 mod_num_sdma
>= num_vls
)
1366 num_engines
= mod_num_sdma
;
1368 dd_dev_info(dd
, "SDMA mod_num_sdma: %u\n", mod_num_sdma
);
1369 dd_dev_info(dd
, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd
));
1370 dd_dev_info(dd
, "SDMA chip_sdma_mem_size: %u\n",
1371 chip_sdma_mem_size(dd
));
1374 chip_sdma_mem_size(dd
) / (num_engines
* SDMA_BLOCK_SIZE
);
1376 /* set up freeze waitqueue */
1377 init_waitqueue_head(&dd
->sdma_unfreeze_wq
);
1378 atomic_set(&dd
->sdma_unfreeze_count
, 0);
1380 descq_cnt
= sdma_get_descq_cnt();
1381 dd_dev_info(dd
, "SDMA engines %zu descq_cnt %u\n",
1382 num_engines
, descq_cnt
);
1384 /* alloc memory for array of send engines */
1385 dd
->per_sdma
= kcalloc_node(num_engines
, sizeof(*dd
->per_sdma
),
1386 GFP_KERNEL
, dd
->node
);
1390 idle_cnt
= ns_to_cclock(dd
, idle_cnt
);
1393 SDMA_DESC1_HEAD_TO_HOST_FLAG
;
1396 SDMA_DESC1_INT_REQ_FLAG
;
1398 if (!sdma_desct_intr
)
1399 sdma_desct_intr
= SDMA_DESC_INTR
;
1401 /* Allocate memory for SendDMA descriptor FIFOs */
1402 for (this_idx
= 0; this_idx
< num_engines
; ++this_idx
) {
1403 sde
= &dd
->per_sdma
[this_idx
];
1406 sde
->this_idx
= this_idx
;
1407 sde
->descq_cnt
= descq_cnt
;
1408 sde
->desc_avail
= sdma_descq_freecnt(sde
);
1409 sde
->sdma_shift
= ilog2(descq_cnt
);
1410 sde
->sdma_mask
= (1 << sde
->sdma_shift
) - 1;
1412 /* Create a mask specifically for each interrupt source */
1413 sde
->int_mask
= (u64
)1 << (0 * TXE_NUM_SDMA_ENGINES
+
1415 sde
->progress_mask
= (u64
)1 << (1 * TXE_NUM_SDMA_ENGINES
+
1417 sde
->idle_mask
= (u64
)1 << (2 * TXE_NUM_SDMA_ENGINES
+
1419 /* Create a combined mask to cover all 3 interrupt sources */
1420 sde
->imask
= sde
->int_mask
| sde
->progress_mask
|
1423 spin_lock_init(&sde
->tail_lock
);
1424 seqlock_init(&sde
->head_lock
);
1425 spin_lock_init(&sde
->senddmactrl_lock
);
1426 spin_lock_init(&sde
->flushlist_lock
);
1427 seqlock_init(&sde
->waitlock
);
		/* ensure there is always a zero bit */
1429 sde
->ahg_bits
= 0xfffffffe00000000ULL
;
1431 sdma_set_state(sde
, sdma_state_s00_hw_down
);
1433 /* set up reference counting */
1434 kref_init(&sde
->state
.kref
);
1435 init_completion(&sde
->state
.comp
);
1437 INIT_LIST_HEAD(&sde
->flushlist
);
1438 INIT_LIST_HEAD(&sde
->dmawait
);
1441 get_kctxt_csr_addr(dd
, this_idx
, SD(TAIL
));
1443 tasklet_init(&sde
->sdma_hw_clean_up_task
, sdma_hw_clean_up_task
,
1444 (unsigned long)sde
);
1446 tasklet_init(&sde
->sdma_sw_clean_up_task
, sdma_sw_clean_up_task
,
1447 (unsigned long)sde
);
1448 INIT_WORK(&sde
->err_halt_worker
, sdma_err_halt_wait
);
1449 INIT_WORK(&sde
->flush_worker
, sdma_field_flush
);
1451 sde
->progress_check_head
= 0;
1453 timer_setup(&sde
->err_progress_check_timer
,
1454 sdma_err_progress_check
, 0);
1456 sde
->descq
= dma_zalloc_coherent(
1458 descq_cnt
* sizeof(u64
[2]),
1465 kvzalloc_node(array_size(descq_cnt
,
1466 sizeof(struct sdma_txreq
*)),
1467 GFP_KERNEL
, dd
->node
);
1472 dd
->sdma_heads_size
= L1_CACHE_BYTES
* num_engines
;
1473 /* Allocate memory for DMA of head registers to memory */
1474 dd
->sdma_heads_dma
= dma_zalloc_coherent(
1476 dd
->sdma_heads_size
,
1477 &dd
->sdma_heads_phys
,
1480 if (!dd
->sdma_heads_dma
) {
1481 dd_dev_err(dd
, "failed to allocate SendDMA head memory\n");
1485 /* Allocate memory for pad */
1486 dd
->sdma_pad_dma
= dma_zalloc_coherent(
1492 if (!dd
->sdma_pad_dma
) {
1493 dd_dev_err(dd
, "failed to allocate SendDMA pad memory\n");
1497 /* assign each engine to different cacheline and init registers */
1498 curr_head
= (void *)dd
->sdma_heads_dma
;
1499 for (this_idx
= 0; this_idx
< num_engines
; ++this_idx
) {
1500 unsigned long phys_offset
;
1502 sde
= &dd
->per_sdma
[this_idx
];
1504 sde
->head_dma
= curr_head
;
1505 curr_head
+= L1_CACHE_BYTES
;
1506 phys_offset
= (unsigned long)sde
->head_dma
-
1507 (unsigned long)dd
->sdma_heads_dma
;
1508 sde
->head_phys
= dd
->sdma_heads_phys
+ phys_offset
;
1509 init_sdma_regs(sde
, per_sdma_credits
, idle_cnt
);
1511 dd
->flags
|= HFI1_HAS_SEND_DMA
;
1512 dd
->flags
|= idle_cnt
? HFI1_HAS_SDMA_TIMEOUT
: 0;
1513 dd
->num_sdma
= num_engines
;
1514 ret
= sdma_map_init(dd
, port
, ppd
->vls_operational
, NULL
);
1518 tmp_sdma_rht
= kzalloc(sizeof(*tmp_sdma_rht
), GFP_KERNEL
);
1519 if (!tmp_sdma_rht
) {
1524 ret
= rhashtable_init(tmp_sdma_rht
, &sdma_rht_params
);
1527 dd
->sdma_rht
= tmp_sdma_rht
;
1529 dd_dev_info(dd
, "SDMA num_sdma: %u\n", dd
->num_sdma
);
1533 sdma_clean(dd
, num_engines
);
1538 * sdma_all_running() - called when the link goes up
1541 * This routine moves all engines to the running state.
1543 void sdma_all_running(struct hfi1_devdata
*dd
)
1545 struct sdma_engine
*sde
;
1548 /* move all engines to running */
1549 for (i
= 0; i
< dd
->num_sdma
; ++i
) {
1550 sde
= &dd
->per_sdma
[i
];
1551 sdma_process_event(sde
, sdma_event_e30_go_running
);
1556 * sdma_all_idle() - called when the link goes down
1559 * This routine moves all engines to the idle state.
1561 void sdma_all_idle(struct hfi1_devdata
*dd
)
1563 struct sdma_engine
*sde
;
1566 /* idle all engines */
1567 for (i
= 0; i
< dd
->num_sdma
; ++i
) {
1568 sde
= &dd
->per_sdma
[i
];
1569 sdma_process_event(sde
, sdma_event_e70_go_idle
);
1574 * sdma_start() - called to kick off state processing for all engines
1577 * This routine is for kicking off the state processing for all required
1578 * sdma engines. Interrupts need to be working at this point.
1581 void sdma_start(struct hfi1_devdata
*dd
)
1584 struct sdma_engine
*sde
;
1586 /* kick off the engines state processing */
1587 for (i
= 0; i
< dd
->num_sdma
; ++i
) {
1588 sde
= &dd
->per_sdma
[i
];
1589 sdma_process_event(sde
, sdma_event_e10_go_hw_start
);
1594 * sdma_exit() - used when module is removed
1597 void sdma_exit(struct hfi1_devdata
*dd
)
1600 struct sdma_engine
*sde
;
1602 for (this_idx
= 0; dd
->per_sdma
&& this_idx
< dd
->num_sdma
;
1604 sde
= &dd
->per_sdma
[this_idx
];
1605 if (!list_empty(&sde
->dmawait
))
1606 dd_dev_err(dd
, "sde %u: dmawait list not empty!\n",
1608 sdma_process_event(sde
, sdma_event_e00_go_hw_down
);
1610 del_timer_sync(&sde
->err_progress_check_timer
);
1613 * This waits for the state machine to exit so it is not
1614 * necessary to kill the sdma_sw_clean_up_task to make sure
1615 * it is not running.
1617 sdma_finalput(&sde
->state
);
1622 * unmap the indicated descriptor
1624 static inline void sdma_unmap_desc(
1625 struct hfi1_devdata
*dd
,
1626 struct sdma_desc
*descp
)
1628 switch (sdma_mapping_type(descp
)) {
1629 case SDMA_MAP_SINGLE
:
1632 sdma_mapping_addr(descp
),
1633 sdma_mapping_len(descp
),
1639 sdma_mapping_addr(descp
),
1640 sdma_mapping_len(descp
),
1647 * return the mode as indicated by the first
1648 * descriptor in the tx.
1650 static inline u8
ahg_mode(struct sdma_txreq
*tx
)
1652 return (tx
->descp
[0].qw
[1] & SDMA_DESC1_HEADER_MODE_SMASK
)
1653 >> SDMA_DESC1_HEADER_MODE_SHIFT
;
1657 * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
1658 * @dd: hfi1_devdata for unmapping
1659 * @tx: tx request to clean
1661 * This is used in the progress routine to clean the tx or
1662 * by the ULP to toss an in-process tx build.
1664 * The code can be called multiple times without issue.
1667 void __sdma_txclean(
1668 struct hfi1_devdata
*dd
,
1669 struct sdma_txreq
*tx
)
1674 u8 skip
= 0, mode
= ahg_mode(tx
);
1677 sdma_unmap_desc(dd
, &tx
->descp
[0]);
1678 /* determine number of AHG descriptors to skip */
1679 if (mode
> SDMA_AHG_APPLY_UPDATE1
)
1681 for (i
= 1 + skip
; i
< tx
->num_desc
; i
++)
1682 sdma_unmap_desc(dd
, &tx
->descp
[i
]);
1685 kfree(tx
->coalesce_buf
);
1686 tx
->coalesce_buf
= NULL
;
1687 /* kmalloc'ed descp */
1688 if (unlikely(tx
->desc_limit
> ARRAY_SIZE(tx
->descs
))) {
1689 tx
->desc_limit
= ARRAY_SIZE(tx
->descs
);
1694 static inline u16
sdma_gethead(struct sdma_engine
*sde
)
1696 struct hfi1_devdata
*dd
= sde
->dd
;
1700 #ifdef CONFIG_SDMA_VERBOSITY
1701 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
1702 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
1706 use_dmahead
= HFI1_CAP_IS_KSET(USE_SDMA_HEAD
) && __sdma_running(sde
) &&
1707 (dd
->flags
& HFI1_HAS_SDMA_TIMEOUT
);
1708 hwhead
= use_dmahead
?
1709 (u16
)le64_to_cpu(*sde
->head_dma
) :
1710 (u16
)read_sde_csr(sde
, SD(HEAD
));
1712 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK
))) {
1718 swhead
= sde
->descq_head
& sde
->sdma_mask
;
1719 /* this code is really bad for cache line trading */
1720 swtail
= READ_ONCE(sde
->descq_tail
) & sde
->sdma_mask
;
1721 cnt
= sde
->descq_cnt
;
1723 if (swhead
< swtail
)
1725 sane
= (hwhead
>= swhead
) & (hwhead
<= swtail
);
1726 else if (swhead
> swtail
)
1727 /* wrapped around */
1728 sane
= ((hwhead
>= swhead
) && (hwhead
< cnt
)) ||
1732 sane
= (hwhead
== swhead
);
1734 if (unlikely(!sane
)) {
1735 dd_dev_err(dd
, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1737 use_dmahead
? "dma" : "kreg",
1738 hwhead
, swhead
, swtail
, cnt
);
1740 /* try one more time, using csr */
1744 /* proceed as if no progress */
1752 * This is called when there are send DMA descriptors that might be
1755 * This is called with head_lock held.
1757 static void sdma_desc_avail(struct sdma_engine
*sde
, uint avail
)
1759 struct iowait
*wait
, *nw
;
1760 struct iowait
*waits
[SDMA_WAIT_BATCH_SIZE
];
1761 uint i
, n
= 0, seq
, max_idx
= 0;
1762 u8 max_starved_cnt
= 0;
1764 #ifdef CONFIG_SDMA_VERBOSITY
1765 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n", sde
->this_idx
,
1766 slashstrip(__FILE__
), __LINE__
, __func__
);
1767 dd_dev_err(sde
->dd
, "avail: %u\n", avail
);
1771 seq
= read_seqbegin(&sde
->waitlock
);
1772 if (!list_empty(&sde
->dmawait
)) {
1773 /* at least one item */
1774 write_seqlock(&sde
->waitlock
);
1775 /* Harvest waiters wanting DMA descriptors */
1776 list_for_each_entry_safe(
1785 if (n
== ARRAY_SIZE(waits
))
1787 num_desc
= iowait_get_all_desc(wait
);
1788 if (num_desc
> avail
)
			/* Find the most starved wait member */
1792 iowait_starve_find_max(wait
, &max_starved_cnt
,
1794 list_del_init(&wait
->list
);
1797 write_sequnlock(&sde
->waitlock
);
1800 } while (read_seqretry(&sde
->waitlock
, seq
));
1802 /* Schedule the most starved one first */
1804 waits
[max_idx
]->wakeup(waits
[max_idx
], SDMA_AVAIL_REASON
);
1806 for (i
= 0; i
< n
; i
++)
1808 waits
[i
]->wakeup(waits
[i
], SDMA_AVAIL_REASON
);
1811 /* head_lock must be held */
1812 static void sdma_make_progress(struct sdma_engine
*sde
, u64 status
)
1814 struct sdma_txreq
*txp
= NULL
;
1817 int idle_check_done
= 0;
1819 hwhead
= sdma_gethead(sde
);
1821 /* The reason for some of the complexity of this code is that
1822 * not all descriptors have corresponding txps. So, we have to
1823 * be able to skip over descs until we wander into the range of
1824 * the next txp on the list.
1828 txp
= get_txhead(sde
);
1829 swhead
= sde
->descq_head
& sde
->sdma_mask
;
1830 trace_hfi1_sdma_progress(sde
, hwhead
, swhead
, txp
);
1831 while (swhead
!= hwhead
) {
1832 /* advance head, wrap if needed */
1833 swhead
= ++sde
->descq_head
& sde
->sdma_mask
;
1835 /* if now past this txp's descs, do the callback */
1836 if (txp
&& txp
->next_descq_idx
== swhead
) {
1837 /* remove from list */
1838 sde
->tx_ring
[sde
->tx_head
++ & sde
->sdma_mask
] = NULL
;
1839 complete_tx(sde
, txp
, SDMA_TXREQ_S_OK
);
1840 /* see if there is another txp */
1841 txp
= get_txhead(sde
);
1843 trace_hfi1_sdma_progress(sde
, hwhead
, swhead
, txp
);
1848 * The SDMA idle interrupt is not guaranteed to be ordered with respect
	 * to updates to the dma_head location in host memory. The head
1850 * value read might not be fully up to date. If there are pending
1851 * descriptors and the SDMA idle interrupt fired then read from the
1852 * CSR SDMA head instead to get the latest value from the hardware.
1853 * The hardware SDMA head should be read at most once in this invocation
1854 * of sdma_make_progress(..) which is ensured by idle_check_done flag
1856 if ((status
& sde
->idle_mask
) && !idle_check_done
) {
1859 swtail
= READ_ONCE(sde
->descq_tail
) & sde
->sdma_mask
;
1860 if (swtail
!= hwhead
) {
1861 hwhead
= (u16
)read_sde_csr(sde
, SD(HEAD
));
1862 idle_check_done
= 1;
1867 sde
->last_status
= status
;
1869 sdma_desc_avail(sde
, sdma_descq_freecnt(sde
));
1873 * sdma_engine_interrupt() - interrupt handler for engine
1875 * @status: sdma interrupt reason
1877 * Status is a mask of the 3 possible interrupts for this engine. It will
1878 * contain bits _only_ for this SDMA engine. It will contain at least one
1879 * bit, it may contain more.
1881 void sdma_engine_interrupt(struct sdma_engine
*sde
, u64 status
)
1883 trace_hfi1_sdma_engine_interrupt(sde
, status
);
1884 write_seqlock(&sde
->head_lock
);
1885 sdma_set_desc_cnt(sde
, sdma_desct_intr
);
1886 if (status
& sde
->idle_mask
)
1887 sde
->idle_int_cnt
++;
1888 else if (status
& sde
->progress_mask
)
1889 sde
->progress_int_cnt
++;
1890 else if (status
& sde
->int_mask
)
1891 sde
->sdma_int_cnt
++;
1892 sdma_make_progress(sde
, status
);
1893 write_sequnlock(&sde
->head_lock
);
1897 * sdma_engine_error() - error handler for engine
1899 * @status: sdma interrupt reason
1901 void sdma_engine_error(struct sdma_engine
*sde
, u64 status
)
1903 unsigned long flags
;
1905 #ifdef CONFIG_SDMA_VERBOSITY
1906 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1908 (unsigned long long)status
,
1909 sdma_state_names
[sde
->state
.current_state
]);
1911 spin_lock_irqsave(&sde
->tail_lock
, flags
);
1912 write_seqlock(&sde
->head_lock
);
1913 if (status
& ALL_SDMA_ENG_HALT_ERRS
)
1914 __sdma_process_event(sde
, sdma_event_e60_hw_halted
);
1915 if (status
& ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK
)) {
1917 "SDMA (%u) engine error: 0x%llx state %s\n",
1919 (unsigned long long)status
,
1920 sdma_state_names
[sde
->state
.current_state
]);
1921 dump_sdma_state(sde
);
1923 write_sequnlock(&sde
->head_lock
);
1924 spin_unlock_irqrestore(&sde
->tail_lock
, flags
);
1927 static void sdma_sendctrl(struct sdma_engine
*sde
, unsigned op
)
1929 u64 set_senddmactrl
= 0;
1930 u64 clr_senddmactrl
= 0;
1931 unsigned long flags
;
1933 #ifdef CONFIG_SDMA_VERBOSITY
1934 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1936 (op
& SDMA_SENDCTRL_OP_ENABLE
) ? 1 : 0,
1937 (op
& SDMA_SENDCTRL_OP_INTENABLE
) ? 1 : 0,
1938 (op
& SDMA_SENDCTRL_OP_HALT
) ? 1 : 0,
1939 (op
& SDMA_SENDCTRL_OP_CLEANUP
) ? 1 : 0);
1942 if (op
& SDMA_SENDCTRL_OP_ENABLE
)
1943 set_senddmactrl
|= SD(CTRL_SDMA_ENABLE_SMASK
);
1945 clr_senddmactrl
|= SD(CTRL_SDMA_ENABLE_SMASK
);
1947 if (op
& SDMA_SENDCTRL_OP_INTENABLE
)
1948 set_senddmactrl
|= SD(CTRL_SDMA_INT_ENABLE_SMASK
);
1950 clr_senddmactrl
|= SD(CTRL_SDMA_INT_ENABLE_SMASK
);
1952 if (op
& SDMA_SENDCTRL_OP_HALT
)
1953 set_senddmactrl
|= SD(CTRL_SDMA_HALT_SMASK
);
1955 clr_senddmactrl
|= SD(CTRL_SDMA_HALT_SMASK
);
1957 spin_lock_irqsave(&sde
->senddmactrl_lock
, flags
);
1959 sde
->p_senddmactrl
|= set_senddmactrl
;
1960 sde
->p_senddmactrl
&= ~clr_senddmactrl
;
1962 if (op
& SDMA_SENDCTRL_OP_CLEANUP
)
1963 write_sde_csr(sde
, SD(CTRL
),
1964 sde
->p_senddmactrl
|
1965 SD(CTRL_SDMA_CLEANUP_SMASK
));
1967 write_sde_csr(sde
, SD(CTRL
), sde
->p_senddmactrl
);
1969 spin_unlock_irqrestore(&sde
->senddmactrl_lock
, flags
);
1971 #ifdef CONFIG_SDMA_VERBOSITY
1972 sdma_dumpstate(sde
);
1976 static void sdma_setlengen(struct sdma_engine
*sde
)
1978 #ifdef CONFIG_SDMA_VERBOSITY
1979 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
1980 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
1984 * Set SendDmaLenGen and clear-then-set the MSB of the generation
1985 * count to enable generation checking and load the internal
1986 * generation counter.
1988 write_sde_csr(sde
, SD(LEN_GEN
),
1989 (sde
->descq_cnt
/ 64) << SD(LEN_GEN_LENGTH_SHIFT
));
1990 write_sde_csr(sde
, SD(LEN_GEN
),
1991 ((sde
->descq_cnt
/ 64) << SD(LEN_GEN_LENGTH_SHIFT
)) |
1992 (4ULL << SD(LEN_GEN_GENERATION_SHIFT
)));
1995 static inline void sdma_update_tail(struct sdma_engine
*sde
, u16 tail
)
1997 /* Commit writes to memory and advance the tail on the chip */
1998 smp_wmb(); /* see get_txhead() */
1999 writeq(tail
, sde
->tail_csr
);
2003 * This is called when changing to state s10_hw_start_up_halt_wait as
2004 * a result of send buffer errors or send DMA descriptor errors.
2006 static void sdma_hw_start_up(struct sdma_engine
*sde
)
2010 #ifdef CONFIG_SDMA_VERBOSITY
2011 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
2012 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
2015 sdma_setlengen(sde
);
2016 sdma_update_tail(sde
, 0); /* Set SendDmaTail */
2019 reg
= SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK
) <<
2020 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT
);
2021 write_sde_csr(sde
, SD(ENG_ERR_CLEAR
), reg
);
2025 * set_sdma_integrity
2027 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
2029 static void set_sdma_integrity(struct sdma_engine
*sde
)
2031 struct hfi1_devdata
*dd
= sde
->dd
;
2033 write_sde_csr(sde
, SD(CHECK_ENABLE
),
2034 hfi1_pkt_base_sdma_integrity(dd
));
2037 static void init_sdma_regs(
2038 struct sdma_engine
*sde
,
2043 #ifdef CONFIG_SDMA_VERBOSITY
2044 struct hfi1_devdata
*dd
= sde
->dd
;
2046 dd_dev_err(dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
2047 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
2050 write_sde_csr(sde
, SD(BASE_ADDR
), sde
->descq_phys
);
2051 sdma_setlengen(sde
);
2052 sdma_update_tail(sde
, 0); /* Set SendDmaTail */
2053 write_sde_csr(sde
, SD(RELOAD_CNT
), idle_cnt
);
2054 write_sde_csr(sde
, SD(DESC_CNT
), 0);
2055 write_sde_csr(sde
, SD(HEAD_ADDR
), sde
->head_phys
);
2056 write_sde_csr(sde
, SD(MEMORY
),
2057 ((u64
)credits
<< SD(MEMORY_SDMA_MEMORY_CNT_SHIFT
)) |
2058 ((u64
)(credits
* sde
->this_idx
) <<
2059 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT
)));
2060 write_sde_csr(sde
, SD(ENG_ERR_MASK
), ~0ull);
2061 set_sdma_integrity(sde
);
2062 opmask
= OPCODE_CHECK_MASK_DISABLED
;
2063 opval
= OPCODE_CHECK_VAL_DISABLED
;
2064 write_sde_csr(sde
, SD(CHECK_OPCODE
),
2065 (opmask
<< SEND_CTXT_CHECK_OPCODE_MASK_SHIFT
) |
2066 (opval
<< SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT
));
2069 #ifdef CONFIG_SDMA_VERBOSITY
2071 #define sdma_dumpstate_helper0(reg) do { \
2072 csr = read_csr(sde->dd, reg); \
2073 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
2076 #define sdma_dumpstate_helper(reg) do { \
2077 csr = read_sde_csr(sde, reg); \
2078 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
2079 #reg, sde->this_idx, csr); \
2082 #define sdma_dumpstate_helper2(reg) do { \
2083 csr = read_csr(sde->dd, reg + (8 * i)); \
2084 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
2088 void sdma_dumpstate(struct sdma_engine
*sde
)
2093 sdma_dumpstate_helper(SD(CTRL
));
2094 sdma_dumpstate_helper(SD(STATUS
));
2095 sdma_dumpstate_helper0(SD(ERR_STATUS
));
2096 sdma_dumpstate_helper0(SD(ERR_MASK
));
2097 sdma_dumpstate_helper(SD(ENG_ERR_STATUS
));
2098 sdma_dumpstate_helper(SD(ENG_ERR_MASK
));
2100 for (i
= 0; i
< CCE_NUM_INT_CSRS
; ++i
) {
2101 sdma_dumpstate_helper2(CCE_INT_STATUS
);
2102 sdma_dumpstate_helper2(CCE_INT_MASK
);
2103 sdma_dumpstate_helper2(CCE_INT_BLOCKED
);
2106 sdma_dumpstate_helper(SD(TAIL
));
2107 sdma_dumpstate_helper(SD(HEAD
));
2108 sdma_dumpstate_helper(SD(PRIORITY_THLD
));
2109 sdma_dumpstate_helper(SD(IDLE_CNT
));
2110 sdma_dumpstate_helper(SD(RELOAD_CNT
));
2111 sdma_dumpstate_helper(SD(DESC_CNT
));
2112 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT
));
2113 sdma_dumpstate_helper(SD(MEMORY
));
2114 sdma_dumpstate_helper0(SD(ENGINES
));
2115 sdma_dumpstate_helper0(SD(MEM_SIZE
));
2116 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
2117 sdma_dumpstate_helper(SD(BASE_ADDR
));
2118 sdma_dumpstate_helper(SD(LEN_GEN
));
2119 sdma_dumpstate_helper(SD(HEAD_ADDR
));
2120 sdma_dumpstate_helper(SD(CHECK_ENABLE
));
2121 sdma_dumpstate_helper(SD(CHECK_VL
));
2122 sdma_dumpstate_helper(SD(CHECK_JOB_KEY
));
2123 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY
));
2124 sdma_dumpstate_helper(SD(CHECK_SLID
));
2125 sdma_dumpstate_helper(SD(CHECK_OPCODE
));
2129 static void dump_sdma_state(struct sdma_engine
*sde
)
2131 struct hw_sdma_desc
*descqp
;
2136 u16 head
, tail
, cnt
;
2138 head
= sde
->descq_head
& sde
->sdma_mask
;
2139 tail
= sde
->descq_tail
& sde
->sdma_mask
;
2140 cnt
= sdma_descq_freecnt(sde
);
2143 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
2144 sde
->this_idx
, head
, tail
, cnt
,
2145 !list_empty(&sde
->flushlist
));
2147 /* print info for each entry in the descriptor queue */
2148 while (head
!= tail
) {
2149 char flags
[6] = { 'x', 'x', 'x', 'x', 0 };
2151 descqp
= &sde
->descq
[head
];
2152 desc
[0] = le64_to_cpu(descqp
->qw
[0]);
2153 desc
[1] = le64_to_cpu(descqp
->qw
[1]);
2154 flags
[0] = (desc
[1] & SDMA_DESC1_INT_REQ_FLAG
) ? 'I' : '-';
2155 flags
[1] = (desc
[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG
) ?
2157 flags
[2] = (desc
[0] & SDMA_DESC0_FIRST_DESC_FLAG
) ? 'F' : '-';
2158 flags
[3] = (desc
[0] & SDMA_DESC0_LAST_DESC_FLAG
) ? 'L' : '-';
2159 addr
= (desc
[0] >> SDMA_DESC0_PHY_ADDR_SHIFT
)
2160 & SDMA_DESC0_PHY_ADDR_MASK
;
2161 gen
= (desc
[1] >> SDMA_DESC1_GENERATION_SHIFT
)
2162 & SDMA_DESC1_GENERATION_MASK
;
2163 len
= (desc
[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT
)
2164 & SDMA_DESC0_BYTE_COUNT_MASK
;
2166 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
2167 head
, flags
, addr
, gen
, len
);
2169 "\tdesc0:0x%016llx desc1 0x%016llx\n",
2171 if (desc
[0] & SDMA_DESC0_FIRST_DESC_FLAG
)
2173 "\taidx: %u amode: %u alen: %u\n",
2175 SDMA_DESC1_HEADER_INDEX_SMASK
) >>
2176 SDMA_DESC1_HEADER_INDEX_SHIFT
),
2178 SDMA_DESC1_HEADER_MODE_SMASK
) >>
2179 SDMA_DESC1_HEADER_MODE_SHIFT
),
2181 SDMA_DESC1_HEADER_DWS_SMASK
) >>
2182 SDMA_DESC1_HEADER_DWS_SHIFT
));
2184 head
&= sde
->sdma_mask
;
2189 "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
/**
 * sdma_seqfile_dump_sde() - debugfs dump of sde
 * @s: seq file
 * @sde: send dma engine to dump
 *
 * This routine dumps the sde to the indicated seq file.
 */
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
{
	u16 head, tail;
	struct hw_sdma_desc *descqp;
	u64 desc[2];
	u64 addr;
	u8 gen;
	u16 len;

	head = sde->descq_head & sde->sdma_mask;
	tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
	seq_printf(s, SDE_FMT, sde->this_idx,
		   sde->cpu,
		   sdma_state_name(sde->state.current_state),
		   (unsigned long long)read_sde_csr(sde, SD(CTRL)),
		   (unsigned long long)read_sde_csr(sde, SD(STATUS)),
		   (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
		   (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
		   (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
		   (unsigned long long)le64_to_cpu(*sde->head_dma),
		   (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
		   (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
		   (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
		   (unsigned long long)sde->last_status,
		   (unsigned long long)sde->ahg_bits,
		   sde->tx_tail,
		   sde->tx_head,
		   sde->descq_tail,
		   sde->descq_head,
		   !list_empty(&sde->flushlist),
		   sde->descq_full_count,
		   (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 0 };

		descqp = &sde->descq[head];
		desc[0] = le64_to_cpu(descqp->qw[0]);
		desc[1] = le64_to_cpu(descqp->qw[1]);
		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
				'H' : '-';
		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
			& SDMA_DESC0_PHY_ADDR_MASK;
		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
			& SDMA_DESC1_GENERATION_MASK;
		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
			& SDMA_DESC0_BYTE_COUNT_MASK;
		seq_printf(s,
			   "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
			   head, flags, addr, gen, len);
		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
			seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_INDEX_SMASK) >>
					SDMA_DESC1_HEADER_INDEX_SHIFT),
				   (u8)((desc[1] &
					 SDMA_DESC1_HEADER_MODE_SMASK) >>
					SDMA_DESC1_HEADER_MODE_SHIFT));
		head = (head + 1) & sde->sdma_mask;
	}
}
/*
 * add the generation number into
 * the qw1 and return
 */
static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
{
	u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;

	qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
	qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
			<< SDMA_DESC1_GENERATION_SHIFT;
	return qw1;
}
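
/*
 * Worked example (illustrative only, not from the driver): assuming a
 * 2048-entry descriptor queue with sdma_shift == 11, descq_tail values
 * 0..2047 produce generation 0, 2048..4095 produce generation 1, and so on,
 * wrapping modulo 4.  The 2-bit generation written into qw1 lets the
 * hardware distinguish a freshly written descriptor from a stale one after
 * the ring wraps.
 */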
/*
 * This routine submits the indicated tx
 *
 * Space has already been guaranteed and
 * tail side of ring is locked.
 *
 * The hardware tail update is done
 * in the caller and that is facilitated
 * by returning the new tail.
 *
 * There is special case logic for ahg
 * to not add the generation number for
 * up to 2 descriptors that follow the
 * first descriptor.
 */
static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
{
	int i;
	u16 tail;
	struct sdma_desc *descp = tx->descp;
	u8 skip = 0, mode = ahg_mode(tx);

	tail = sde->descq_tail & sde->sdma_mask;
	sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
	sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
	trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
				   tail, &sde->descq[tail]);
	tail = ++sde->descq_tail & sde->sdma_mask;
	descp++;
	if (mode > SDMA_AHG_APPLY_UPDATE1)
		skip = mode >> 1;
	for (i = 1; i < tx->num_desc; i++, descp++) {
		u64 qw1;

		sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
		if (skip) {
			/* edits don't have generation */
			qw1 = descp->qw[1];
			skip--;
		} else {
			/* replace generation with real one for non-edits */
			qw1 = add_gen(sde, descp->qw[1]);
		}
		sde->descq[tail].qw[1] = cpu_to_le64(qw1);
		trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
					   tail, &sde->descq[tail]);
		tail = ++sde->descq_tail & sde->sdma_mask;
	}
	tx->next_descq_idx = tail;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
	WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
#endif
	sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
	sde->desc_avail -= tx->num_desc;
	return tail;
}
/*
 * Check for progress
 */
static int sdma_check_progress(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *tx,
	bool pkts_sent)
{
	int ret;

	sde->desc_avail = sdma_descq_freecnt(sde);
	if (tx->num_desc <= sde->desc_avail)
		return -EAGAIN;
	/* pulse the head_lock */
	if (wait && iowait_ioww_to_iow(wait)->sleep) {
		unsigned seq;

		seq = raw_seqcount_begin(
			(const seqcount_t *)&sde->head_lock.seqcount);
		ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent);
		if (ret == -EAGAIN)
			sde->desc_avail = sdma_descq_freecnt(sde);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
/**
 * sdma_send_txreq() - submit a tx req to ring
 * @sde: sdma engine to use
 * @wait: SE wait structure to use when full (may be NULL)
 * @tx: sdma_txreq to submit
 * @pkts_sent: has any packet been sent yet?
 *
 * The call submits the tx into the ring.  If an iowait structure is non-NULL
 * the packet will be queued to the list in wait.
 *
 * Return:
 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
 * ring (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait_work *wait,
		    struct sdma_txreq *tx,
		    bool pkts_sent)
{
	int ret = 0;
	u16 tail;
	unsigned long flags;

	/* user should have supplied entire packet */
	if (unlikely(tx->tlen))
		return -EINVAL;
	tx->wait = iowait_ioww_to_iow(wait);
	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	if (unlikely(!__sdma_running(sde)))
		goto unlock_noconn;
	if (unlikely(tx->num_desc > sde->desc_avail))
		goto nodesc;
	tail = submit_tx(sde, tx);
	if (wait)
		iowait_sdma_inc(iowait_ioww_to_iow(wait));
	sdma_update_tail(sde, tail);
unlock:
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	return ret;
unlock_noconn:
	if (wait)
		iowait_sdma_inc(iowait_ioww_to_iow(wait));
	tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
	spin_lock(&sde->flushlist_lock);
	list_add_tail(&tx->list, &sde->flushlist);
	spin_unlock(&sde->flushlist_lock);
	iowait_inc_wait_count(wait, tx->num_desc);
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto unlock;
nodesc:
	ret = sdma_check_progress(sde, wait, tx, pkts_sent);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto unlock;
}
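
/*
 * Illustrative only, not built: a minimal sketch of the single-request
 * submit path described in the kernel-doc above.  The helper name, the
 * buffer/length, and the completion callback are assumptions; the
 * sdma_txinit()/sdma_txadd_kvaddr()/sdma_txclean() helpers are the sdma.h
 * request-building interfaces this driver provides.
 */
#if 0
static int example_submit_one(struct hfi1_devdata *dd,
			      struct sdma_engine *sde,
			      struct sdma_txreq *tx,
			      void *buf, u16 len,
			      void (*complete)(struct sdma_txreq *, int))
{
	int ret;

	ret = sdma_txinit(tx, 0, len, complete);
	if (ret)
		return ret;
	ret = sdma_txadd_kvaddr(dd, tx, buf, len);
	if (ret)
		return ret;
	/* no iowait supplied: -EBUSY comes back when the ring is full */
	ret = sdma_send_txreq(sde, NULL, tx, false);
	if (ret)
		sdma_txclean(dd, tx);
	return ret;
}
#endif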
/**
 * sdma_send_txlist() - submit a list of tx req to ring
 * @sde: sdma engine to use
 * @wait: SE wait structure to use when full (may be NULL)
 * @tx_list: list of sdma_txreqs to submit
 * @count_out: pointer to a u16 which, after return, will contain the total
 *             number of sdma_txreqs removed from the tx_list. This includes
 *             sdma_txreqs whose SDMA descriptors are submitted to the ring
 *             and the sdma_txreqs which are added to the SDMA engine flush
 *             list if the SDMA engine state is not running.
 *
 * The call submits the list into the ring.
 *
 * If the iowait structure is non-NULL and not equal to the iowait list
 * the unprocessed part of the list will be appended to the list in wait.
 *
 * In all cases, the tx_list will be updated so the head of the tx_list is
 * the list of descriptors that have yet to be transmitted.
 *
 * The intent of this call is to provide a more efficient
 * way of submitting multiple packets to SDMA while holding the tail
 * side locking.
 *
 * Return:
 * 0 - Success,
 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
		     struct list_head *tx_list, u16 *count_out)
{
	struct sdma_txreq *tx, *tx_next;
	int ret = 0;
	unsigned long flags;
	u16 tail = INVALID_TAIL;
	u32 submit_count = 0, flush_count = 0, total_count;

	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = iowait_ioww_to_iow(wait);
		if (unlikely(!__sdma_running(sde)))
			goto unlock_noconn;
		if (unlikely(tx->num_desc > sde->desc_avail))
			goto nodesc;
		if (unlikely(tx->tlen)) {
			ret = -EINVAL;
			goto update_tail;
		}
		list_del_init(&tx->list);
		tail = submit_tx(sde, tx);
		submit_count++;
		if (tail != INVALID_TAIL &&
		    (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) {
			sdma_update_tail(sde, tail);
			tail = INVALID_TAIL;
		}
	}
update_tail:
	total_count = submit_count + flush_count;
	if (wait) {
		iowait_sdma_add(iowait_ioww_to_iow(wait), total_count);
		iowait_starve_clear(submit_count > 0,
				    iowait_ioww_to_iow(wait));
	}
	if (tail != INVALID_TAIL)
		sdma_update_tail(sde, tail);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	*count_out = total_count;
	return ret;
unlock_noconn:
	spin_lock(&sde->flushlist_lock);
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = iowait_ioww_to_iow(wait);
		list_del_init(&tx->list);
		tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
		tx->sn = sde->tail_sn++;
		trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
		list_add_tail(&tx->list, &sde->flushlist);
		flush_count++;
		iowait_inc_wait_count(wait, tx->num_desc);
	}
	spin_unlock(&sde->flushlist_lock);
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto update_tail;
nodesc:
	ret = sdma_check_progress(sde, wait, tx, submit_count > 0);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto update_tail;
}
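
/*
 * Illustrative only, not built: batch submission as described above.  The
 * list is assumed to already hold fully built sdma_txreqs (tlen == 0 on
 * each); txreqs that could not be submitted remain at the head of the list
 * on return.  The helper name is hypothetical.
 */
#if 0
static int example_submit_batch(struct sdma_engine *sde,
				struct list_head *txreqs)
{
	u16 accepted = 0;
	int ret;

	ret = sdma_send_txlist(sde, NULL, txreqs, &accepted);
	pr_debug("submitted %u txreqs, ret %d\n", accepted, ret);
	return ret;
}
#endif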
static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	__sdma_process_event(sde, event);

	if (sde->state.current_state == sdma_state_s99_running)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}
static void __sdma_process_event(struct sdma_engine *sde,
				 enum sdma_events event)
{
	struct sdma_state *ss = &sde->state;
	int need_progress = 0;

	/* CONFIG SDMA temporary */
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
		   sdma_state_names[ss->current_state],
		   sdma_event_names[event]);
#endif

	switch (ss->current_state) {
	case sdma_state_s00_hw_down:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			break;
		case sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually the
			 * result of a link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active on the
			 * 8051, very close to the time of the link up.
			 */
			ss->go_s99_running = 1;
			/* fall through -- and start dma engine */
		case sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&sde->state);
			sdma_set_state(sde,
				       sdma_state_s10_hw_start_up_halt_wait);
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s10_hw_start_up_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde,
				       sdma_state_s15_hw_start_up_clean_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s15_hw_start_up_clean_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s20_idle:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			sdma_set_state(sde, sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e85_link_down:
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s50_hw_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s60_idle_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s80_hw_freeze:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s82_freeze_sw_clean:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			/* notify caller this engine is done cleaning */
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s99_running:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			need_progress = 1;
			sdma_err_progress_check_schedule(sde);
			/* fall through */
		case sdma_event_e90_sw_halted:
			/*
			 * SW initiated halt does not perform engines
			 * progress check
			 */
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		}
		break;
	}

	ss->last_event = event;
	if (need_progress)
		sdma_make_progress(sde, 0);
}
/*
 * _extend_sdma_tx_descs() - helper to extend txreq
 *
 * This is called once the initial nominal allocation
 * of descriptors in the sdma_txreq is exhausted.
 *
 * The code will bump the allocation up to the max
 * of MAX_DESC (64) descriptors. There doesn't seem
 * much point in an interim step. The last descriptor
 * is reserved for coalesce buffer in order to support
 * cases where input packet has >MAX_DESC iovecs.
 *
 */
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int i;

	/* Handle last descriptor */
	if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
		/* if tlen is 0, it is for padding, release last descriptor */
		if (!tx->tlen) {
			tx->desc_limit = MAX_DESC;
		} else if (!tx->coalesce_buf) {
			/* allocate coalesce buffer with space for padding */
			tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
						   GFP_ATOMIC);
			if (!tx->coalesce_buf)
				goto enomem;
			tx->coalesce_idx = 0;
		}
		return 0;
	}

	if (unlikely(tx->num_desc == MAX_DESC))
		goto enomem;

	tx->descp = kmalloc_array(
			MAX_DESC,
			sizeof(struct sdma_desc),
			GFP_ATOMIC);
	if (!tx->descp)
		goto enomem;

	/* reserve last descriptor for coalescing */
	tx->desc_limit = MAX_DESC - 1;
	/* copy ones already built */
	for (i = 0; i < tx->num_desc; i++)
		tx->descp[i] = tx->descs[i];
	return 0;
enomem:
	__sdma_txclean(dd, tx);
	return -ENOMEM;
}
/*
 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
 *
 * This is called once the initial nominal allocation of descriptors
 * in the sdma_txreq is exhausted.
 *
 * This function calls _extend_sdma_tx_descs to extend or allocate
 * the coalesce buffer. If there is an allocated coalesce buffer, it
 * copies the input packet data into the coalesce buffer. It also adds
 * the coalesce buffer descriptor once the whole packet is received.
 *
 * Return:
 * <0 - error
 * 0 - coalescing, don't populate descriptor
 * 1 - continue with populating descriptor
 */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len)
{
	int pad_len, rval;
	dma_addr_t addr;

	rval = _extend_sdma_tx_descs(dd, tx);
	if (rval) {
		__sdma_txclean(dd, tx);
		return rval;
	}

	/* If coalesce buffer is allocated, copy data into it */
	if (tx->coalesce_buf) {
		if (type == SDMA_MAP_NONE) {
			__sdma_txclean(dd, tx);
			return -EINVAL;
		}

		if (type == SDMA_MAP_PAGE) {
			kvaddr = kmap(page);
			kvaddr += offset;
		} else if (WARN_ON(!kvaddr)) {
			__sdma_txclean(dd, tx);
			return -EINVAL;
		}

		memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
		tx->coalesce_idx += len;
		if (type == SDMA_MAP_PAGE)
			kunmap(page);

		/* If there is more data, return */
		if (tx->tlen - tx->coalesce_idx)
			return 0;

		/* Whole packet is received; add any padding */
		pad_len = tx->packet_len & (sizeof(u32) - 1);
		if (pad_len) {
			pad_len = sizeof(u32) - pad_len;
			memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
			/* padding is taken care of for coalescing case */
			tx->packet_len += pad_len;
			tx->tlen += pad_len;
		}

		/* dma map the coalesce buffer */
		addr = dma_map_single(&dd->pcidev->dev,
				      tx->coalesce_buf,
				      tx->tlen,
				      DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
			__sdma_txclean(dd, tx);
			return -ENOSPC;
		}

		/* Add descriptor for coalesce buffer */
		tx->desc_limit = MAX_DESC;
		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
					 addr, tx->tlen);
	}

	return 1;
}
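
/*
 * Illustrative only, not built: a sketch of how a descriptor-add path can
 * honor the return convention documented above.  The real add helpers live
 * in sdma.h; this simplified form (and the elided mapping/descriptor
 * population step) is an assumption for illustration.
 */
#if 0
static int example_add_bytes(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			     void *kvaddr, u16 len)
{
	int rval = 1;

	/* extension/coalescing is only attempted once the descs are full */
	if (unlikely(tx->num_desc == tx->desc_limit))
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
					      kvaddr, NULL, 0, len);
	if (rval <= 0)
		return rval;	/* <0 error (tx cleaned), 0 bytes coalesced */

	/* rval == 1: populate a descriptor as usual (mapping elided here) */
	return 0;
}
#endif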
/* Update sdes when the lmc changes */
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
{
	struct sdma_engine *sde;
	int i;
	u64 sreg;

	sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
		SD(CHECK_SLID_MASK_SHIFT)) |
	       (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
		SD(CHECK_SLID_VALUE_SHIFT));

	for (i = 0; i < dd->num_sdma; i++) {
		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		sde = &dd->per_sdma[i];
		write_sde_csr(sde, SD(CHECK_SLID), sreg);
	}
}
/* tx not dword sized - pad */
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int rval = 0;

	tx->num_desc++;
	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval) {
			__sdma_txclean(dd, tx);
			return rval;
		}
	}
	/* finish the one just added */
	make_tx_sdma_desc(
		tx,
		SDMA_MAP_NONE,
		dd->sdma_pad_phys,
		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
	_sdma_close_tx(dd, tx);
	return rval;
}
/*
 * Add ahg to the sdma_txreq
 *
 * The logic will consume up to 3
 * descriptors at the beginning of
 * the sdma_txreq.
 */
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen)
{
	u32 i, shift = 0, desc = 0;
	u8 mode;

	WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
	/* compute mode */
	if (num_ahg == 1)
		mode = SDMA_AHG_APPLY_UPDATE1;
	else if (num_ahg <= 5)
		mode = SDMA_AHG_APPLY_UPDATE2;
	else
		mode = SDMA_AHG_APPLY_UPDATE3;

	/* initialize to consumed descriptors to zero */
	switch (mode) {
	case SDMA_AHG_APPLY_UPDATE3:
		tx->num_desc++;
		tx->descs[2].qw[0] = 0;
		tx->descs[2].qw[1] = 0;
		/* FALLTHROUGH */
	case SDMA_AHG_APPLY_UPDATE2:
		tx->num_desc++;
		tx->descs[1].qw[0] = 0;
		tx->descs[1].qw[1] = 0;
		break;
	}
	ahg_hlen >>= 2;
	tx->descs[0].qw[1] |=
		(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
			<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
		(((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
			<< SDMA_DESC1_HEADER_DWS_SHIFT) |
		(((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
			<< SDMA_DESC1_HEADER_MODE_SHIFT) |
		(((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
			<< SDMA_DESC1_HEADER_UPDATE1_SHIFT);
	for (i = 0; i < (num_ahg - 1); i++) {
		if (!shift && !(i & 2))
			desc++;
		tx->descs[desc].qw[!!(i & 2)] |=
			(((u64)ahg[i + 1])
				<< shift);
		shift = (shift + 32) & 63;
	}
}
/**
 * sdma_ahg_alloc - allocate an AHG entry
 * @sde: engine to allocate from
 *
 * Return:
 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
 * -ENOSPC if an entry is not available
 */
int sdma_ahg_alloc(struct sdma_engine *sde)
{
	int nr;
	int oldbit;

	if (!sde) {
		trace_hfi1_ahg_allocate(sde, -EINVAL);
		return -EINVAL;
	}
	while (1) {
		nr = ffz(READ_ONCE(sde->ahg_bits));
		if (nr > 31) {
			trace_hfi1_ahg_allocate(sde, -ENOSPC);
			return -ENOSPC;
		}
		oldbit = test_and_set_bit(nr, &sde->ahg_bits);
		if (!oldbit)
			break;
		cpu_relax();
	}
	trace_hfi1_ahg_allocate(sde, nr);
	return nr;
}
/**
 * sdma_ahg_free - free an AHG entry
 * @sde: engine to return AHG entry
 * @ahg_index: index to free
 *
 * This routine frees the indicated AHG entry.
 */
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
{
	if (!sde)
		return;
	trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
	clear_bit(ahg_index, &sde->ahg_bits);
}
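
/*
 * Illustrative only, not built: the expected allocate/use/free pairing for
 * AHG entries.  The helper name and the elided "program headers against the
 * allocated index" step are assumptions for illustration.
 */
#if 0
static void example_ahg_cycle(struct sdma_engine *sde)
{
	int ahg_index = sdma_ahg_alloc(sde);

	if (ahg_index < 0)
		return;		/* negative errno: run without AHG */

	/* ... build headers against ahg_index and post txreqs ... */

	sdma_ahg_free(sde, ahg_index);
}
#endif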
/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is going into a freeze but before the freeze is fully
 * settled.  Generally an error interrupt.
 *
 * This event will pull the engine out of running so no more entries can be
 * added to the engine's queue.
 */
void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
{
	int i;
	enum sdma_events event = link_down ? sdma_event_e85_link_down :
					     sdma_event_e80_hw_freeze;

	/* set up the wait but do not wait here */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines to stop running and wait */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], event);

	/* sdma_freeze() will wait for all engines to have stopped */
}
/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is fully frozen.
 */
void sdma_freeze(struct hfi1_devdata *dd)
{
	int i;
	int ret;

	/*
	 * Make sure all engines have moved out of the running state before
	 * continuing.
	 */
	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count) <=
				       0);
	/* interrupted or count is negative, then unloading - just exit */
	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
		return;

	/* set up the count for the next wait */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines that the SPC is frozen, they can start cleaning */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);

	/*
	 * Wait for everyone to finish software clean before exiting.  The
	 * software clean will read engine CSRs, so must be completed before
	 * the next step, which will clear the engine CSRs.
	 */
	(void)wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count) <= 0);
	/* no need to check results - done no matter what */
}
/*
 * SPC freeze handling for the SDMA engines.  Called after the SPC is unfrozen.
 *
 * The SPC freeze acts like a SDMA halt and a hardware clean combined.  All
 * that is left is a software clean.  We could do it after the SPC is fully
 * frozen, but then we'd have to add another state to wait for the unfreeze.
 * Instead, just defer the software clean until the unfreeze step.
 */
void sdma_unfreeze(struct hfi1_devdata *dd)
{
	int i;

	/* tell all engines start freeze clean up */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i],
				   sdma_event_e82_hw_unfreeze);
}
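
/*
 * Illustrative only, not built: the ordering the three SPC-freeze hooks
 * above expect from their caller.  In the real driver this sequencing lives
 * in the chip error-handling paths, not in a single helper like this one.
 */
#if 0
static void example_handle_spc_freeze(struct hfi1_devdata *dd, int link_down)
{
	/* freeze is starting: kick every engine out of the running state */
	sdma_freeze_notify(dd, link_down);

	/* ... wait until the SPC is reported fully frozen ... */

	/* SPC frozen: wait for engines to halt and finish their SW clean */
	sdma_freeze(dd);

	/* ... unfreeze the SPC ... */

	/* SPC unfrozen: defer the remaining software clean and restart */
	sdma_unfreeze(dd);
}
#endif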
/**
 * _sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 */
void _sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
	/* assume we have selected a good cpu */
	write_csr(sde->dd,
		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
		  sde->progress_mask);
}