3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2015 Intel Corporation.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
21 * Copyright(c) 2015 Intel Corporation.
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 #include <linux/spinlock.h>
52 #include <linux/seqlock.h>
53 #include <linux/netdevice.h>
54 #include <linux/moduleparam.h>
55 #include <linux/bitops.h>
56 #include <linux/timer.h>
57 #include <linux/vmalloc.h>
58 #include <linux/highmem.h>
67 /* must be a power of 2, >= 64 and <= 32768 */
68 #define SDMA_DESCQ_CNT 2048
69 #define SDMA_DESC_INTR 64
70 #define INVALID_TAIL 0xffff
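/*
 * Keeping the descriptor count a power of 2 lets ring indices wrap with a
 * cheap mask (sde->sdma_mask = descq_cnt - 1), e.g.
 * head = ++sde->descq_head & sde->sdma_mask, instead of a modulo.
 */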
72 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
73 module_param(sdma_descq_cnt, uint, S_IRUGO);
74 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
76 static uint sdma_idle_cnt = 250;
77 module_param(sdma_idle_cnt, uint, S_IRUGO);
78 MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)");
81 module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
82 MODULE_PARM_DESC(num_sdma, "Set max number of SDMA engines to use");
84 static uint sdma_desct_intr = SDMA_DESC_INTR;
85 module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
86 MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
88 #define SDMA_WAIT_BATCH_SIZE 20
89 /* max wait time for an SDMA engine to indicate it has halted */
90 #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
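/*
 * SDMA_ERR_HALT_TIMEOUT bounds the polling loop in sdma_err_halt_wait(),
 * which re-reads SD(STATUS) with a short usleep_range() between reads
 * until STATUS_ENG_HALTED is seen or the timeout expires.
 */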
91 /* all SDMA engine errors that cause a halt */
93 #define SD(name) SEND_DMA_##name
94 #define ALL_SDMA_ENG_HALT_ERRS \
95 (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
96 | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
97 | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
98 | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
99 | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
100 | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
101 | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
102 | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
103 | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
104 | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
105 | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
106 | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
107 | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
108 | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
109 | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
110 | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
111 | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
112 | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
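/*
 * ALL_SDMA_ENG_HALT_ERRS is tested against the engine error status in
 * sdma_engine_error(); any of these bits raises the e60_hw_halted event
 * and sends the engine down the halt/clean-up path of the state machine.
 */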
114 /* sdma_sendctrl operations */
115 #define SDMA_SENDCTRL_OP_ENABLE BIT(0)
116 #define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
117 #define SDMA_SENDCTRL_OP_HALT BIT(2)
118 #define SDMA_SENDCTRL_OP_CLEANUP BIT(3)
120 /* handle long defines */
121 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
122 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
123 #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
124 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
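/*
 * This mask/shift pair is used by sdma_wait_for_packet_egress() to pull
 * the per-engine egress packet occupancy out of the
 * SEND_EGRESS_SEND_DMA_STATUS CSR (mask first, then shift down).
 */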
126 static const char * const sdma_state_names[] = {
127 	[sdma_state_s00_hw_down] = "s00_HwDown",
128 	[sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
129 	[sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
130 	[sdma_state_s20_idle] = "s20_Idle",
131 	[sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
132 	[sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
133 	[sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
134 	[sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
135 	[sdma_state_s80_hw_freeze] = "s80_HwFreeze",
136 	[sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
137 	[sdma_state_s99_running] = "s99_Running",
140 static const char * const sdma_event_names[] = {
141 	[sdma_event_e00_go_hw_down] = "e00_GoHwDown",
142 	[sdma_event_e10_go_hw_start] = "e10_GoHwStart",
143 	[sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
144 	[sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
145 	[sdma_event_e30_go_running] = "e30_GoRunning",
146 	[sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
147 	[sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
148 	[sdma_event_e60_hw_halted] = "e60_HwHalted",
149 	[sdma_event_e70_go_idle] = "e70_GoIdle",
150 	[sdma_event_e80_hw_freeze] = "e80_HwFreeze",
151 	[sdma_event_e81_hw_frozen] = "e81_HwFrozen",
152 	[sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
153 	[sdma_event_e85_link_down] = "e85_LinkDown",
154 	[sdma_event_e90_sw_halted] = "e90_SwHalted",
157 static const struct sdma_set_state_action sdma_action_table[] = {
158 	[sdma_state_s00_hw_down] = {
159 		.go_s99_running_tofalse = 1,
165 	[sdma_state_s10_hw_start_up_halt_wait] = {
171 	[sdma_state_s15_hw_start_up_clean_wait] = {
177 	[sdma_state_s20_idle] = {
183 	[sdma_state_s30_sw_clean_up_wait] = {
189 	[sdma_state_s40_hw_clean_up_wait] = {
195 	[sdma_state_s50_hw_halt_wait] = {
201 	[sdma_state_s60_idle_halt_wait] = {
202 		.go_s99_running_tofalse = 1,
208 	[sdma_state_s80_hw_freeze] = {
214 	[sdma_state_s82_freeze_sw_clean] = {
220 	[sdma_state_s99_running] = {
225 		.go_s99_running_totrue = 1,
229 #define SDMA_TAIL_UPDATE_THRESH 0x1F
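/*
 * SDMA_TAIL_UPDATE_THRESH batches doorbell writes in sdma_send_txlist():
 * the hardware tail CSR is written only when the running request count
 * hits a multiple of 32 ((count & 0x1F) == 0), plus once at the end,
 * instead of once per request.
 */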
231 /* declare all statics here rather than keep sorting */
232 static void sdma_complete(struct kref *);
233 static void sdma_finalput(struct sdma_state *);
234 static void sdma_get(struct sdma_state *);
235 static void sdma_hw_clean_up_task(unsigned long);
236 static void sdma_put(struct sdma_state *);
237 static void sdma_set_state(struct sdma_engine *, enum sdma_states);
238 static void sdma_start_hw_clean_up(struct sdma_engine *);
239 static void sdma_sw_clean_up_task(unsigned long);
240 static void sdma_sendctrl(struct sdma_engine *, unsigned);
241 static void init_sdma_regs(struct sdma_engine *, u32, uint);
242 static void sdma_process_event(
243 	struct sdma_engine *sde,
244 	enum sdma_events event);
245 static void __sdma_process_event(
246 	struct sdma_engine *sde,
247 	enum sdma_events event);
248 static void dump_sdma_state(struct sdma_engine *sde);
249 static void sdma_make_progress(struct sdma_engine *sde, u64 status);
250 static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
251 static void sdma_flush_descq(struct sdma_engine *sde);
254 * sdma_state_name() - return state string from enum
257 static const char *sdma_state_name(enum sdma_states state)
259 	return sdma_state_names[state];
262 static void sdma_get(struct sdma_state
*ss
)
267 static void sdma_complete(struct kref
*kref
)
269 struct sdma_state
*ss
=
270 container_of(kref
, struct sdma_state
, kref
);
275 static void sdma_put(struct sdma_state
*ss
)
277 kref_put(&ss
->kref
, sdma_complete
);
280 static void sdma_finalput(struct sdma_state
*ss
)
283 wait_for_completion(&ss
->comp
);
286 static inline void write_sde_csr(
287 struct sdma_engine
*sde
,
291 write_kctxt_csr(sde
->dd
, sde
->this_idx
, offset0
, value
);
294 static inline u64
read_sde_csr(
295 struct sdma_engine
*sde
,
298 return read_kctxt_csr(sde
->dd
, sde
->this_idx
, offset0
);
302 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
303 * sdma engine 'sde' to drop to 0.
305 static void sdma_wait_for_packet_egress(struct sdma_engine
*sde
,
308 u64 off
= 8 * sde
->this_idx
;
309 struct hfi1_devdata
*dd
= sde
->dd
;
316 reg
= read_csr(dd
, off
+ SEND_EGRESS_SEND_DMA_STATUS
);
318 reg
&= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
;
319 reg
>>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
;
322 /* counter is reset if occupancy count changes */
326 /* timed out - bounce the link */
327 dd_dev_err(dd
, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
328 __func__
, sde
->this_idx
, (u32
)reg
);
329 queue_work(dd
->pport
->hfi1_wq
,
330 &dd
->pport
->link_bounce_work
);
338 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
339 * and pause for credit return.
341 void sdma_wait(struct hfi1_devdata
*dd
)
345 for (i
= 0; i
< dd
->num_sdma
; i
++) {
346 struct sdma_engine
*sde
= &dd
->per_sdma
[i
];
348 sdma_wait_for_packet_egress(sde
, 0);
352 static inline void sdma_set_desc_cnt(struct sdma_engine
*sde
, unsigned cnt
)
356 if (!(sde
->dd
->flags
& HFI1_HAS_SDMA_TIMEOUT
))
359 reg
&= SD(DESC_CNT_CNT_MASK
);
360 reg
<<= SD(DESC_CNT_CNT_SHIFT
);
361 write_sde_csr(sde
, SD(DESC_CNT
), reg
);
365 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
367 * Depending on timing there can be txreqs in two places:
368 * - in the descq ring
369 * - in the flush list
371 * To avoid ordering issues the descq ring needs to be flushed
372 * first followed by the flush list.
374 * This routine is called from two places
375 * - From a work queue item
376 * - Directly from the state machine just before setting the
379 * Must be called with head_lock held
382 static void sdma_flush(struct sdma_engine
*sde
)
384 struct sdma_txreq
*txp
, *txp_next
;
385 LIST_HEAD(flushlist
);
388 /* flush from head to tail */
389 sdma_flush_descq(sde
);
390 spin_lock_irqsave(&sde
->flushlist_lock
, flags
);
391 /* copy flush list */
392 list_for_each_entry_safe(txp
, txp_next
, &sde
->flushlist
, list
) {
393 list_del_init(&txp
->list
);
394 list_add_tail(&txp
->list
, &flushlist
);
396 spin_unlock_irqrestore(&sde
->flushlist_lock
, flags
);
397 /* flush from flush list */
398 list_for_each_entry_safe(txp
, txp_next
, &flushlist
, list
) {
400 /* protect against complete modifying */
401 struct iowait
*wait
= txp
->wait
;
403 list_del_init(&txp
->list
);
404 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
405 trace_hfi1_sdma_out_sn(sde
, txp
->sn
);
406 if (WARN_ON_ONCE(sde
->head_sn
!= txp
->sn
))
407 dd_dev_err(sde
->dd
, "expected %llu got %llu\n",
408 sde
->head_sn
, txp
->sn
);
411 sdma_txclean(sde
->dd
, txp
);
413 drained
= atomic_dec_and_test(&wait
->sdma_busy
);
415 (*txp
->complete
)(txp
, SDMA_TXREQ_S_ABORTED
, drained
);
417 iowait_drain_wakeup(wait
);
422 * Fields a work request for flushing the descq ring
425 * If the engine has been brought to running during
426 * the scheduling delay, the flush is ignored, assuming
427 * that the process of bringing the engine to running
428 * would have done this flush prior to going to running.
431 static void sdma_field_flush(struct work_struct
*work
)
434 struct sdma_engine
*sde
=
435 container_of(work
, struct sdma_engine
, flush_worker
);
437 write_seqlock_irqsave(&sde
->head_lock
, flags
);
438 if (!__sdma_running(sde
))
440 write_sequnlock_irqrestore(&sde
->head_lock
, flags
);
443 static void sdma_err_halt_wait(struct work_struct
*work
)
445 struct sdma_engine
*sde
= container_of(work
, struct sdma_engine
,
448 unsigned long timeout
;
450 timeout
= jiffies
+ msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT
);
452 statuscsr
= read_sde_csr(sde
, SD(STATUS
));
453 statuscsr
&= SD(STATUS_ENG_HALTED_SMASK
);
456 if (time_after(jiffies
, timeout
)) {
458 "SDMA engine %d - timeout waiting for engine to halt\n",
461 * Continue anyway. This could happen if there was
462 * an uncorrectable error in the wrong spot.
466 usleep_range(80, 120);
469 sdma_process_event(sde
, sdma_event_e15_hw_halt_done
);
472 static void sdma_err_progress_check_schedule(struct sdma_engine
*sde
)
474 if (!is_bx(sde
->dd
) && HFI1_CAP_IS_KSET(SDMA_AHG
)) {
477 struct hfi1_devdata
*dd
= sde
->dd
;
479 for (index
= 0; index
< dd
->num_sdma
; index
++) {
480 struct sdma_engine
*curr_sdma
= &dd
->per_sdma
[index
];
482 if (curr_sdma
!= sde
)
483 curr_sdma
->progress_check_head
=
484 curr_sdma
->descq_head
;
487 "SDMA engine %d - check scheduled\n",
489 mod_timer(&sde
->err_progress_check_timer
, jiffies
+ 10);
493 static void sdma_err_progress_check(unsigned long data
)
496 struct sdma_engine
*sde
= (struct sdma_engine
*)data
;
498 dd_dev_err(sde
->dd
, "SDE progress check event\n");
499 for (index
= 0; index
< sde
->dd
->num_sdma
; index
++) {
500 struct sdma_engine
*curr_sde
= &sde
->dd
->per_sdma
[index
];
503 /* check progress on each engine except the current one */
507 * We must lock interrupts when acquiring sde->lock,
508 * to avoid a deadlock if an interrupt triggers and spins on
509 * the same lock on the same CPU
511 spin_lock_irqsave(&curr_sde
->tail_lock
, flags
);
512 write_seqlock(&curr_sde
->head_lock
);
514 /* skip non-running queues */
515 if (curr_sde
->state
.current_state
!= sdma_state_s99_running
) {
516 write_sequnlock(&curr_sde
->head_lock
);
517 spin_unlock_irqrestore(&curr_sde
->tail_lock
, flags
);
521 if ((curr_sde
->descq_head
!= curr_sde
->descq_tail
) &&
522 (curr_sde
->descq_head
==
523 curr_sde
->progress_check_head
))
524 __sdma_process_event(curr_sde
,
525 sdma_event_e90_sw_halted
);
526 write_sequnlock(&curr_sde
->head_lock
);
527 spin_unlock_irqrestore(&curr_sde
->tail_lock
, flags
);
529 schedule_work(&sde
->err_halt_worker
);
532 static void sdma_hw_clean_up_task(unsigned long opaque
)
534 struct sdma_engine
*sde
= (struct sdma_engine
*) opaque
;
538 #ifdef CONFIG_SDMA_VERBOSITY
539 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
540 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
,
543 statuscsr
= read_sde_csr(sde
, SD(STATUS
));
544 statuscsr
&= SD(STATUS_ENG_CLEANED_UP_SMASK
);
550 sdma_process_event(sde
, sdma_event_e25_hw_clean_up_done
);
553 static inline struct sdma_txreq
*get_txhead(struct sdma_engine
*sde
)
555 smp_read_barrier_depends(); /* see sdma_update_tail() */
556 return sde
->tx_ring
[sde
->tx_head
& sde
->sdma_mask
];
560 * flush ring for recovery
562 static void sdma_flush_descq(struct sdma_engine
*sde
)
566 struct sdma_txreq
*txp
= get_txhead(sde
);
568 /* The reason for some of the complexity of this code is that
569 * not all descriptors have corresponding txps. So, we have to
570 * be able to skip over descs until we wander into the range of
571 * the next txp on the list.
573 head
= sde
->descq_head
& sde
->sdma_mask
;
574 tail
= sde
->descq_tail
& sde
->sdma_mask
;
575 while (head
!= tail
) {
576 /* advance head, wrap if needed */
577 head
= ++sde
->descq_head
& sde
->sdma_mask
;
578 /* if now past this txp's descs, do the callback */
579 if (txp
&& txp
->next_descq_idx
== head
) {
581 /* protect against complete modifying */
582 struct iowait
*wait
= txp
->wait
;
584 /* remove from list */
585 sde
->tx_ring
[sde
->tx_head
++ & sde
->sdma_mask
] = NULL
;
587 drained
= atomic_dec_and_test(&wait
->sdma_busy
);
588 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
589 trace_hfi1_sdma_out_sn(sde
, txp
->sn
);
590 if (WARN_ON_ONCE(sde
->head_sn
!= txp
->sn
))
591 dd_dev_err(sde
->dd
, "expected %llu got %llu\n",
592 sde
->head_sn
, txp
->sn
);
595 sdma_txclean(sde
->dd
, txp
);
596 trace_hfi1_sdma_progress(sde
, head
, tail
, txp
);
600 SDMA_TXREQ_S_ABORTED
,
603 iowait_drain_wakeup(wait
);
604 /* see if there is another txp */
605 txp
= get_txhead(sde
);
610 sdma_desc_avail(sde
, sdma_descq_freecnt(sde
));
613 static void sdma_sw_clean_up_task(unsigned long opaque
)
615 struct sdma_engine
*sde
= (struct sdma_engine
*) opaque
;
618 spin_lock_irqsave(&sde
->tail_lock
, flags
);
619 write_seqlock(&sde
->head_lock
);
622 * At this point, the following should always be true:
623 * - We are halted, so no more descriptors are getting retired.
624 * - We are not running, so no one is submitting new work.
625 * - Only we can send the e40_sw_cleaned, so we can't start
626 * running again until we say so. So, the active list and
627 * descq are ours to play with.
632 * In the error clean up sequence, software clean must be called
633 * before the hardware clean so we can use the hardware head in
634 * the progress routine. A hardware clean or SPC unfreeze will
635 * reset the hardware head.
637 * Process all retired requests. The progress routine will use the
638 * latest physical hardware head - we are not running so speed does
641 sdma_make_progress(sde
, 0);
646 * Reset our notion of head and tail.
647 * Note that the HW registers have been reset via an earlier
652 sde
->desc_avail
= sdma_descq_freecnt(sde
);
655 __sdma_process_event(sde
, sdma_event_e40_sw_cleaned
);
657 write_sequnlock(&sde
->head_lock
);
658 spin_unlock_irqrestore(&sde
->tail_lock
, flags
);
661 static void sdma_sw_tear_down(struct sdma_engine
*sde
)
663 struct sdma_state
*ss
= &sde
->state
;
665 /* Releasing this reference means the state machine has stopped. */
668 /* stop waiting for all unfreeze events to complete */
669 atomic_set(&sde
->dd
->sdma_unfreeze_count
, -1);
670 wake_up_interruptible(&sde
->dd
->sdma_unfreeze_wq
);
673 static void sdma_start_hw_clean_up(struct sdma_engine
*sde
)
675 tasklet_hi_schedule(&sde
->sdma_hw_clean_up_task
);
678 static void sdma_set_state(struct sdma_engine
*sde
,
679 enum sdma_states next_state
)
681 struct sdma_state
*ss
= &sde
->state
;
682 const struct sdma_set_state_action
*action
= sdma_action_table
;
685 trace_hfi1_sdma_state(
687 sdma_state_names
[ss
->current_state
],
688 sdma_state_names
[next_state
]);
690 /* debugging bookkeeping */
691 ss
->previous_state
= ss
->current_state
;
692 ss
->previous_op
= ss
->current_op
;
693 ss
->current_state
= next_state
;
695 if (ss
->previous_state
!= sdma_state_s99_running
696 && next_state
== sdma_state_s99_running
)
699 if (action
[next_state
].op_enable
)
700 op
|= SDMA_SENDCTRL_OP_ENABLE
;
702 if (action
[next_state
].op_intenable
)
703 op
|= SDMA_SENDCTRL_OP_INTENABLE
;
705 if (action
[next_state
].op_halt
)
706 op
|= SDMA_SENDCTRL_OP_HALT
;
708 if (action
[next_state
].op_cleanup
)
709 op
|= SDMA_SENDCTRL_OP_CLEANUP
;
711 if (action
[next_state
].go_s99_running_tofalse
)
712 ss
->go_s99_running
= 0;
714 if (action
[next_state
].go_s99_running_totrue
)
715 ss
->go_s99_running
= 1;
718 sdma_sendctrl(sde
, ss
->current_op
);
722 * sdma_get_descq_cnt() - called when device probed
724 * Return a validated descq count.
726 * This is currently only used in the verbs initialization to build the tx
729 * This will probably be deleted in favor of a more scalable approach to
733 u16 sdma_get_descq_cnt(void)
735 	u16 count = sdma_descq_cnt;
738 		return SDMA_DESCQ_CNT;
739 	/* count must be a power of 2 between 64 and 32768, inclusive.
740 	 * Otherwise return the default. */
742 	if (!is_power_of_2(count))
743 		return SDMA_DESCQ_CNT;
744 	if (count < 64 || count > 32768)
745 		return SDMA_DESCQ_CNT;
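/*
 * Illustrative values (not from the code): a requested count of 1000 is
 * rejected for not being a power of 2, and 32 is rejected for being below
 * 64; both fall back to the SDMA_DESCQ_CNT default of 2048.
 */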
750 * sdma_select_engine_vl() - select sdma engine
752 * @selector: a spreading factor
756 * This function returns an engine based on the selector and a vl. The
757 * mapping fields are protected by RCU.
759 struct sdma_engine
*sdma_select_engine_vl(
760 struct hfi1_devdata
*dd
,
764 struct sdma_vl_map
*m
;
765 struct sdma_map_elem
*e
;
766 struct sdma_engine
*rval
;
768 /* NOTE This should only happen if SC->VL changed after the initial
769 * checks on the QP/AH
770 * Default will return engine 0 below
778 m
= rcu_dereference(dd
->sdma_map
);
781 return &dd
->per_sdma
[0];
783 e
= m
->map
[vl
& m
->mask
];
784 rval
= e
->sde
[selector
& e
->mask
];
788 rval
= !rval
? &dd
->per_sdma
[0] : rval
;
789 trace_hfi1_sdma_engine_select(dd
, selector
, vl
, rval
->this_idx
);
794 * sdma_select_engine_sc() - select sdma engine
796 * @selector: a spreading factor
800 * This function returns an engine based on the selector and an sc.
802 struct sdma_engine
*sdma_select_engine_sc(
803 struct hfi1_devdata
*dd
,
807 u8 vl
= sc_to_vlt(dd
, sc5
);
809 return sdma_select_engine_vl(dd
, selector
, vl
);
813 * Free the indicated map struct
815 static void sdma_map_free(struct sdma_vl_map
*m
)
819 for (i
= 0; m
&& i
< m
->actual_vls
; i
++)
825 * Handle RCU callback
827 static void sdma_map_rcu_callback(struct rcu_head
*list
)
829 struct sdma_vl_map
*m
= container_of(list
, struct sdma_vl_map
, list
);
835 * sdma_map_init - called when # vls change
838 * @num_vls: number of vls
839 * @vl_engines: per vl engine mapping (optional)
841 * This routine changes the mapping based on the number of vls.
843 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
844 * implies auto computing the loading and giving each VL a uniform
845 * distribution of engines per VL.
847 * The auto algorithm computes the sde_per_vl and the number of extra
848 * engines. Any extra engines are added from the last VL on down.
850 * rcu locking is used here to control access to the mapping fields.
852 * If either the num_vls or num_sdma are non-power of 2, the array sizes
853 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
854 * up to the next highest power of 2 and the first entry is reused
855 * in a round robin fashion.
857 * If an error occurs the map change is not done and the mapping is
861 int sdma_map_init(struct hfi1_devdata
*dd
, u8 port
, u8 num_vls
, u8
*vl_engines
)
864 int extra
, sde_per_vl
;
866 u8 lvl_engines
[OPA_MAX_VLS
];
867 struct sdma_vl_map
*oldmap
, *newmap
;
869 if (!(dd
->flags
& HFI1_HAS_SEND_DMA
))
873 /* truncate divide */
874 sde_per_vl
= dd
->num_sdma
/ num_vls
;
876 extra
= dd
->num_sdma
% num_vls
;
877 vl_engines
= lvl_engines
;
878 /* add extras from last vl down */
879 for (i
= num_vls
- 1; i
>= 0; i
--, extra
--)
880 vl_engines
[i
] = sde_per_vl
+ (extra
> 0 ? 1 : 0);
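/*
 * Illustrative example (values not from the code): with dd->num_sdma = 16
 * and num_vls = 5, sde_per_vl = 3 and extra = 1, so VL 4 gets 4 engines
 * while VLs 0-3 get 3 each (extras are handed out from the last VL down).
 */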
884 sizeof(struct sdma_vl_map
) +
885 roundup_pow_of_two(num_vls
) *
886 sizeof(struct sdma_map_elem
*),
890 newmap
->actual_vls
= num_vls
;
891 newmap
->vls
= roundup_pow_of_two(num_vls
);
892 newmap
->mask
= (1 << ilog2(newmap
->vls
)) - 1;
893 /* initialize back-map */
894 for (i
= 0; i
< TXE_NUM_SDMA_ENGINES
; i
++)
895 newmap
->engine_to_vl
[i
] = -1;
896 for (i
= 0; i
< newmap
->vls
; i
++) {
897 /* save for wrap around */
898 int first_engine
= engine
;
900 if (i
< newmap
->actual_vls
) {
901 int sz
= roundup_pow_of_two(vl_engines
[i
]);
903 /* only allocate once */
904 newmap
->map
[i
] = kzalloc(
905 sizeof(struct sdma_map_elem
) +
906 sz
* sizeof(struct sdma_engine
*),
910 newmap
->map
[i
]->mask
= (1 << ilog2(sz
)) - 1;
912 for (j
= 0; j
< sz
; j
++) {
913 newmap
->map
[i
]->sde
[j
] =
914 &dd
->per_sdma
[engine
];
915 if (++engine
>= first_engine
+ vl_engines
[i
])
916 /* wrap back to first engine */
917 engine
= first_engine
;
919 /* assign back-map */
920 for (j
= 0; j
< vl_engines
[i
]; j
++)
921 newmap
->engine_to_vl
[first_engine
+ j
] = i
;
923 /* just re-use entry without allocating */
924 newmap
->map
[i
] = newmap
->map
[i
% num_vls
];
926 engine
= first_engine
+ vl_engines
[i
];
928 /* newmap in hand, save old map */
929 spin_lock_irq(&dd
->sde_map_lock
);
930 oldmap
= rcu_dereference_protected(dd
->sdma_map
,
931 lockdep_is_held(&dd
->sde_map_lock
));
934 rcu_assign_pointer(dd
->sdma_map
, newmap
);
936 spin_unlock_irq(&dd
->sde_map_lock
);
937 /* success, free any old map after grace period */
939 call_rcu(&oldmap
->list
, sdma_map_rcu_callback
);
942 /* free any partial allocation */
943 sdma_map_free(newmap
);
948 * Clean up allocated memory.
950 * This routine can be called regardless of the success of sdma_init()
953 static void sdma_clean(struct hfi1_devdata
*dd
, size_t num_engines
)
956 struct sdma_engine
*sde
;
958 if (dd
->sdma_pad_dma
) {
959 dma_free_coherent(&dd
->pcidev
->dev
, 4,
960 (void *)dd
->sdma_pad_dma
,
962 dd
->sdma_pad_dma
= NULL
;
963 dd
->sdma_pad_phys
= 0;
965 if (dd
->sdma_heads_dma
) {
966 dma_free_coherent(&dd
->pcidev
->dev
, dd
->sdma_heads_size
,
967 (void *)dd
->sdma_heads_dma
,
968 dd
->sdma_heads_phys
);
969 dd
->sdma_heads_dma
= NULL
;
970 dd
->sdma_heads_phys
= 0;
972 for (i
= 0; dd
->per_sdma
&& i
< num_engines
; ++i
) {
973 sde
= &dd
->per_sdma
[i
];
975 sde
->head_dma
= NULL
;
981 sde
->descq_cnt
* sizeof(u64
[2]),
988 kvfree(sde
->tx_ring
);
991 spin_lock_irq(&dd
->sde_map_lock
);
992 kfree(rcu_access_pointer(dd
->sdma_map
));
993 RCU_INIT_POINTER(dd
->sdma_map
, NULL
);
994 spin_unlock_irq(&dd
->sde_map_lock
);
1001 * sdma_init() - called when device probed
1003 * @port: port number (currently only zero)
1005 * sdma_init initializes the specified number of engines.
1007 * The code initializes each sde, its csrs. Interrupts
1008 * are not required to be enabled.
1011 * 0 - success, -errno on failure
1013 int sdma_init(struct hfi1_devdata
*dd
, u8 port
)
1016 struct sdma_engine
*sde
;
1019 struct hfi1_pportdata
*ppd
= dd
->pport
+ port
;
1020 u32 per_sdma_credits
;
1021 uint idle_cnt
= sdma_idle_cnt
;
1022 size_t num_engines
= dd
->chip_sdma_engines
;
1024 if (!HFI1_CAP_IS_KSET(SDMA
)) {
1025 HFI1_CAP_CLEAR(SDMA_AHG
);
1029 /* can't exceed chip support */
1030 mod_num_sdma
<= dd
->chip_sdma_engines
&&
1031 /* count must be >= vls */
1032 mod_num_sdma
>= num_vls
)
1033 num_engines
= mod_num_sdma
;
1035 dd_dev_info(dd
, "SDMA mod_num_sdma: %u\n", mod_num_sdma
);
1036 dd_dev_info(dd
, "SDMA chip_sdma_engines: %u\n", dd
->chip_sdma_engines
);
1037 dd_dev_info(dd
, "SDMA chip_sdma_mem_size: %u\n",
1038 dd
->chip_sdma_mem_size
);
1041 dd
->chip_sdma_mem_size
/(num_engines
* SDMA_BLOCK_SIZE
);
1043 /* set up freeze waitqueue */
1044 init_waitqueue_head(&dd
->sdma_unfreeze_wq
);
1045 atomic_set(&dd
->sdma_unfreeze_count
, 0);
1047 descq_cnt
= sdma_get_descq_cnt();
1048 dd_dev_info(dd
, "SDMA engines %zu descq_cnt %u\n",
1049 num_engines
, descq_cnt
);
1051 /* alloc memory for array of send engines */
1052 dd
->per_sdma
= kcalloc(num_engines
, sizeof(*dd
->per_sdma
), GFP_KERNEL
);
1056 idle_cnt
= ns_to_cclock(dd
, idle_cnt
);
1057 if (!sdma_desct_intr
)
1058 sdma_desct_intr
= SDMA_DESC_INTR
;
1060 /* Allocate memory for SendDMA descriptor FIFOs */
1061 for (this_idx
= 0; this_idx
< num_engines
; ++this_idx
) {
1062 sde
= &dd
->per_sdma
[this_idx
];
1065 sde
->this_idx
= this_idx
;
1066 sde
->descq_cnt
= descq_cnt
;
1067 sde
->desc_avail
= sdma_descq_freecnt(sde
);
1068 sde
->sdma_shift
= ilog2(descq_cnt
);
1069 sde
->sdma_mask
= (1 << sde
->sdma_shift
) - 1;
1071 /* Create a mask specifically for each interrupt source */
1072 sde
->int_mask
= (u64
)1 << (0 * TXE_NUM_SDMA_ENGINES
+
1074 sde
->progress_mask
= (u64
)1 << (1 * TXE_NUM_SDMA_ENGINES
+
1076 sde
->idle_mask
= (u64
)1 << (2 * TXE_NUM_SDMA_ENGINES
+
1078 /* Create a combined mask to cover all 3 interrupt sources */
1079 sde
->imask
= sde
->int_mask
| sde
->progress_mask
|
1082 spin_lock_init(&sde
->tail_lock
);
1083 seqlock_init(&sde
->head_lock
);
1084 spin_lock_init(&sde
->senddmactrl_lock
);
1085 spin_lock_init(&sde
->flushlist_lock
);
1086 /* ensure there is always a zero bit */
1087 sde
->ahg_bits
= 0xfffffffe00000000ULL
;
1089 sdma_set_state(sde
, sdma_state_s00_hw_down
);
1091 /* set up reference counting */
1092 kref_init(&sde
->state
.kref
);
1093 init_completion(&sde
->state
.comp
);
1095 INIT_LIST_HEAD(&sde
->flushlist
);
1096 INIT_LIST_HEAD(&sde
->dmawait
);
1099 get_kctxt_csr_addr(dd
, this_idx
, SD(TAIL
));
1103 SDMA_DESC1_HEAD_TO_HOST_FLAG
;
1106 SDMA_DESC1_INT_REQ_FLAG
;
1108 tasklet_init(&sde
->sdma_hw_clean_up_task
, sdma_hw_clean_up_task
,
1109 (unsigned long)sde
);
1111 tasklet_init(&sde
->sdma_sw_clean_up_task
, sdma_sw_clean_up_task
,
1112 (unsigned long)sde
);
1113 INIT_WORK(&sde
->err_halt_worker
, sdma_err_halt_wait
);
1114 INIT_WORK(&sde
->flush_worker
, sdma_field_flush
);
1116 sde
->progress_check_head
= 0;
1118 setup_timer(&sde
->err_progress_check_timer
,
1119 sdma_err_progress_check
, (unsigned long)sde
);
1121 sde
->descq
= dma_zalloc_coherent(
1123 descq_cnt
* sizeof(u64
[2]),
1130 kcalloc(descq_cnt
, sizeof(struct sdma_txreq
*),
1135 sizeof(struct sdma_txreq
*) *
1141 dd
->sdma_heads_size
= L1_CACHE_BYTES
* num_engines
;
1142 /* Allocate memory for DMA of head registers to memory */
1143 dd
->sdma_heads_dma
= dma_zalloc_coherent(
1145 dd
->sdma_heads_size
,
1146 &dd
->sdma_heads_phys
,
1149 if (!dd
->sdma_heads_dma
) {
1150 dd_dev_err(dd
, "failed to allocate SendDMA head memory\n");
1154 /* Allocate memory for pad */
1155 dd
->sdma_pad_dma
= dma_zalloc_coherent(
1161 if (!dd
->sdma_pad_dma
) {
1162 dd_dev_err(dd
, "failed to allocate SendDMA pad memory\n");
1166 /* assign each engine to different cacheline and init registers */
1167 curr_head
= (void *)dd
->sdma_heads_dma
;
1168 for (this_idx
= 0; this_idx
< num_engines
; ++this_idx
) {
1169 unsigned long phys_offset
;
1171 sde
= &dd
->per_sdma
[this_idx
];
1173 sde
->head_dma
= curr_head
;
1174 curr_head
+= L1_CACHE_BYTES
;
1175 phys_offset
= (unsigned long)sde
->head_dma
-
1176 (unsigned long)dd
->sdma_heads_dma
;
1177 sde
->head_phys
= dd
->sdma_heads_phys
+ phys_offset
;
1178 init_sdma_regs(sde
, per_sdma_credits
, idle_cnt
);
1180 dd
->flags
|= HFI1_HAS_SEND_DMA
;
1181 dd
->flags
|= idle_cnt
? HFI1_HAS_SDMA_TIMEOUT
: 0;
1182 dd
->num_sdma
= num_engines
;
1183 if (sdma_map_init(dd
, port
, ppd
->vls_operational
, NULL
))
1185 dd_dev_info(dd
, "SDMA num_sdma: %u\n", dd
->num_sdma
);
1189 sdma_clean(dd
, num_engines
);
1194 * sdma_all_running() - called when the link goes up
1197 * This routine moves all engines to the running state.
1199 void sdma_all_running(struct hfi1_devdata
*dd
)
1201 struct sdma_engine
*sde
;
1204 /* move all engines to running */
1205 for (i
= 0; i
< dd
->num_sdma
; ++i
) {
1206 sde
= &dd
->per_sdma
[i
];
1207 sdma_process_event(sde
, sdma_event_e30_go_running
);
1212 * sdma_all_idle() - called when the link goes down
1215 * This routine moves all engines to the idle state.
1217 void sdma_all_idle(struct hfi1_devdata
*dd
)
1219 struct sdma_engine
*sde
;
1222 /* idle all engines */
1223 for (i
= 0; i
< dd
->num_sdma
; ++i
) {
1224 sde
= &dd
->per_sdma
[i
];
1225 sdma_process_event(sde
, sdma_event_e70_go_idle
);
1230 * sdma_start() - called to kick off state processing for all engines
1233 * This routine is for kicking off the state processing for all required
1234 * sdma engines. Interrupts need to be working at this point.
1237 void sdma_start(struct hfi1_devdata
*dd
)
1240 struct sdma_engine
*sde
;
1242 /* kick off the engines state processing */
1243 for (i
= 0; i
< dd
->num_sdma
; ++i
) {
1244 sde
= &dd
->per_sdma
[i
];
1245 sdma_process_event(sde
, sdma_event_e10_go_hw_start
);
1250 * sdma_exit() - used when module is removed
1253 void sdma_exit(struct hfi1_devdata
*dd
)
1256 struct sdma_engine
*sde
;
1258 for (this_idx
= 0; dd
->per_sdma
&& this_idx
< dd
->num_sdma
;
1261 sde
= &dd
->per_sdma
[this_idx
];
1262 if (!list_empty(&sde
->dmawait
))
1263 dd_dev_err(dd
, "sde %u: dmawait list not empty!\n",
1265 sdma_process_event(sde
, sdma_event_e00_go_hw_down
);
1267 del_timer_sync(&sde
->err_progress_check_timer
);
1270 * This waits for the state machine to exit so it is not
1271 * necessary to kill the sdma_sw_clean_up_task to make sure
1272 * it is not running.
1274 sdma_finalput(&sde
->state
);
1276 sdma_clean(dd
, dd
->num_sdma
);
1280 * unmap the indicated descriptor
1282 static inline void sdma_unmap_desc(
1283 struct hfi1_devdata
*dd
,
1284 struct sdma_desc
*descp
)
1286 switch (sdma_mapping_type(descp
)) {
1287 case SDMA_MAP_SINGLE
:
1290 sdma_mapping_addr(descp
),
1291 sdma_mapping_len(descp
),
1297 sdma_mapping_addr(descp
),
1298 sdma_mapping_len(descp
),
1305 * return the mode as indicated by the first
1306 * descriptor in the tx.
1308 static inline u8
ahg_mode(struct sdma_txreq
*tx
)
1310 return (tx
->descp
[0].qw
[1] & SDMA_DESC1_HEADER_MODE_SMASK
)
1311 >> SDMA_DESC1_HEADER_MODE_SHIFT
;
1315 * sdma_txclean() - clean tx of mappings, descp *kmalloc's
1316 * @dd: hfi1_devdata for unmapping
1317 * @tx: tx request to clean
1319 * This is used in the progress routine to clean the tx or
1320 * by the ULP to toss an in-process tx build.
1322 * The code can be called multiple times without issue.
1326 struct hfi1_devdata
*dd
,
1327 struct sdma_txreq
*tx
)
1332 u8 skip
= 0, mode
= ahg_mode(tx
);
1335 sdma_unmap_desc(dd
, &tx
->descp
[0]);
1336 /* determine number of AHG descriptors to skip */
1337 if (mode
> SDMA_AHG_APPLY_UPDATE1
)
1339 for (i
= 1 + skip
; i
< tx
->num_desc
; i
++)
1340 sdma_unmap_desc(dd
, &tx
->descp
[i
]);
1343 kfree(tx
->coalesce_buf
);
1344 tx
->coalesce_buf
= NULL
;
1345 /* kmalloc'ed descp */
1346 if (unlikely(tx
->desc_limit
> ARRAY_SIZE(tx
->descs
))) {
1347 tx
->desc_limit
= ARRAY_SIZE(tx
->descs
);
1352 static inline u16
sdma_gethead(struct sdma_engine
*sde
)
1354 struct hfi1_devdata
*dd
= sde
->dd
;
1358 #ifdef CONFIG_SDMA_VERBOSITY
1359 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
1360 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
1364 use_dmahead
= HFI1_CAP_IS_KSET(USE_SDMA_HEAD
) && __sdma_running(sde
) &&
1365 (dd
->flags
& HFI1_HAS_SDMA_TIMEOUT
);
1366 hwhead
= use_dmahead
?
1367 (u16
) le64_to_cpu(*sde
->head_dma
) :
1368 (u16
) read_sde_csr(sde
, SD(HEAD
));
1370 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK
))) {
1376 swhead
= sde
->descq_head
& sde
->sdma_mask
;
1377 /* this code is really bad for cache line trading */
1378 swtail
= ACCESS_ONCE(sde
->descq_tail
) & sde
->sdma_mask
;
1379 cnt
= sde
->descq_cnt
;
1381 if (swhead
< swtail
)
1383 sane
= (hwhead
>= swhead
) & (hwhead
<= swtail
);
1384 else if (swhead
> swtail
)
1385 /* wrapped around */
1386 sane
= ((hwhead
>= swhead
) && (hwhead
< cnt
)) ||
1390 sane
= (hwhead
== swhead
);
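/*
 * The hardware head is "sane" only if it falls inside the software window
 * [swhead, swtail], accounting for ring wrap; anything else means hardware
 * and driver disagree, so the head is re-read once from the CSR and then
 * treated as no progress.
 */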
1392 if (unlikely(!sane
)) {
1393 dd_dev_err(dd
, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1395 use_dmahead
? "dma" : "kreg",
1396 hwhead
, swhead
, swtail
, cnt
);
1398 /* try one more time, using csr */
1402 /* proceed as if no progress */
1410 * This is called when there are send DMA descriptors that might be
1413 * This is called with head_lock held.
1415 static void sdma_desc_avail(struct sdma_engine
*sde
, unsigned avail
)
1417 struct iowait
*wait
, *nw
;
1418 struct iowait
*waits
[SDMA_WAIT_BATCH_SIZE
];
1419 unsigned i
, n
= 0, seq
;
1420 struct sdma_txreq
*stx
;
1421 struct hfi1_ibdev
*dev
= &sde
->dd
->verbs_dev
;
1423 #ifdef CONFIG_SDMA_VERBOSITY
1424 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n", sde
->this_idx
,
1425 slashstrip(__FILE__
), __LINE__
, __func__
);
1426 dd_dev_err(sde
->dd
, "avail: %u\n", avail
);
1430 seq
= read_seqbegin(&dev
->iowait_lock
);
1431 if (!list_empty(&sde
->dmawait
)) {
1432 /* at least one item */
1433 write_seqlock(&dev
->iowait_lock
);
1434 /* Harvest waiters wanting DMA descriptors */
1435 list_for_each_entry_safe(
1444 if (n
== ARRAY_SIZE(waits
))
1446 if (!list_empty(&wait
->tx_head
)) {
1447 stx
= list_first_entry(
1451 num_desc
= stx
->num_desc
;
1453 if (num_desc
> avail
)
1456 list_del_init(&wait
->list
);
1459 write_sequnlock(&dev
->iowait_lock
);
1462 } while (read_seqretry(&dev
->iowait_lock
, seq
));
1464 for (i
= 0; i
< n
; i
++)
1465 waits
[i
]->wakeup(waits
[i
], SDMA_AVAIL_REASON
);
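/*
 * Waiters are harvested from sde->dmawait under dev->iowait_lock, but
 * their wakeup() callbacks run only after the lock has been dropped,
 * at most SDMA_WAIT_BATCH_SIZE of them per pass.
 */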
1468 /* head_lock must be held */
1469 static void sdma_make_progress(struct sdma_engine
*sde
, u64 status
)
1471 struct sdma_txreq
*txp
= NULL
;
1473 u16 hwhead
, swhead
, swtail
;
1474 int idle_check_done
= 0;
1476 hwhead
= sdma_gethead(sde
);
1478 /* The reason for some of the complexity of this code is that
1479 * not all descriptors have corresponding txps. So, we have to
1480 * be able to skip over descs until we wander into the range of
1481 * the next txp on the list.
1485 txp
= get_txhead(sde
);
1486 swhead
= sde
->descq_head
& sde
->sdma_mask
;
1487 trace_hfi1_sdma_progress(sde
, hwhead
, swhead
, txp
);
1488 while (swhead
!= hwhead
) {
1489 /* advance head, wrap if needed */
1490 swhead
= ++sde
->descq_head
& sde
->sdma_mask
;
1492 /* if now past this txp's descs, do the callback */
1493 if (txp
&& txp
->next_descq_idx
== swhead
) {
1495 /* protect against complete modifying */
1496 struct iowait
*wait
= txp
->wait
;
1498 /* remove from list */
1499 sde
->tx_ring
[sde
->tx_head
++ & sde
->sdma_mask
] = NULL
;
1501 drained
= atomic_dec_and_test(&wait
->sdma_busy
);
1502 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
1503 trace_hfi1_sdma_out_sn(sde
, txp
->sn
);
1504 if (WARN_ON_ONCE(sde
->head_sn
!= txp
->sn
))
1505 dd_dev_err(sde
->dd
, "expected %llu got %llu\n",
1506 sde
->head_sn
, txp
->sn
);
1509 sdma_txclean(sde
->dd
, txp
);
1515 if (wait
&& drained
)
1516 iowait_drain_wakeup(wait
);
1517 /* see if there is another txp */
1518 txp
= get_txhead(sde
);
1520 trace_hfi1_sdma_progress(sde
, hwhead
, swhead
, txp
);
1525 * The SDMA idle interrupt is not guaranteed to be ordered with respect
1526 * to updates to the dma_head location in host memory. The head
1527 * value read might not be fully up to date. If there are pending
1528 * descriptors and the SDMA idle interrupt fired then read from the
1529 * CSR SDMA head instead to get the latest value from the hardware.
1530 * The hardware SDMA head should be read at most once in this invocation
1531 * of sdma_make_progress(..) which is ensured by idle_check_done flag
1533 if ((status
& sde
->idle_mask
) && !idle_check_done
) {
1534 swtail
= ACCESS_ONCE(sde
->descq_tail
) & sde
->sdma_mask
;
1535 if (swtail
!= hwhead
) {
1536 hwhead
= (u16
)read_sde_csr(sde
, SD(HEAD
));
1537 idle_check_done
= 1;
1542 sde
->last_status
= status
;
1544 sdma_desc_avail(sde
, sdma_descq_freecnt(sde
));
1548 * sdma_engine_interrupt() - interrupt handler for engine
1550 * @status: sdma interrupt reason
1552 * Status is a mask of the 3 possible interrupts for this engine. It will
1553 * contain bits _only_ for this SDMA engine. It will contain at least one
1554 * bit; it may contain more.
1556 void sdma_engine_interrupt(struct sdma_engine
*sde
, u64 status
)
1558 trace_hfi1_sdma_engine_interrupt(sde
, status
);
1559 write_seqlock(&sde
->head_lock
);
1560 sdma_set_desc_cnt(sde
, sdma_desct_intr
);
1561 if (status
& sde
->idle_mask
)
1562 sde
->idle_int_cnt
++;
1563 else if (status
& sde
->progress_mask
)
1564 sde
->progress_int_cnt
++;
1565 else if (status
& sde
->int_mask
)
1566 sde
->sdma_int_cnt
++;
1567 sdma_make_progress(sde
, status
);
1568 write_sequnlock(&sde
->head_lock
);
1572 * sdma_engine_error() - error handler for engine
1574 * @status: sdma interrupt reason
1576 void sdma_engine_error(struct sdma_engine
*sde
, u64 status
)
1578 unsigned long flags
;
1580 #ifdef CONFIG_SDMA_VERBOSITY
1581 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1583 (unsigned long long)status
,
1584 sdma_state_names
[sde
->state
.current_state
]);
1586 spin_lock_irqsave(&sde
->tail_lock
, flags
);
1587 write_seqlock(&sde
->head_lock
);
1588 if (status
& ALL_SDMA_ENG_HALT_ERRS
)
1589 __sdma_process_event(sde
, sdma_event_e60_hw_halted
);
1590 if (status
& ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK
)) {
1592 "SDMA (%u) engine error: 0x%llx state %s\n",
1594 (unsigned long long)status
,
1595 sdma_state_names
[sde
->state
.current_state
]);
1596 dump_sdma_state(sde
);
1598 write_sequnlock(&sde
->head_lock
);
1599 spin_unlock_irqrestore(&sde
->tail_lock
, flags
);
1602 static void sdma_sendctrl(struct sdma_engine
*sde
, unsigned op
)
1604 u64 set_senddmactrl
= 0;
1605 u64 clr_senddmactrl
= 0;
1606 unsigned long flags
;
1608 #ifdef CONFIG_SDMA_VERBOSITY
1609 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1611 (op
& SDMA_SENDCTRL_OP_ENABLE
) ? 1 : 0,
1612 (op
& SDMA_SENDCTRL_OP_INTENABLE
) ? 1 : 0,
1613 (op
& SDMA_SENDCTRL_OP_HALT
) ? 1 : 0,
1614 (op
& SDMA_SENDCTRL_OP_CLEANUP
) ? 1 : 0);
1617 if (op
& SDMA_SENDCTRL_OP_ENABLE
)
1618 set_senddmactrl
|= SD(CTRL_SDMA_ENABLE_SMASK
);
1620 clr_senddmactrl
|= SD(CTRL_SDMA_ENABLE_SMASK
);
1622 if (op
& SDMA_SENDCTRL_OP_INTENABLE
)
1623 set_senddmactrl
|= SD(CTRL_SDMA_INT_ENABLE_SMASK
);
1625 clr_senddmactrl
|= SD(CTRL_SDMA_INT_ENABLE_SMASK
);
1627 if (op
& SDMA_SENDCTRL_OP_HALT
)
1628 set_senddmactrl
|= SD(CTRL_SDMA_HALT_SMASK
);
1630 clr_senddmactrl
|= SD(CTRL_SDMA_HALT_SMASK
);
1632 spin_lock_irqsave(&sde
->senddmactrl_lock
, flags
);
1634 sde
->p_senddmactrl
|= set_senddmactrl
;
1635 sde
->p_senddmactrl
&= ~clr_senddmactrl
;
1637 if (op
& SDMA_SENDCTRL_OP_CLEANUP
)
1638 write_sde_csr(sde
, SD(CTRL
),
1639 sde
->p_senddmactrl
|
1640 SD(CTRL_SDMA_CLEANUP_SMASK
));
1642 write_sde_csr(sde
, SD(CTRL
), sde
->p_senddmactrl
);
1644 spin_unlock_irqrestore(&sde
->senddmactrl_lock
, flags
);
1646 #ifdef CONFIG_SDMA_VERBOSITY
1647 sdma_dumpstate(sde
);
1651 static void sdma_setlengen(struct sdma_engine
*sde
)
1653 #ifdef CONFIG_SDMA_VERBOSITY
1654 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
1655 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
1659 * Set SendDmaLenGen and clear-then-set the MSB of the generation
1660 * count to enable generation checking and load the internal
1661 * generation counter.
1663 write_sde_csr(sde
, SD(LEN_GEN
),
1664 (sde
->descq_cnt
/64) << SD(LEN_GEN_LENGTH_SHIFT
)
1666 write_sde_csr(sde
, SD(LEN_GEN
),
1667 ((sde
->descq_cnt
/64) << SD(LEN_GEN_LENGTH_SHIFT
))
1668 | (4ULL << SD(LEN_GEN_GENERATION_SHIFT
))
1672 static inline void sdma_update_tail(struct sdma_engine
*sde
, u16 tail
)
1674 /* Commit writes to memory and advance the tail on the chip */
1675 smp_wmb(); /* see get_txhead() */
1676 writeq(tail
, sde
->tail_csr
);
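/*
 * The smp_wmb() above orders the descriptor/tx_ring stores against this
 * doorbell write; the matching read side is the smp_read_barrier_depends()
 * in get_txhead() (see the cross-referencing comments on both).
 */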
1680 * This is called when changing to state s10_hw_start_up_halt_wait as
1681 * a result of send buffer errors or send DMA descriptor errors.
1683 static void sdma_hw_start_up(struct sdma_engine
*sde
)
1687 #ifdef CONFIG_SDMA_VERBOSITY
1688 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
1689 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
1692 sdma_setlengen(sde
);
1693 sdma_update_tail(sde
, 0); /* Set SendDmaTail */
1696 reg
= SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK
) <<
1697 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT
);
1698 write_sde_csr(sde
, SD(ENG_ERR_CLEAR
), reg
);
1701 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
1702 (r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
1704 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
1705 (r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
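/*
 * Note the inverted sense: the CSR bit is "disallow PBC static rate
 * control", so set_sdma_integrity() clears it when the STATIC_RATE_CTRL
 * capability is set (allowing static rate control) and sets it otherwise.
 */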
1707 * set_sdma_integrity
1709 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
1711 static void set_sdma_integrity(struct sdma_engine
*sde
)
1713 struct hfi1_devdata
*dd
= sde
->dd
;
1716 if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY
)))
1719 reg
= hfi1_pkt_base_sdma_integrity(dd
);
1721 if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL
))
1722 CLEAR_STATIC_RATE_CONTROL_SMASK(reg
);
1724 SET_STATIC_RATE_CONTROL_SMASK(reg
);
1726 write_sde_csr(sde
, SD(CHECK_ENABLE
), reg
);
1730 static void init_sdma_regs(
1731 struct sdma_engine
*sde
,
1736 #ifdef CONFIG_SDMA_VERBOSITY
1737 struct hfi1_devdata
*dd
= sde
->dd
;
1739 dd_dev_err(dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
1740 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
1743 write_sde_csr(sde
, SD(BASE_ADDR
), sde
->descq_phys
);
1744 sdma_setlengen(sde
);
1745 sdma_update_tail(sde
, 0); /* Set SendDmaTail */
1746 write_sde_csr(sde
, SD(RELOAD_CNT
), idle_cnt
);
1747 write_sde_csr(sde
, SD(DESC_CNT
), 0);
1748 write_sde_csr(sde
, SD(HEAD_ADDR
), sde
->head_phys
);
1749 write_sde_csr(sde
, SD(MEMORY
),
1751 SD(MEMORY_SDMA_MEMORY_CNT_SHIFT
)) |
1752 ((u64
)(credits
* sde
->this_idx
) <<
1753 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT
)));
1754 write_sde_csr(sde
, SD(ENG_ERR_MASK
), ~0ull);
1755 set_sdma_integrity(sde
);
1756 opmask
= OPCODE_CHECK_MASK_DISABLED
;
1757 opval
= OPCODE_CHECK_VAL_DISABLED
;
1758 write_sde_csr(sde
, SD(CHECK_OPCODE
),
1759 (opmask
<< SEND_CTXT_CHECK_OPCODE_MASK_SHIFT
) |
1760 (opval
<< SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT
));
1763 #ifdef CONFIG_SDMA_VERBOSITY
1765 #define sdma_dumpstate_helper0(reg) do { \
1766 csr = read_csr(sde->dd, reg); \
1767 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
1770 #define sdma_dumpstate_helper(reg) do { \
1771 csr = read_sde_csr(sde, reg); \
1772 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
1773 #reg, sde->this_idx, csr); \
1776 #define sdma_dumpstate_helper2(reg) do { \
1777 csr = read_csr(sde->dd, reg + (8 * i)); \
1778 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
1782 void sdma_dumpstate(struct sdma_engine
*sde
)
1787 sdma_dumpstate_helper(SD(CTRL
));
1788 sdma_dumpstate_helper(SD(STATUS
));
1789 sdma_dumpstate_helper0(SD(ERR_STATUS
));
1790 sdma_dumpstate_helper0(SD(ERR_MASK
));
1791 sdma_dumpstate_helper(SD(ENG_ERR_STATUS
));
1792 sdma_dumpstate_helper(SD(ENG_ERR_MASK
));
1794 for (i
= 0; i
< CCE_NUM_INT_CSRS
; ++i
) {
1795 sdma_dumpstate_helper2(CCE_INT_STATUS
);
1796 sdma_dumpstate_helper2(CCE_INT_MASK
);
1797 sdma_dumpstate_helper2(CCE_INT_BLOCKED
);
1800 sdma_dumpstate_helper(SD(TAIL
));
1801 sdma_dumpstate_helper(SD(HEAD
));
1802 sdma_dumpstate_helper(SD(PRIORITY_THLD
));
1803 sdma_dumpstate_helper(SD(IDLE_CNT
));
1804 sdma_dumpstate_helper(SD(RELOAD_CNT
));
1805 sdma_dumpstate_helper(SD(DESC_CNT
));
1806 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT
));
1807 sdma_dumpstate_helper(SD(MEMORY
));
1808 sdma_dumpstate_helper0(SD(ENGINES
));
1809 sdma_dumpstate_helper0(SD(MEM_SIZE
));
1810 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
1811 sdma_dumpstate_helper(SD(BASE_ADDR
));
1812 sdma_dumpstate_helper(SD(LEN_GEN
));
1813 sdma_dumpstate_helper(SD(HEAD_ADDR
));
1814 sdma_dumpstate_helper(SD(CHECK_ENABLE
));
1815 sdma_dumpstate_helper(SD(CHECK_VL
));
1816 sdma_dumpstate_helper(SD(CHECK_JOB_KEY
));
1817 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY
));
1818 sdma_dumpstate_helper(SD(CHECK_SLID
));
1819 sdma_dumpstate_helper(SD(CHECK_OPCODE
));
1823 static void dump_sdma_state(struct sdma_engine
*sde
)
1825 struct hw_sdma_desc
*descq
;
1826 struct hw_sdma_desc
*descqp
;
1831 u16 head
, tail
, cnt
;
1833 head
= sde
->descq_head
& sde
->sdma_mask
;
1834 tail
= sde
->descq_tail
& sde
->sdma_mask
;
1835 cnt
= sdma_descq_freecnt(sde
);
1839 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
1844 !list_empty(&sde
->flushlist
));
1846 /* print info for each entry in the descriptor queue */
1847 while (head
!= tail
) {
1848 char flags
[6] = { 'x', 'x', 'x', 'x', 0 };
1850 descqp
= &sde
->descq
[head
];
1851 desc
[0] = le64_to_cpu(descqp
->qw
[0]);
1852 desc
[1] = le64_to_cpu(descqp
->qw
[1]);
1853 flags
[0] = (desc
[1] & SDMA_DESC1_INT_REQ_FLAG
) ? 'I' : '-';
1854 flags
[1] = (desc
[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG
) ?
1856 flags
[2] = (desc
[0] & SDMA_DESC0_FIRST_DESC_FLAG
) ? 'F' : '-';
1857 flags
[3] = (desc
[0] & SDMA_DESC0_LAST_DESC_FLAG
) ? 'L' : '-';
1858 addr
= (desc
[0] >> SDMA_DESC0_PHY_ADDR_SHIFT
)
1859 & SDMA_DESC0_PHY_ADDR_MASK
;
1860 gen
= (desc
[1] >> SDMA_DESC1_GENERATION_SHIFT
)
1861 & SDMA_DESC1_GENERATION_MASK
;
1862 len
= (desc
[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT
)
1863 & SDMA_DESC0_BYTE_COUNT_MASK
;
1865 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1866 head
, flags
, addr
, gen
, len
);
1868 "\tdesc0:0x%016llx desc1 0x%016llx\n",
1870 if (desc
[0] & SDMA_DESC0_FIRST_DESC_FLAG
)
1872 "\taidx: %u amode: %u alen: %u\n",
1873 (u8
)((desc
[1] & SDMA_DESC1_HEADER_INDEX_SMASK
)
1874 >> SDMA_DESC1_HEADER_INDEX_SHIFT
),
1875 (u8
)((desc
[1] & SDMA_DESC1_HEADER_MODE_SMASK
)
1876 >> SDMA_DESC1_HEADER_MODE_SHIFT
),
1877 (u8
)((desc
[1] & SDMA_DESC1_HEADER_DWS_SMASK
)
1878 >> SDMA_DESC1_HEADER_DWS_SHIFT
));
1880 head
&= sde
->sdma_mask
;
1885 "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
1887 * sdma_seqfile_dump_sde() - debugfs dump of sde
1889 * @sde: send dma engine to dump
1891 * This routine dumps the sde to the indicated seq file.
1893 void sdma_seqfile_dump_sde(struct seq_file
*s
, struct sdma_engine
*sde
)
1896 struct hw_sdma_desc
*descqp
;
1902 head
= sde
->descq_head
& sde
->sdma_mask
;
1903 tail
= ACCESS_ONCE(sde
->descq_tail
) & sde
->sdma_mask
;
1904 seq_printf(s
, SDE_FMT
, sde
->this_idx
,
1906 sdma_state_name(sde
->state
.current_state
),
1907 (unsigned long long)read_sde_csr(sde
, SD(CTRL
)),
1908 (unsigned long long)read_sde_csr(sde
, SD(STATUS
)),
1909 (unsigned long long)read_sde_csr(sde
,
1910 SD(ENG_ERR_STATUS
)),
1911 (unsigned long long)read_sde_csr(sde
, SD(TAIL
)),
1913 (unsigned long long)read_sde_csr(sde
, SD(HEAD
)),
1915 (unsigned long long)le64_to_cpu(*sde
->head_dma
),
1916 (unsigned long long)read_sde_csr(sde
, SD(MEMORY
)),
1917 (unsigned long long)read_sde_csr(sde
, SD(LEN_GEN
)),
1918 (unsigned long long)read_sde_csr(sde
, SD(RELOAD_CNT
)),
1919 (unsigned long long)sde
->last_status
,
1920 (unsigned long long)sde
->ahg_bits
,
1925 !list_empty(&sde
->flushlist
),
1926 sde
->descq_full_count
,
1927 (unsigned long long)read_sde_csr(sde
, SEND_DMA_CHECK_SLID
));
1929 /* print info for each entry in the descriptor queue */
1930 while (head
!= tail
) {
1931 char flags
[6] = { 'x', 'x', 'x', 'x', 0 };
1933 descqp
= &sde
->descq
[head
];
1934 desc
[0] = le64_to_cpu(descqp
->qw
[0]);
1935 desc
[1] = le64_to_cpu(descqp
->qw
[1]);
1936 flags
[0] = (desc
[1] & SDMA_DESC1_INT_REQ_FLAG
) ? 'I' : '-';
1937 flags
[1] = (desc
[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG
) ?
1939 flags
[2] = (desc
[0] & SDMA_DESC0_FIRST_DESC_FLAG
) ? 'F' : '-';
1940 flags
[3] = (desc
[0] & SDMA_DESC0_LAST_DESC_FLAG
) ? 'L' : '-';
1941 addr
= (desc
[0] >> SDMA_DESC0_PHY_ADDR_SHIFT
)
1942 & SDMA_DESC0_PHY_ADDR_MASK
;
1943 gen
= (desc
[1] >> SDMA_DESC1_GENERATION_SHIFT
)
1944 & SDMA_DESC1_GENERATION_MASK
;
1945 len
= (desc
[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT
)
1946 & SDMA_DESC0_BYTE_COUNT_MASK
;
1948 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1949 head
, flags
, addr
, gen
, len
);
1950 if (desc
[0] & SDMA_DESC0_FIRST_DESC_FLAG
)
1951 seq_printf(s
, "\t\tahgidx: %u ahgmode: %u\n",
1952 (u8
)((desc
[1] & SDMA_DESC1_HEADER_INDEX_SMASK
)
1953 >> SDMA_DESC1_HEADER_INDEX_SHIFT
),
1954 (u8
)((desc
[1] & SDMA_DESC1_HEADER_MODE_SMASK
)
1955 >> SDMA_DESC1_HEADER_MODE_SHIFT
));
1956 head
= (head
+ 1) & sde
->sdma_mask
;
1961 * add the generation number into
1962 * the qw1 and return
1964 static inline u64
add_gen(struct sdma_engine
*sde
, u64 qw1
)
1966 u8 generation
= (sde
->descq_tail
>> sde
->sdma_shift
) & 3;
1968 qw1
&= ~SDMA_DESC1_GENERATION_SMASK
;
1969 qw1
|= ((u64
)generation
& SDMA_DESC1_GENERATION_MASK
)
1970 << SDMA_DESC1_GENERATION_SHIFT
;
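/*
 * The 2-bit generation is the number of times descq_tail has wrapped the
 * ring, modulo 4 (descq_tail >> sdma_shift, masked to two bits). The
 * engine keeps its own generation counter, loaded via sdma_setlengen(),
 * which is what the SDMA_GEN_MISMATCH engine error checks against.
 */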
1975 * This routine submits the indicated tx
1977 * Space has already been guaranteed and
1978 * tail side of ring is locked.
1980 * The hardware tail update is done
1981 * in the caller and that is facilitated
1982 * by returning the new tail.
1984 * There is special case logic for ahg
1985 * to not add the generation number for
1986 * up to 2 descriptors that follow the
1990 static inline u16
submit_tx(struct sdma_engine
*sde
, struct sdma_txreq
*tx
)
1994 struct sdma_desc
*descp
= tx
->descp
;
1995 u8 skip
= 0, mode
= ahg_mode(tx
);
1997 tail
= sde
->descq_tail
& sde
->sdma_mask
;
1998 sde
->descq
[tail
].qw
[0] = cpu_to_le64(descp
->qw
[0]);
1999 sde
->descq
[tail
].qw
[1] = cpu_to_le64(add_gen(sde
, descp
->qw
[1]));
2000 trace_hfi1_sdma_descriptor(sde
, descp
->qw
[0], descp
->qw
[1],
2001 tail
, &sde
->descq
[tail
]);
2002 tail
= ++sde
->descq_tail
& sde
->sdma_mask
;
2004 if (mode
> SDMA_AHG_APPLY_UPDATE1
)
2006 for (i
= 1; i
< tx
->num_desc
; i
++, descp
++) {
2009 sde
->descq
[tail
].qw
[0] = cpu_to_le64(descp
->qw
[0]);
2011 /* edits don't have generation */
2015 /* replace generation with real one for non-edits */
2016 qw1
= add_gen(sde
, descp
->qw
[1]);
2018 sde
->descq
[tail
].qw
[1] = cpu_to_le64(qw1
);
2019 trace_hfi1_sdma_descriptor(sde
, descp
->qw
[0], qw1
,
2020 tail
, &sde
->descq
[tail
]);
2021 tail
= ++sde
->descq_tail
& sde
->sdma_mask
;
2023 tx
->next_descq_idx
= tail
;
2024 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2025 tx
->sn
= sde
->tail_sn
++;
2026 trace_hfi1_sdma_in_sn(sde
, tx
->sn
);
2027 WARN_ON_ONCE(sde
->tx_ring
[sde
->tx_tail
& sde
->sdma_mask
]);
2029 sde
->tx_ring
[sde
->tx_tail
++ & sde
->sdma_mask
] = tx
;
2030 sde
->desc_avail
-= tx
->num_desc
;
2035 * Check for progress
2037 static int sdma_check_progress(
2038 struct sdma_engine
*sde
,
2039 struct iowait
*wait
,
2040 struct sdma_txreq
*tx
)
2044 sde
->desc_avail
= sdma_descq_freecnt(sde
);
2045 if (tx
->num_desc
<= sde
->desc_avail
)
2047 /* pulse the head_lock */
2048 if (wait
&& wait
->sleep
) {
2051 seq
= raw_seqcount_begin(
2052 (const seqcount_t
*)&sde
->head_lock
.seqcount
);
2053 ret
= wait
->sleep(sde
, wait
, tx
, seq
);
2055 sde
->desc_avail
= sdma_descq_freecnt(sde
);
2062 * sdma_send_txreq() - submit a tx req to ring
2063 * @sde: sdma engine to use
2064 * @wait: wait structure to use when full (may be NULL)
2065 * @tx: sdma_txreq to submit
2067 * The call submits the tx into the ring. If an iowait structure is non-NULL
2068 * the packet will be queued to the list in wait.
2071 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
2072 * ring (wait == NULL)
2073 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
2075 int sdma_send_txreq(struct sdma_engine
*sde
,
2076 struct iowait
*wait
,
2077 struct sdma_txreq
*tx
)
2081 unsigned long flags
;
2083 /* user should have supplied entire packet */
2084 if (unlikely(tx
->tlen
))
2087 spin_lock_irqsave(&sde
->tail_lock
, flags
);
2089 if (unlikely(!__sdma_running(sde
)))
2091 if (unlikely(tx
->num_desc
> sde
->desc_avail
))
2093 tail
= submit_tx(sde
, tx
);
2095 atomic_inc(&wait
->sdma_busy
);
2096 sdma_update_tail(sde
, tail
);
2098 spin_unlock_irqrestore(&sde
->tail_lock
, flags
);
2102 atomic_inc(&wait
->sdma_busy
);
2103 tx
->next_descq_idx
= 0;
2104 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2105 tx
->sn
= sde
->tail_sn
++;
2106 trace_hfi1_sdma_in_sn(sde
, tx
->sn
);
2108 spin_lock(&sde
->flushlist_lock
);
2109 list_add_tail(&tx
->list
, &sde
->flushlist
);
2110 spin_unlock(&sde
->flushlist_lock
);
2113 wait
->count
+= tx
->num_desc
;
2115 schedule_work(&sde
->flush_worker
);
2119 ret
= sdma_check_progress(sde
, wait
, tx
);
2120 if (ret
== -EAGAIN
) {
2124 sde
->descq_full_count
++;
/**
 * sdma_send_txlist() - submit a list of tx req to ring
 * @sde: sdma engine to use
 * @wait: wait structure to use when full (may be NULL)
 * @tx_list: list of sdma_txreqs to submit
 *
 * The call submits the list into the ring.
 *
 * If the iowait structure is non-NULL and not equal to the iowait list
 * the unprocessed part of the list will be appended to the list in wait.
 *
 * In all cases, the tx_list will be updated so the head of the tx_list is
 * the list of descriptors that have yet to be transmitted.
 *
 * The intent of this call is to provide a more efficient
 * way of submitting multiple packets to SDMA while holding the tail
 * side locking.
 *
 * Return:
 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring
 * (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
int sdma_send_txlist(struct sdma_engine *sde,
		     struct iowait *wait,
		     struct list_head *tx_list)
{
	struct sdma_txreq *tx, *tx_next;
	int ret = 0;
	unsigned long flags;
	u16 tail = INVALID_TAIL;
	int count = 0;

	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = wait;
		if (unlikely(!__sdma_running(sde)))
			goto unlock_noconn;
		if (unlikely(tx->num_desc > sde->desc_avail))
			goto nodesc;
		if (unlikely(tx->tlen)) {
			ret = -EINVAL;
			goto update_tail;
		}
		list_del_init(&tx->list);
		tail = submit_tx(sde, tx);
		count++;
		if (tail != INVALID_TAIL &&
		    (count & SDMA_TAIL_UPDATE_THRESH) == 0) {
			sdma_update_tail(sde, tail);
			tail = INVALID_TAIL;
		}
	}
update_tail:
	if (wait)
		atomic_add(count, &wait->sdma_busy);
	if (tail != INVALID_TAIL)
		sdma_update_tail(sde, tail);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	return ret;
unlock_noconn:
	spin_lock(&sde->flushlist_lock);
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = wait;
		list_del_init(&tx->list);
		if (wait)
			atomic_inc(&wait->sdma_busy);
		tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
		tx->sn = sde->tail_sn++;
		trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
		list_add_tail(&tx->list, &sde->flushlist);
		if (wait) {
			wait->tx_count++;
			wait->count += tx->num_desc;
		}
	}
	spin_unlock(&sde->flushlist_lock);
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto update_tail;
nodesc:
	ret = sdma_check_progress(sde, wait, tx);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto update_tail;
}

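/*
 * Illustrative only: batching several txreqs through sdma_send_txlist()
 * amortizes the tail lock and the tail doorbell over the whole list.  The
 * txreqs[] array and priv->s_iowait below are hypothetical caller-side
 * names; each txreq is assumed to be fully built before queuing.
 *
 *	LIST_HEAD(txq);
 *
 *	for (i = 0; i < npkts; i++)
 *		list_add_tail(&txreqs[i].list, &txq);
 *	ret = sdma_send_txlist(sde, &priv->s_iowait, &txq);
 *	// on return, txq holds only the packets not yet submitted
 */
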
static void sdma_process_event(struct sdma_engine *sde,
			       enum sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	__sdma_process_event(sde, event);

	if (sde->state.current_state == sdma_state_s99_running)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}

static void __sdma_process_event(struct sdma_engine *sde,
				 enum sdma_events event)
{
	struct sdma_state *ss = &sde->state;
	int need_progress = 0;

	/* CONFIG SDMA temporary */
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
		   sdma_state_names[ss->current_state],
		   sdma_event_names[event]);
#endif

	switch (ss->current_state) {
	case sdma_state_s00_hw_down:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			break;
		case sdma_event_e30_go_running:
			/*
			 * If down, but running is requested (usually the
			 * result of a link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active.
			 */
			ss->go_s99_running = 1;
			/* fall through and start dma engine */
		case sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&sde->state);
			sdma_set_state(sde,
				       sdma_state_s10_hw_start_up_halt_wait);
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s10_hw_start_up_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde,
				       sdma_state_s15_hw_start_up_clean_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s15_hw_start_up_clean_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s20_idle:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			sdma_set_state(sde, sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s50_hw_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s60_idle_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s80_hw_freeze:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s82_freeze_sw_clean:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			/* notify caller this engine is done cleaning */
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s99_running:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			need_progress = 1;
			sdma_err_progress_check_schedule(sde);
		case sdma_event_e90_sw_halted:
			/*
			 * SW initiated halt does not perform engine
			 * progress check
			 */
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		}
		break;
	}

	ss->last_event = event;
	if (need_progress)
		sdma_make_progress(sde, 0);
}

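/*
 * For reference, one common error-recovery walk through the state machine
 * above (a sketch of the usual sequence, not an exhaustive trace):
 *
 *	s99_running          --e60_hw_halted------>  s50_hw_halt_wait
 *	s50_hw_halt_wait     --e15_hw_halt_done--->  s30_sw_clean_up_wait
 *	s30_sw_clean_up_wait --e40_sw_cleaned----->  s40_hw_clean_up_wait
 *	s40_hw_clean_up_wait --e25_hw_clean_up_done-> s99_running or
 *	                       s20_idle, depending on ss->go_s99_running
 */
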
/*
 * _extend_sdma_tx_descs() - helper to extend txreq
 *
 * This is called once the initial nominal allocation
 * of descriptors in the sdma_txreq is exhausted.
 *
 * The code will bump the allocation up to the max
 * of MAX_DESC (64) descriptors.  There doesn't seem to be
 * much point in an interim step.  The last descriptor
 * is reserved for the coalesce buffer in order to support
 * cases where the input packet has >MAX_DESC iovecs.
 */
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int i;

	/* Handle last descriptor */
	if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
		/* if tlen is 0, it is for padding, release last descriptor */
		if (!tx->tlen) {
			tx->desc_limit = MAX_DESC;
		} else if (!tx->coalesce_buf) {
			/* allocate coalesce buffer with space for padding */
			tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
						   GFP_ATOMIC);
			if (!tx->coalesce_buf)
				goto enomem;
			tx->coalesce_idx = 0;
		}
		return 0;
	}

	if (unlikely(tx->num_desc == MAX_DESC))
		goto enomem;

	tx->descp = kmalloc_array(
			MAX_DESC,
			sizeof(struct sdma_desc),
			GFP_ATOMIC);
	if (!tx->descp)
		goto enomem;

	/* reserve last descriptor for coalescing */
	tx->desc_limit = MAX_DESC - 1;
	/* copy ones already built */
	for (i = 0; i < tx->num_desc; i++)
		tx->descp[i] = tx->descs[i];
	return 0;
enomem:
	sdma_txclean(dd, tx);
	return -ENOMEM;
}

/**
 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
 *
 * This is called once the initial nominal allocation of descriptors
 * in the sdma_txreq is exhausted.
 *
 * This function calls _extend_sdma_tx_descs to extend or allocate
 * the coalesce buffer. If there is an allocated coalesce buffer, it will
 * copy the input packet data into the coalesce buffer. It also adds
 * the coalesce buffer descriptor once the whole packet is received.
 *
 * Return:
 * 0 - coalescing, don't populate descriptor
 * 1 - continue with populating descriptor
 */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len)
{
	int pad_len, rval;
	dma_addr_t addr;

	rval = _extend_sdma_tx_descs(dd, tx);
	if (rval) {
		sdma_txclean(dd, tx);
		return rval;
	}

	/* If coalesce buffer is allocated, copy data into it */
	if (tx->coalesce_buf) {
		if (type == SDMA_MAP_NONE) {
			sdma_txclean(dd, tx);
			return -EINVAL;
		}

		if (type == SDMA_MAP_PAGE) {
			kvaddr = kmap(page);
			kvaddr += offset;
		} else if (WARN_ON(!kvaddr)) {
			sdma_txclean(dd, tx);
			return -EINVAL;
		}

		memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
		tx->coalesce_idx += len;
		if (type == SDMA_MAP_PAGE)
			kunmap(page);

		/* If there is more data, return */
		if (tx->tlen - tx->coalesce_idx)
			return 0;

		/* Whole packet is received; add any padding */
		pad_len = tx->packet_len & (sizeof(u32) - 1);
		if (pad_len) {
			pad_len = sizeof(u32) - pad_len;
			memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
			/* padding is taken care of for coalescing case */
			tx->packet_len += pad_len;
			tx->tlen += pad_len;
		}

		/* dma map the coalesce buffer */
		addr = dma_map_single(&dd->pcidev->dev,
				      tx->coalesce_buf,
				      tx->tlen,
				      DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
			sdma_txclean(dd, tx);
			return -ENOSPC;
		}

		/* Add descriptor for coalesce buffer */
		tx->desc_limit = MAX_DESC;
		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
					 addr, tx->tlen);
	}

	return 1;
}

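/*
 * Illustrative only: the inline sdma_txadd_*() helpers in the sdma header
 * are the expected callers of ext_coal_sdma_tx_descs().  A simplified
 * sketch of that calling pattern (the real helpers live in the header and
 * may differ in detail):
 *
 *	if (unlikely(tx->num_desc == tx->desc_limit)) {
 *		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
 *					      NULL, page, offset, len);
 *		if (rval <= 0)
 *			return rval;   // 0: coalesced, <0: error
 *	}
 *	// rval == 1 (or no extension needed): map and add the descriptor
 */
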
/* Update sdes when the lmc changes */
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
{
	struct sdma_engine *sde;
	int i;
	u64 sreg;

	sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
		SD(CHECK_SLID_MASK_SHIFT)) |
	       (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
		SD(CHECK_SLID_VALUE_SHIFT));

	for (i = 0; i < dd->num_sdma; i++) {
		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		sde = &dd->per_sdma[i];
		write_sde_csr(sde, SD(CHECK_SLID), sreg);
	}
}

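/*
 * Worked example (illustrative): callers typically pass a mask of the form
 * ~((1 << lmc) - 1).  With lmc = 2, lid = 0x1004 and mask = 0xfffc, the
 * fields programmed above become:
 *
 *	mask field  = mask & SD(CHECK_SLID_MASK_MASK)          -> 0xfffc
 *	value field = (lid & mask) & SD(CHECK_SLID_VALUE_MASK) -> 0x1004
 *
 * Assuming the hardware accepts a packet when (SLID & mask) == value, this
 * admits SLIDs 0x1004 through 0x1007.
 */
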
/* tx not dword sized - pad */
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int rval = 0;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval) {
			sdma_txclean(dd, tx);
			return rval;
		}
	}
	/* finish the one just added */
	make_tx_sdma_desc(
		tx,
		SDMA_MAP_NONE,
		dd->sdma_pad_phys,
		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
	_sdma_close_tx(dd, tx);
	return rval;
}

/*
 * Add ahg to the sdma_txreq
 *
 * The logic will consume up to 3
 * descriptors at the beginning of
 * the sdma_txreq.
 */
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen)
{
	u32 i, shift = 0, desc = 0;
	u8 mode;

	WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
	/* compute mode */
	if (num_ahg == 1)
		mode = SDMA_AHG_APPLY_UPDATE1;
	else if (num_ahg <= 5)
		mode = SDMA_AHG_APPLY_UPDATE2;
	else
		mode = SDMA_AHG_APPLY_UPDATE3;
	tx->num_desc++;
	/* initialize the consumed descriptors to zero */
	switch (mode) {
	case SDMA_AHG_APPLY_UPDATE3:
		tx->num_desc++;
		tx->descs[2].qw[0] = 0;
		tx->descs[2].qw[1] = 0;
		/* FALLTHROUGH */
	case SDMA_AHG_APPLY_UPDATE2:
		tx->num_desc++;
		tx->descs[1].qw[0] = 0;
		tx->descs[1].qw[1] = 0;
		break;
	}
	ahg_hlen >>= 2;
	tx->descs[0].qw[1] |=
		(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
			<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
		(((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
			<< SDMA_DESC1_HEADER_DWS_SHIFT) |
		(((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
			<< SDMA_DESC1_HEADER_MODE_SHIFT) |
		(((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
			<< SDMA_DESC1_HEADER_UPDATE1_SHIFT);
	for (i = 0; i < (num_ahg - 1); i++) {
		if (!shift && !(i & 2))
			desc++;
		tx->descs[desc].qw[!!(i & 2)] |=
			(((u64)ahg[i + 1])
				<< shift);
		shift = (shift + 32) & 63;
	}
}

/**
 * sdma_ahg_alloc - allocate an AHG entry
 * @sde: engine to allocate from
 *
 * Return:
 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
 * -ENOSPC if an entry is not available
 */
int sdma_ahg_alloc(struct sdma_engine *sde)
{
	int nr;
	int oldbit;

	if (!sde) {
		trace_hfi1_ahg_allocate(sde, -EINVAL);
		return -EINVAL;
	}
	while (1) {
		nr = ffz(ACCESS_ONCE(sde->ahg_bits));
		if (nr > 31) {
			trace_hfi1_ahg_allocate(sde, -ENOSPC);
			return -ENOSPC;
		}
		oldbit = test_and_set_bit(nr, &sde->ahg_bits);
		if (!oldbit)
			break;
		cpu_relax();
	}
	trace_hfi1_ahg_allocate(sde, nr);
	return nr;
}

/**
 * sdma_ahg_free - free an AHG entry
 * @sde: engine to return AHG entry
 * @ahg_index: index to free
 *
 * This routine frees the indicated AHG entry.
 */
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
{
	if (!sde)
		return;
	trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
	clear_bit(ahg_index, &sde->ahg_bits);
}

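/*
 * Illustrative only: an AHG user typically allocates an entry once per
 * send context, remembers the index, and frees it on teardown.  The names
 * priv, priv->s_sde and priv->s_ahg_index below are hypothetical.
 *
 *	priv->s_ahg_index = sdma_ahg_alloc(priv->s_sde);
 *	if (priv->s_ahg_index < 0)
 *		;	// fall back to building full headers (no AHG)
 *	...
 *	sdma_ahg_free(priv->s_sde, priv->s_ahg_index);
 */
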
/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is going into a freeze but before the freeze is fully
 * settled.  Generally an error interrupt.
 *
 * This event will pull the engine out of running so no more entries can be
 * added to the engine's queue.
 */
void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
{
	int i;
	enum sdma_events event = link_down ? sdma_event_e85_link_down :
					     sdma_event_e80_hw_freeze;

	/* set up the wait but do not wait here */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines to stop running and wait */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], event);

	/* sdma_freeze() will wait for all engines to have stopped */
}

/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is fully frozen.
 */
void sdma_freeze(struct hfi1_devdata *dd)
{
	int i;
	int ret;

	/*
	 * Make sure all engines have moved out of the running state before
	 * continuing.
	 */
	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
		atomic_read(&dd->sdma_unfreeze_count) <= 0);
	/* interrupted or count is negative, then unloading - just exit */
	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
		return;

	/* set up the count for the next wait */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines that the SPC is frozen, they can start cleaning */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);

	/*
	 * Wait for everyone to finish software clean before exiting.  The
	 * software clean will read engine CSRs, so must be completed before
	 * the next step, which will clear the engine CSRs.
	 */
	(void) wait_event_interruptible(dd->sdma_unfreeze_wq,
		atomic_read(&dd->sdma_unfreeze_count) <= 0);
	/* no need to check results - done no matter what */
}

/*
 * SPC freeze handling for the SDMA engines.  Called after the SPC is unfrozen.
 *
 * The SPC freeze acts like an SDMA halt and a hardware clean combined.  All
 * that is left is a software clean.  We could do it after the SPC is fully
 * frozen, but then we'd have to add another state to wait for the unfreeze.
 * Instead, just defer the software clean until the unfreeze step.
 */
void sdma_unfreeze(struct hfi1_devdata *dd)
{
	int i;

	/* tell all engines to start freeze clean up */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i],
				   sdma_event_e82_hw_unfreeze);
}

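/*
 * For reference, the expected SPC freeze sequence as driven by the chip
 * error handling (a sketch; the actual callers live elsewhere in the
 * driver):
 *
 *	sdma_freeze_notify(dd, link_down); // pull engines out of running
 *	...wait for the SPC freeze to settle...
 *	sdma_freeze(dd);                   // engines halted, start sw clean
 *	...SPC unfrozen by the chip code...
 *	sdma_unfreeze(dd);                 // deferred sw clean finishes here
 */
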
/**
 * _sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 */
void _sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
	/* assume we have selected a good cpu */
	write_csr(sde->dd,
		  CCE_INT_FORCE + (8*(IS_SDMA_START/64)), sde->progress_mask);
}