#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include "hfi.h"
#include "verbs.h"
#include "sdma_txreq.h"

/* Hardware limit */
#define MAX_DESC 64
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)

#define SDMA_TXREQ_S_OK        0
#define SDMA_TXREQ_S_SENDERROR 1
#define SDMA_TXREQ_S_ABORTED   2
#define SDMA_TXREQ_S_SHUTDOWN  3

/* flags bits */
#define SDMA_TXREQ_F_URGENT       0x0001
#define SDMA_TXREQ_F_AHG_COPY     0x0002
#define SDMA_TXREQ_F_USE_AHG      0x0004

#define SDMA_MAP_NONE          0
#define SDMA_MAP_SINGLE        1
#define SDMA_MAP_PAGE          2

#define SDMA_AHG_VALUE_MASK          0xffff
#define SDMA_AHG_VALUE_SHIFT         0
#define SDMA_AHG_INDEX_MASK          0xf
#define SDMA_AHG_INDEX_SHIFT         16
#define SDMA_AHG_FIELD_LEN_MASK      0xf
#define SDMA_AHG_FIELD_LEN_SHIFT     20
#define SDMA_AHG_FIELD_START_MASK    0x1f
#define SDMA_AHG_FIELD_START_SHIFT   24
#define SDMA_AHG_UPDATE_ENABLE_MASK  0x1
#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31

/* AHG modes */

/*
 * Be aware that the ordering and values
 * for SDMA_AHG_APPLY_UPDATE[123]
 * are assumed when generating a skip
 * count in submit_tx() in sdma.c
 */
#define SDMA_AHG_NO_AHG              0
#define SDMA_AHG_COPY                1
#define SDMA_AHG_APPLY_UPDATE1       2
#define SDMA_AHG_APPLY_UPDATE2       3
#define SDMA_AHG_APPLY_UPDATE3       4

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC0_FIRST_DESC_FLAG      BIT_ULL(63)
#define SDMA_DESC0_LAST_DESC_FLAG       BIT_ULL(62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT     48
#define SDMA_DESC0_BYTE_COUNT_WIDTH     14
#define SDMA_DESC0_BYTE_COUNT_MASK \
	((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
	(SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT       0
#define SDMA_DESC0_PHY_ADDR_WIDTH       48
#define SDMA_DESC0_PHY_ADDR_MASK \
	((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
	(SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)

#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
	((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
	(SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT    13
#define SDMA_DESC1_HEADER_MODE_WIDTH    3
#define SDMA_DESC1_HEADER_MODE_MASK \
	((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
	(SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT   8
#define SDMA_DESC1_HEADER_INDEX_WIDTH   5
#define SDMA_DESC1_HEADER_INDEX_MASK \
	((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
	(SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT     4
#define SDMA_DESC1_HEADER_DWS_WIDTH     4
#define SDMA_DESC1_HEADER_DWS_MASK \
	((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
	(SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT     2
#define SDMA_DESC1_GENERATION_WIDTH     2
#define SDMA_DESC1_GENERATION_MASK \
	((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
	(SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
#define SDMA_DESC1_INT_REQ_FLAG         BIT_ULL(1)
#define SDMA_DESC1_HEAD_TO_HOST_FLAG    BIT_ULL(0)

enum sdma_states {
	sdma_state_s00_hw_down,
	sdma_state_s10_hw_start_up_halt_wait,
	sdma_state_s15_hw_start_up_clean_wait,
	sdma_state_s20_idle,
	sdma_state_s30_sw_clean_up_wait,
	sdma_state_s40_hw_clean_up_wait,
	sdma_state_s50_hw_halt_wait,
	sdma_state_s60_idle_halt_wait,
	sdma_state_s80_hw_freeze,
	sdma_state_s82_freeze_sw_clean,
	sdma_state_s99_running,
};

enum sdma_events {
	sdma_event_e00_go_hw_down,
	sdma_event_e10_go_hw_start,
	sdma_event_e15_hw_halt_done,
	sdma_event_e25_hw_clean_up_done,
	sdma_event_e30_go_running,
	sdma_event_e40_sw_cleaned,
	sdma_event_e50_hw_cleaned,
	sdma_event_e60_hw_halted,
	sdma_event_e70_go_idle,
	sdma_event_e80_hw_freeze,
	sdma_event_e81_hw_frozen,
	sdma_event_e82_hw_unfreeze,
	sdma_event_e85_link_down,
	sdma_event_e90_sw_halted,
};

struct sdma_set_state_action {
	unsigned op_enable:1;
	unsigned op_intenable:1;
	unsigned op_halt:1;
	unsigned op_cleanup:1;
	unsigned go_s99_running_tofalse:1;
	unsigned go_s99_running_totrue:1;
};

struct sdma_state {
	struct kref kref;
	struct completion comp;
	enum sdma_states current_state;
	unsigned current_op;
	unsigned go_s99_running;
	/* debugging/development */
	enum sdma_states previous_state;
	unsigned previous_op;
	enum sdma_events last_event;
};

/**
 * DOC: sdma exported routines
 *
 * These sdma routines fit into three categories:
 * - The SDMA API for building and submitting packets
 *   to the ring
 *
 * - Initialization and teardown routines to build up
 *   and tear down SDMA
 *
 * - ISR entrances to handle interrupts, state changes
 *   and errors
 */

/**
 * DOC: sdma PSM/verbs API
 *
 * The sdma API is designed to be used by both PSM
 * and verbs to supply packets to the SDMA ring.
 *
 * The usage of the API is as follows:
 *
 * Embed a struct iowait in the QP or
 * PQ.  The iowait should be initialized with a
 * call to iowait_init().
 *
 * The user of the API should create an allocation method
 * for their version of the txreq.  Slabs, pre-allocated lists,
 * and dma pools can be used.  Once the user's overload of
 * the sdma_txreq has been allocated, the sdma_txreq member
 * must be initialized with sdma_txinit() or sdma_txinit_ahg().
 *
 * The txreq must be declared with the sdma_txreq first.
 *
 * The tx request, once initialized, is manipulated with calls to
 * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
 * for each disjoint memory location.  It is the user's responsibility
 * to understand the packet boundaries and page boundaries to do the
 * appropriate number of sdma_txadd_* calls.  The user
 * must be prepared to deal with failures from these routines due to
 * either memory allocation or dma_mapping failures.
 *
 * The mapping specifics for each memory location are recorded
 * in the tx.  Memory locations added with sdma_txadd_page()
 * and sdma_txadd_kvaddr() are automatically mapped when added
 * to the tx and unmapped as part of the progress processing in the
 * SDMA interrupt handling.
 *
 * sdma_txadd_daddr() is used to add a dma_addr_t memory to the
 * tx.  An example of a use case would be a pre-allocated
 * set of headers allocated via dma_pool_alloc() or
 * dma_alloc_coherent().  For these memory locations, it
 * is the responsibility of the user to handle that unmapping.
 * (This would usually be at an unload or job termination.)
 *
 * The routine sdma_send_txreq() is used to submit
 * a tx to the ring after the appropriate number of
 * sdma_txadd_* have been done.
 *
 * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
 * can be used to submit a list of packets.
 *
 * The user is free to use the link overhead in the struct sdma_txreq as
 * long as the tx isn't in flight.
 *
 * The extreme degenerate case of the number of descriptors
 * exceeding the ring size is automatically handled as
 * memory locations are added.  An overflow of the descriptor
 * array that is part of the sdma_txreq is also automatically
 * handled.
 */

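/*
 * Example (a minimal sketch, not part of the API above): a typical
 * submission built from the documented entrances.  The struct my_tx
 * wrapper, the plen/hdr/page variables, complete_cb, and the error
 * policy are hypothetical.
 *
 *	struct my_tx {
 *		struct sdma_txreq txreq;	<- must be the first member
 *		...
 *	};
 *
 *	ret = sdma_txinit(&tx->txreq, 0, plen, complete_cb);
 *	if (!ret)
 *		ret = sdma_txadd_kvaddr(dd, &tx->txreq, hdr, hdrlen);
 *	if (!ret)
 *		ret = sdma_txadd_page(dd, &tx->txreq, page, offset, len);
 *	if (!ret)
 *		ret = sdma_send_txreq(sde, &priv->s_iowait, &tx->txreq, false);
 *	if (ret)
 *		sdma_txclean(dd, &tx->txreq);
 */
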
/**
 * DOC: Infrastructure calls
 *
 * sdma_init() is used to initialize data structures and
 * CSRs for the desired number of SDMA engines.
 *
 * sdma_start() is used to kick the SDMA engines initialized
 * with sdma_init().  Interrupts must be enabled at this
 * point since aspects of the state machine are interrupt
 * driven.
 *
 * sdma_engine_error() and sdma_engine_interrupt() are
 * entrances for interrupts.
 *
 * sdma_map_init() is for the management of the mapping
 * table when the number of vls is changed.
 */

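/*
 * Example (a sketch of the ordering implied above; error handling and
 * the interrupt-enable step are elided, and the per-vl engine split is
 * hypothetical):
 *
 *	ret = sdma_init(dd, port);
 *	...			(enable interrupts)
 *	sdma_start(dd);
 *	...
 *	ret = sdma_map_init(dd, port, num_vls, vl_engines);
 *	...
 *	sdma_exit(dd);
 */
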
/*
 * struct hw_sdma_desc - raw 128 bit SDMA descriptor
 *
 * This is the raw descriptor in the SDMA ring
 */
struct hw_sdma_desc {
	/* private: don't use directly */
	__le64 qw[2];
};

/**
 * struct sdma_engine - Data pertaining to each SDMA engine.
 * @dd: a back-pointer to the device data
 * @ppd: per port back-pointer
 * @imask: mask for irq manipulation
 * @idle_mask: mask for determining if an interrupt is due to sdma_idle
 *
 * This structure has the state for each sdma_engine.
 *
 * Accessing non-public fields is not supported
 * since the private members are subject to change.
 */
struct sdma_engine {
	/* read mostly */
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	/* private: */
	void __iomem *tail_csr;
	u64 imask;			/* clear interrupt mask */
	u64 idle_mask;
	u64 progress_mask;
	u64 int_mask;
	/* private: */
	volatile __le64 *head_dma;	/* DMA'ed by chip */
	/* private: */
	dma_addr_t head_phys;
	/* private: */
	struct hw_sdma_desc *descq;
	/* private: */
	unsigned descq_full_count;
	struct sdma_txreq **tx_ring;
	/* private: */
	dma_addr_t descq_phys;
	/* private */
	u32 sdma_mask;
	/* private */
	struct sdma_state state;
	/* private */
	int cpu;
	/* private: */
	u8 sdma_shift;
	/* private: */
	u8 this_idx;			/* zero relative engine */
	/* protect changes to senddmactrl shadow */
	spinlock_t senddmactrl_lock;
	/* private: */
	u64 p_senddmactrl;		/* shadow per-engine SendDmaCtrl */

	/* read/write using tail_lock */
	spinlock_t tail_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	/* private: */
	u64 tail_sn;
#endif
	/* private: */
	u32 descq_tail;
	/* private: */
	unsigned long ahg_bits;
	/* private: */
	u16 desc_avail;
	/* private: */
	u16 tx_tail;
	/* private: */
	u16 descq_cnt;

	/* read/write using head_lock */
	/* private: */
	seqlock_t head_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	/* private: */
	u64 head_sn;
#endif
	/* private: */
	u32 descq_head;
	/* private: */
	u16 tx_head;
	/* private: */
	u64 last_status;
	/* private */
	u64 err_cnt;
	/* private */
	u64 sdma_int_cnt;
	u64 idle_int_cnt;
	u64 progress_int_cnt;

	/* private: */
	struct list_head dmawait;

	/* CONFIG SDMA for now, just blindly duplicate */
	/* private: */
	struct tasklet_struct sdma_hw_clean_up_task
		____cacheline_aligned_in_smp;

	/* private: */
	struct tasklet_struct sdma_sw_clean_up_task
		____cacheline_aligned_in_smp;
	/* private: */
	struct work_struct err_halt_worker;
	/* private */
	struct timer_list err_progress_check_timer;
	u32 progress_check_head;
	/* private: */
	struct work_struct flush_worker;
	/* protect flush list */
	spinlock_t flushlist_lock;
	/* private: */
	struct list_head flushlist;
	struct cpumask cpu_mask;
	struct kobject kobj;
};

int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_all_running(struct hfi1_devdata *dd);
void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
void sdma_wait(struct hfi1_devdata *dd);

/**
 * sdma_empty() - idle engine test
 * @sde: sdma engine
 *
 * Currently used by verbs as a latency optimization.
 *
 * Return:
 * 1 - empty, 0 - non-empty
 */
static inline int sdma_empty(struct sdma_engine *sde)
{
	return sde->descq_tail == sde->descq_head;
}

static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
	return sde->descq_cnt -
		(sde->descq_tail -
		 READ_ONCE(sde->descq_head)) - 1;
}

static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
{
	return sde->descq_cnt - sdma_descq_freecnt(sde);
}

/*
 * Either head_lock or tail_lock is required to see
 * a steady state.
 */
static inline int __sdma_running(struct sdma_engine *engine)
{
	return engine->state.current_state == sdma_state_s99_running;
}

/**
 * sdma_running() - state suitability test
 * @engine: sdma engine
 *
 * sdma_running probes the internal state to determine if it is suitable
 * for submitting packets.
 *
 * Return:
 * 1 - ok to submit, 0 - not ok to submit
 *
 */
static inline int sdma_running(struct sdma_engine *engine)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->tail_lock, flags);
	ret = __sdma_running(engine);
	spin_unlock_irqrestore(&engine->tail_lock, flags);
	return ret;
}

void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen);

/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: ahg entry to use (0 - 31)
 * @num_ahg: number of AHG descriptors for the first descriptor (0 - 9)
 * @ahg: array of AHG descriptors (up to 9 entries)
 * @ahg_hlen: number of bytes from the AHG entry to use
 * @cb: callback
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user
 * independent fields.
 *
 * The currently supported flags are SDMA_TXREQ_F_URGENT,
 * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
 * copied to the chip entry.  SDMA_TXREQ_F_USE_AHG causes the code to add
 * the AHG descriptors into the first 1 to 3 descriptors.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.  The callback will be provided this tx, a status, and
 * a flag.
 *
 * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 * The flag, if the iowait had been used, indicates that the iowait's
 * sdma_busy count has reached zero.
 *
 * The user data portion of tlen should be precise.  The sdma_txadd_*
 * entrances will pad with a descriptor that references 1 - 3 bytes once
 * the number of bytes specified in tlen has been supplied to the
 * sdma_txreq.
 *
 * ahg_hlen is used to determine the number of on-chip entry bytes to
 * use as the header.  This is for cases where the stored header is
 * larger than the header to be used in a packet.  This is typical
 * for verbs, where the header stored for an RDMA_WRITE_FIRST is larger
 * than the header used in an RDMA_WRITE_MIDDLE packet.
 *
 */
static inline int sdma_txinit_ahg(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	u8 ahg_entry,
	u8 num_ahg,
	u32 *ahg,
	u8 ahg_hlen,
	void (*cb)(struct sdma_txreq *, int))
{
	if (tlen == 0)
		return -ENODATA;
	if (tlen > MAX_SDMA_PKT_SIZE)
		return -EMSGSIZE;
	tx->desc_limit = ARRAY_SIZE(tx->descs);
	tx->descp = &tx->descs[0];
	INIT_LIST_HEAD(&tx->list);
	tx->num_desc = 0;
	tx->flags = flags;
	tx->complete = cb;
	tx->coalesce_buf = NULL;
	tx->wait = NULL;
	tx->packet_len = tlen;
	tx->tlen = tx->packet_len;
	tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
	tx->descs[0].qw[1] = 0;
	if (flags & SDMA_TXREQ_F_AHG_COPY)
		tx->descs[0].qw[1] |=
			(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
				<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
			(((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
				<< SDMA_DESC1_HEADER_MODE_SHIFT);
	else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
		_sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
	return 0;
}

/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user
 * independent fields.
 *
 * The currently supported flag is SDMA_TXREQ_F_URGENT.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.
 *
 * The callback, if non-NULL, will be provided this tx and a status.  The
 * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 */
static inline int sdma_txinit(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	void (*cb)(struct sdma_txreq *, int))
{
	return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}

/* helpers - don't use */
static inline int sdma_mapping_type(struct sdma_desc *d)
{
	/* the sw descriptor stores the mapping type in the generation bits */
	return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
		>> SDMA_DESC1_GENERATION_SHIFT;
}

static inline size_t sdma_mapping_len(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
		>> SDMA_DESC0_BYTE_COUNT_SHIFT;
}

static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
		>> SDMA_DESC0_PHY_ADDR_SHIFT;
}

static inline void make_tx_sdma_desc(
	struct sdma_txreq *tx,
	int type,
	dma_addr_t addr,
	size_t len)
{
	struct sdma_desc *desc = &tx->descp[tx->num_desc];

	if (!tx->num_desc) {
		/* first descriptor: qw[0] and the ahg mode in qw[1] were set at init */
		desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
			<< SDMA_DESC1_GENERATION_SHIFT;
	} else {
		desc->qw[0] = 0;
		desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
			<< SDMA_DESC1_GENERATION_SHIFT;
	}
	desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
			<< SDMA_DESC0_PHY_ADDR_SHIFT) |
			(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
			<< SDMA_DESC0_BYTE_COUNT_SHIFT);
}

/* helper to extend txreq */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
void __sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);

static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	if (tx->num_desc)
		__sdma_txclean(dd, tx);
}

/* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
				  struct sdma_txreq *tx)
{
	tx->descp[tx->num_desc].qw[0] |=
		SDMA_DESC0_LAST_DESC_FLAG;
	tx->descp[tx->num_desc].qw[1] |=
		dd->default_desc1;
	if (tx->flags & SDMA_TXREQ_F_URGENT)
		tx->descp[tx->num_desc].qw[1] |=
			(SDMA_DESC1_HEAD_TO_HOST_FLAG |
			 SDMA_DESC1_INT_REQ_FLAG);
}

static inline int _sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	int type,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval = 0;

	make_tx_sdma_desc(
		tx,
		type,
		addr, len);
	WARN_ON(len > tx->tlen);
	tx->tlen -= len;
	/* special cases for last */
	if (!tx->tlen) {
		if (tx->packet_len & (sizeof(u32) - 1)) {
			rval = _pad_sdma_tx_descs(dd, tx);
			if (rval)
				return rval;
		} else {
			_sdma_close_tx(dd, tx);
		}
	}
	tx->num_desc++;
	return rval;
}

/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 *
 * This is used to add a page/offset/length descriptor.
 *
 * The mapping/unmapping of the page/offset/len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
 * extend/coalesce descriptor array
 */
static inline int sdma_txadd_page(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	struct page *page,
	unsigned long offset,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
					      NULL, page, offset, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_page(
		       &dd->pcidev->dev,
		       page,
		       offset,
		       len,
		       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		__sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
			dd, SDMA_MAP_PAGE, tx, addr, len);
}

/**
 * sdma_txadd_daddr() - add a dma address to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the page is added
 * @addr: dma address mapped by caller
 * @len: length in bytes
 *
 * This is used to add a descriptor for memory that is already dma mapped.
 *
 * In this case, there is no unmapping as part of the progress processing
 * for this memory location.
 *
 * Return:
 * 0 - success, -ENOMEM - couldn't extend descriptor array
 */

static inline int sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
					      NULL, NULL, 0, 0);
		if (rval <= 0)
			return rval;
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
}

/**
 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the page is added
 * @kvaddr: the kernel virtual address
 * @len: length in bytes
 *
 * This is used to add a descriptor referenced by the indicated kvaddr and
 * len.
 *
 * The mapping/unmapping of the kvaddr and len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
 * descriptor array
 */
static inline int sdma_txadd_kvaddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	void *kvaddr,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
					      kvaddr, NULL, 0, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_single(
		       &dd->pcidev->dev,
		       kvaddr,
		       len,
		       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		__sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
			dd, SDMA_MAP_SINGLE, tx, addr, len);
}

struct iowait;

int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait *wait,
		    struct sdma_txreq *tx,
		    bool pkts_sent);
int sdma_send_txlist(struct sdma_engine *sde,
		     struct iowait *wait,
		     struct list_head *tx_list,
		     u32 *count);

int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);

864 | ||
865 | /** | |
866 | * sdma_build_ahg - build ahg descriptor | |
867 | * @data | |
868 | * @dwindex | |
869 | * @startbit | |
870 | * @bits | |
871 | * | |
872 | * Build and return a 32 bit descriptor. | |
873 | */ | |
874 | static inline u32 sdma_build_ahg_descriptor( | |
875 | u16 data, | |
876 | u8 dwindex, | |
877 | u8 startbit, | |
878 | u8 bits) | |
879 | { | |
880 | return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT | | |
881 | ((startbit & SDMA_AHG_FIELD_START_MASK) << | |
882 | SDMA_AHG_FIELD_START_SHIFT) | | |
883 | ((bits & SDMA_AHG_FIELD_LEN_MASK) << | |
884 | SDMA_AHG_FIELD_LEN_SHIFT) | | |
885 | ((dwindex & SDMA_AHG_INDEX_MASK) << | |
886 | SDMA_AHG_INDEX_SHIFT) | | |
887 | ((data & SDMA_AHG_VALUE_MASK) << | |
888 | SDMA_AHG_VALUE_SHIFT)); | |
889 | } | |
890 | ||
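/*
 * Example (a sketch; the field choice is hypothetical): build an AHG
 * update that rewrites a 16 bit field starting at bit 16 of header
 * dword 3 with new_val.
 *
 *	u32 ahg = sdma_build_ahg_descriptor(new_val, 3, 16, 16);
 */
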
/**
 * sdma_progress - use seq number to detect head progress
 * @sde: sdma_engine to check
 * @seq: base seq count
 * @tx: txreq for which we need to check descriptor availability
 *
 * This is used in the appropriate spot in the sleep routine
 * to check for potential ring progress.  This routine gets the
 * seqcount before queuing the iowait structure for progress.
 *
 * If the seqcount indicates that progress needs to be checked,
 * re-submission is detected by checking whether the descriptor
 * queue has enough descriptors for the txreq.
 */
static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
				     struct sdma_txreq *tx)
{
	if (read_seqretry(&sde->head_lock, seq)) {
		sde->desc_avail = sdma_descq_freecnt(sde);
		if (tx->num_desc > sde->desc_avail)
			return 0;
		return 1;
	}
	return 0;
}

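/*
 * Example (a sketch of the sleep-path pattern this serves; seq is
 * assumed to have been captured from sde->head_lock before the failed
 * submit, and the queuing details are elided):
 *
 *	if (sdma_progress(sde, seq, tx))
 *		return -EAGAIN;	(the ring advanced; retry the submit)
 *	(otherwise queue the iowait on sde->dmawait and sleep)
 */
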
/**
 * sdma_iowait_schedule() - schedule an iowait's work
 * @sde: sdma_engine to schedule against
 * @wait: wait struct to schedule
 *
 * This function schedules the work item of the iowait
 * structure embedded in the QP or PQ on the port's
 * workqueue, using the engine's assigned CPU.
 */
static inline void sdma_iowait_schedule(
	struct sdma_engine *sde,
	struct iowait *wait)
{
	struct hfi1_pportdata *ppd = sde->dd->pport;

	iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
}

/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);

/*
 *
 * The diagram below details the relationship of the mapping structures
 *
 * Since the mapping now allows for non-uniform engines per vl, the
 * number of engines for a vl is either the vl_engines[vl] or
 * a computation based on num_sdma/num_vls:
 *
 * For example:
 * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
 *
 * n = roundup to next highest power of 2 using nactual
 *
 * In the case where num_sdma/num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
 *
 * For the case where n > nactual, the engines are assigned
 * in a round robin fashion wrapping back to the first engine
 * for a particular vl.
 *
 *  dd->sdma_map
 *       |                                    sdma_map_elem[0]
 *       |                                 +--------------------+
 *       v                                 |        mask        |
 *  sdma_vl_map                            |--------------------|
 *  +--------------------------+           |  sde[0] -> eng 1   |
 *  |    list (RCU)            |           |--------------------|
 *  |--------------------------|         ->|  sde[1] -> eng 2   |
 *  |    mask                  |      --/  |--------------------|
 *  |--------------------------|    -/     |         *          |
 *  |    actual_vls (max 8)    |  -/       |--------------------|
 *  |--------------------------|-/         |  sde[n-1] -> eng n |
 *  |    vls (max 8)           |           +--------------------+
 *  |--------------------------|
 *  |    map[0]                |
 *  |--------------------------|           +---------------------+
 *  |    map[1]                |---        |        mask         |
 *  |--------------------------|   \----   |---------------------|
 *  |       *                  |       \-- |  sde[0] -> eng 1+n  |
 *  |       *                  |           |---------------------|
 *  |       *                  |        -> |  sde[1] -> eng 2+n  |
 *  |--------------------------|           |---------------------|
 *  |   map[vls - 1]           |-          |         *           |
 *  +--------------------------+ \-        |---------------------|
 *                                 \-      | sde[m-1] -> eng m+n |
 *                                   \     +---------------------+
 *                                    \-
 *                                      \
 *                                       \- +----------------------+
 *                                         \|         mask         |
 *                                          |----------------------|
 *                                          | sde[0] -> eng 1+m+n  |
 *                                          |----------------------|
 *                                          | sde[1] -> eng 2+m+n  |
 *                                          |----------------------|
 *                                          |          *           |
 *                                          |----------------------|
 *                                          | sde[o-1] -> eng o+m+n|
 *                                          +----------------------+
 *
 */

/**
 * struct sdma_map_elem - mapping for a vl
 * @mask - selector mask
 * @sde - array of engines for this vl
 *
 * The mask is used to "mod" the selector
 * to produce an index into the trailing
 * array of sdes.
 */
struct sdma_map_elem {
	u32 mask;
	struct sdma_engine *sde[0];
};

/**
 * struct sdma_vl_map - mapping of vls to engines
 * @engine_to_vl - map of an engine to a vl
 * @list - rcu head for free callback
 * @mask - vl mask to "mod" the vl to produce an index to map array
 * @actual_vls - number of vls
 * @vls - number of vls rounded to next power of 2
 * @map - array of sdma_map_elem entries
 *
 * This is the parent mapping structure.  The trailing
 * members of the struct point to sdma_map_elem entries, which
 * in turn point to an array of sde's for that vl.
 */
struct sdma_vl_map {
	s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
	struct rcu_head list;
	u32 mask;
	u8 actual_vls;
	u8 vls;
	struct sdma_map_elem *map[0];
};

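/*
 * Example (a sketch of the lookup these structures imply; the
 * rcu_read_lock()/unlock() bracketing and the NULL checks a real
 * caller needs are elided):
 *
 *	struct sdma_vl_map *m = rcu_dereference(dd->sdma_map);
 *	struct sdma_map_elem *e = m->map[vl & m->mask];
 *	struct sdma_engine *sde = e->sde[selector & e->mask];
 */
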
int sdma_map_init(
	struct hfi1_devdata *dd,
	u8 port,
	u8 num_vls,
	u8 *vl_engines);

/* slow path */
void _sdma_engine_progress_schedule(struct sdma_engine *sde);

/**
 * sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 * This is the fast path.
 *
 */
static inline void sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
		return;
	_sdma_engine_progress_schedule(sde);
}

struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5);

struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl);

struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
					    u32 selector, u8 vl);
ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
				size_t count);
int sdma_engine_get_vl(struct sdma_engine *sde);
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *);
void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd,
				unsigned long cpuid);

#ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *);
#endif
static inline char *slashstrip(char *s)
{
	char *r = s;

	while (*s)
		if (*s++ == '/')
			r = s;
	return r;
}

u16 sdma_get_descq_cnt(void);

extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);

#endif