/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/etherdevice.h>
#include <linux/prefetch.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed last in the
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;

	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_status);

	val64 = readq(&vp_reg->vpath_general_int_status);

	/* Mask unwanted interrupts */

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	/* Unmask the individual interrupts */

	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
			&vp_reg->asic_ntwk_vp_err_mask);

	__vxge_hw_pio_mem_write32_upper(0,
		&vp_reg->vpath_general_int_mask);
exit:
	return status;
}

/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed last in the
 * vpath teardown sequence.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;

	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	__vxge_hw_pio_mem_write32_upper(
		(u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_general_int_mask);

	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);

exit:
	return status;
}

void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;
	u64 val64;

	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
		return;

	vp_reg = fifo->vp_reg;
	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);

	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		fifo->tim_tti_cfg1_saved = val64;
		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
	}
}

void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg1_saved;

	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
	ring->tim_rti_cfg1_saved = val64;
	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
}

void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
{
	u64 val64 = fifo->tim_tti_cfg3_saved;
	u64 timer = (fifo->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);

	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
	/* tti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}

void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg3_saved;
	u64 timer = (ring->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);

	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
	/* rti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}
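
/*
 * Worked example for the two rtimer helpers above (illustrative comment
 * only). With rtimer expressed in microseconds, (rtimer * 1000) / 272
 * converts it to 272 ns device-timer units -- the 272 ns granularity is an
 * assumption read off the arithmetic, not taken from the Titan docs. So a
 * 100 us restriction timer programs (100 * 1000) / 272 = 367 into
 * TIM_CFG3_INT_NUM_RTIMER_VAL, and a zero rtimer clears the field entirely.
 */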

/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_channel_msix_clear - Unmask the MSIX Vector in one-shot mode.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 * if configured in MSIX oneshot mode
 */
void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
}
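
/*
 * Illustrative sketch (not part of the driver): how an MSI-X vector id maps
 * onto the mask registers used by the three helpers above. The common
 * register block carries four set/clear registers, so msix_id % 4 selects
 * the register and vxge_mBIT(msix_id >> 2) selects the bit inside it; for
 * msix_id = 9 the write lands in register 1, bit position 2.
 */
#if 0	/* example only, never compiled */
static void example_msix_mask_unmask(struct __vxge_hw_channel *channel)
{
	int msix_id = 9;

	/* register index: 9 % 4 == 1, bit: vxge_mBIT(9 >> 2) == vxge_mBIT(2) */
	vxge_hw_channel_msix_mask(channel, msix_id);
	vxge_hw_channel_msix_unmask(channel, msix_id);
}
#endif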

/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *		with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	    (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	    (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	    (intr_mode != VXGE_HW_INTR_MODE_DEF))
		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

	hldev->config.intr_mode = intr_mode;
	return intr_mode;
}
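
/*
 * Illustrative sketch (not part of the driver): a probe path might negotiate
 * the interrupt mode with the helper above before enabling interrupts. Any
 * unsupported value is quietly coerced to INTA (VXGE_HW_INTR_MODE_IRQLINE).
 */
#if 0	/* example only, never compiled */
static void example_pick_intr_mode(struct __vxge_hw_device *hldev,
				   int msix_vectors_granted)
{
	u32 mode = msix_vectors_granted ? VXGE_HW_INTR_MODE_MSIX :
					  VXGE_HW_INTR_MODE_IRQLINE;

	/* the return value is the mode actually recorded in hldev->config */
	mode = vxge_hw_device_set_intr_type(hldev, mode);
}
#endif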

/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed last in the
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			writeq(val64, &hldev->common_reg->tim_int_status0);

			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
				&hldev->common_reg->tim_int_status1);

			__vxge_hw_pio_mem_write32_upper(~val32,
				&hldev->common_reg->tim_int_mask1);
		}
	}

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
	u64 val64 = 0;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
		val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
			&hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 *
 * Returns: void
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
	u32 val32;

	val32 = readl(&hldev->common_reg->titan_general_int_status);
}

/**
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
			      enum vxge_hw_event type)
{
	switch (type) {
	case VXGE_HW_EVENT_UNKNOWN:
		break;
	case VXGE_HW_EVENT_RESET_START:
	case VXGE_HW_EVENT_RESET_COMPLETE:
	case VXGE_HW_EVENT_LINK_DOWN:
	case VXGE_HW_EVENT_LINK_UP:
		goto out;
	case VXGE_HW_EVENT_ALARM_CLEARED:
		goto out;
	case VXGE_HW_EVENT_ECCERR:
	case VXGE_HW_EVENT_MRPCIM_ECCERR:
		goto out;
	case VXGE_HW_EVENT_FIFO_ERR:
	case VXGE_HW_EVENT_VPATH_ERR:
	case VXGE_HW_EVENT_CRITICAL_ERR:
	case VXGE_HW_EVENT_SERR:
		break;
	case VXGE_HW_EVENT_SRPCIM_SERR:
	case VXGE_HW_EVENT_MRPCIM_SERR:
		goto out;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		break;
	default:
		vxge_assert(0);
		goto out;
	}

	/* notify driver */
	if (hldev->uld_callbacks.crit_err)
		hldev->uld_callbacks.crit_err(
			(struct __vxge_hw_device *)hldev,
			type, vp_id);
out:
	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the previous link state is already down, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_DOWN)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_DOWN;

	/* notify driver */
	if (hldev->uld_callbacks.link_down)
		hldev->uld_callbacks.link_down(hldev);
exit:
	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for a programmable amount of time.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the previous link state is already up, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_UP)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_UP;

	/* notify driver */
	if (hldev->uld_callbacks.link_up)
		hldev->uld_callbacks.link_up(hldev);
exit:
	return VXGE_HW_OK;
}

/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
			      u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     ))) {

				sw_stats->error_stats.network_sustained_ok++;

				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
			    ~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
			    ~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
			    ~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
			    ~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
			    ~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
			    ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
			    ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
			    ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
			    ~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
			    ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
			    & ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
			    & ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}

/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *	general_int_status register.
 *
 * The function performs two actions: it first checks whether the interrupt
 * was raised by the device (relevant on a shared IRQ line), and then masks
 * the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
 * status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* not Titan interrupt */
		*reason = 0;
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {

			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			*reason = 0;
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

	vpath_mask = hldev->vpaths_deployed >>
			(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

		return VXGE_HW_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
		     VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				     (ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status0);
	}

	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status1);
	}
}
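
/*
 * Illustrative sketch (not part of the driver): the intended INTA interrupt
 * flow built from the helpers above -- vxge_hw_device_begin_irq() claims the
 * interrupt and fetches the reason, vxge_hw_device_clear_tx_rx() acks the
 * traffic condition, and mask_all()/unmask_all() bracket the processing.
 * example_process_traffic() is hypothetical.
 */
#if 0	/* example only, never compiled */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	u64 reason;

	if (vxge_hw_device_begin_irq(hldev, 0, &reason) ==
	    VXGE_HW_ERR_WRONG_IRQ)
		return IRQ_NONE;	/* shared IRQ line, not ours */

	vxge_hw_device_mask_all(hldev);
	vxge_hw_device_clear_tx_rx(hldev);
	example_process_traffic(hldev, reason);	/* hypothetical */
	vxge_hw_device_unmask_all(hldev);

	return IRQ_HANDLED;
}
#endif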

/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 *
 */
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
	void **tmp_arr;

	if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
		*dtrh = channel->reserve_arr[--channel->reserve_ptr];

		return VXGE_HW_OK;
	}

	/* switch between empty and full arrays */

	/* the idea behind such a design is that by having free and reserved
	 * arrays separated we basically separate the irq and non-irq parts,
	 * i.e. no additional locking is needed when we free a resource */

	if (channel->length - channel->free_ptr > 0) {

		tmp_arr = channel->reserve_arr;
		channel->reserve_arr = channel->free_arr;
		channel->free_arr = tmp_arr;
		channel->reserve_ptr = channel->length;
		channel->reserve_top = channel->free_ptr;
		channel->free_ptr = channel->length;

		channel->stats->reserve_free_swaps_cnt++;

		goto _alloc_after_swap;
	}

	channel->stats->full_cnt++;

	*dtrh = NULL;
	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
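
/*
 * Illustrative sketch (not part of the driver): a minimal standalone model
 * of the two-array scheme used by vxge_hw_channel_dtr_alloc() above and
 * vxge_hw_channel_dtr_free() below. Allocation pops from the reserve array,
 * frees push onto the free array, and when the reserve side runs dry the two
 * arrays (plus cursors) are swapped -- which is why the alloc and free sides
 * never touch the same indices and need no shared lock.
 */
#if 0	/* example only, never compiled */
struct two_arr {
	void **reserve_arr, **free_arr;
	int length, reserve_ptr, reserve_top, free_ptr;
};

static void *two_arr_alloc(struct two_arr *c)
{
	if (c->reserve_ptr - c->reserve_top <= 0) {
		void **tmp;

		if (c->length - c->free_ptr <= 0)
			return NULL;		/* truly out of descriptors */

		tmp = c->reserve_arr;		/* the swap, as above */
		c->reserve_arr = c->free_arr;
		c->free_arr = tmp;
		c->reserve_ptr = c->length;
		c->reserve_top = c->free_ptr;
		c->free_ptr = c->length;
	}
	return c->reserve_arr[--c->reserve_ptr];
}

static void two_arr_free(struct two_arr *c, void *dtrh)
{
	c->free_arr[--c->free_ptr] = dtrh;
}
#endif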

/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channelh: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to work array.
 *
 */
static void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
	vxge_assert(channel->work_arr[channel->post_index] == NULL);

	channel->work_arr[channel->post_index++] = dtrh;

	/* wrap-around */
	if (channel->post_index == channel->length)
		channel->post_index = 0;
}
1061 | ||
1062 | /* | |
1063 | * vxge_hw_channel_dtr_try_complete - Returns next completed dtr | |
1064 | * @channel: Channel | |
1065 | * @dtr: Buffer to return the next completed DTR pointer | |
1066 | * | |
1067 | * Returns the next completed dtr with out removing it from work array | |
1068 | * | |
1069 | */ | |
1070 | void | |
1071 | vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh) | |
1072 | { | |
1073 | vxge_assert(channel->compl_index < channel->length); | |
1074 | ||
1075 | *dtrh = channel->work_arr[channel->compl_index]; | |
3f23e436 | 1076 | prefetch(*dtrh); |
11324132 RV |
1077 | } |

/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from work array
 *
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
	channel->work_arr[channel->compl_index] = NULL;

	/* wrap-around */
	if (++channel->compl_index == channel->length)
		channel->compl_index = 0;

	channel->stats->total_compl_cnt++;
}

/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtr: DTR pointer
 *
 * Returns the dtr to free array
 *
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
	channel->free_arr[--channel->free_ptr] = dtrh;
}

/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve number of DTRs available. This function cannot be called
 * from the data path. ring_initial_replenish() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
	return (channel->reserve_ptr - channel->reserve_top) +
		(channel->length - channel->free_ptr);
}
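
/*
 * Worked example for vxge_hw_channel_dtr_count() above (illustrative comment
 * only): the count is what is still poppable from the reserve array
 * (reserve_ptr - reserve_top) plus what has been returned to the free array
 * (length - free_ptr). With length = 8, reserve_ptr = 3, reserve_top = 0 and
 * free_ptr = 6, the channel has (3 - 0) + (8 - 6) = 5 DTRs available.
 */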

/**
 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
					     void **rxdh)
{
	enum vxge_hw_status status;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	status = vxge_hw_channel_dtr_alloc(channel, rxdh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_ring_rxd_1 *rxdp =
			(struct vxge_hw_ring_rxd_1 *)*rxdh;

		rxdp->control_0 = rxdp->control_1 = 0;
	}

	return status;
}

/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_free(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares a rxd and posts
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_post(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	wmb();
	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(channel, rxdh);

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
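
/*
 * Illustrative sketch (not part of the driver): one Rx replenish step built
 * from vxge_hw_ring_rxd_reserve() and vxge_hw_ring_rxd_post() above. The
 * buffer fill-in is schematic (only buffer0_ptr is shown) and
 * example_map_skb() is hypothetical.
 */
#if 0	/* example only, never compiled */
static enum vxge_hw_status example_replenish_one(struct __vxge_hw_ring *ring)
{
	struct vxge_hw_ring_rxd_1 *rxdp;
	void *rxdh;

	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) != VXGE_HW_OK)
		return VXGE_HW_INF_OUT_OF_DESCRIPTORS;

	rxdp = rxdh;
	rxdp->buffer0_ptr = example_map_skb(ring, rxdp);	/* hypothetical */

	/* hand ownership to the adapter; rxd_post issues the wmb() itself */
	vxge_hw_ring_rxd_post(ring, rxdh);

	return VXGE_HW_OK;
}
#endif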

/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
	wmb();
	vxge_hw_ring_rxd_post_post(ring, rxdh);
}

/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 * Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the
 * remaining completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_ring_rxd_1 *rxdp;
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 control_0, own;

	channel = &ring->channel;

	vxge_hw_channel_dtr_try_complete(channel, rxdh);

	rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
	if (rxdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	control_0 = rxdp->control_0;
	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

	/* check whether it is not the end */
	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {

		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
			    0);

		++ring->cmpl_cnt;
		vxge_hw_channel_dtr_complete(channel);

		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

		ring->stats->common_stats.usage_cnt++;
		if (ring->stats->common_stats.usage_max <
			ring->stats->common_stats.usage_cnt)
			ring->stats->common_stats.usage_max =
				ring->stats->common_stats.usage_cnt;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* reset it. since we don't want to return
	 * garbage to the driver */
	*rxdh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}

/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when it encounters a critical error.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	/* If the t_code is not supported and if the
	 * t_code is other than 0x5 (unparseable packet
	 * such as unknown IPv6 header), Drop it !!!
	 */

	if (t_code == VXGE_HW_RING_T_CODE_OK ||
	    t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
		status = VXGE_HW_OK;
		goto exit;
	}

	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
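
/*
 * Illustrative sketch (not part of the driver): the Rx completion loop built
 * from vxge_hw_ring_rxd_next_completed() and vxge_hw_ring_handle_tcode()
 * above. example_deliver() is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_ring_poll(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
		    VXGE_HW_OK) {
			vxge_hw_ring_rxd_free(ring, rxdh);	/* drop it */
			continue;
		}
		example_deliver(ring, rxdh);	/* hypothetical */
	}
}
#endif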

/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to doorbell FIFO
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
					  u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
		&fifo->nofl_db->control_0);

	mmiowb();

	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

	mmiowb();
}

/**
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
	return vxge_hw_channel_dtr_count(&fifoh->channel);
}

/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifoh: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of driver to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
	struct __vxge_hw_fifo *fifo,
	void **txdlh, void **txdl_priv)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status;
	int i;

	channel = &fifo->channel;

	status = vxge_hw_channel_dtr_alloc(channel, txdlh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_fifo_txd *txdp =
			(struct vxge_hw_fifo_txd *)*txdlh;
		struct __vxge_hw_fifo_txdl_priv *priv;

		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

		/* reset the TxDL's private */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->next_txdl_priv = NULL;

		*txdl_priv = (void *)(size_t)txdp->host_control;

		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}
	}

	return status;
}

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *	(of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
				  void *txdlh, u32 frag_idx,
				  dma_addr_t dma_pointer, u32 size)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp, *txdp_last;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

	if (frag_idx != 0)
		txdp->control_0 = txdp->control_1 = 0;
	else {
		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
		txdp->control_1 |= fifo->interrupt_type;
		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
			fifo->tx_intr_num);
		if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
				(txdl_priv->frags - 1);
			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
		}
	}

	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
	fifo->stats->total_buffers++;
	txdl_priv->frags++;
}

/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = (struct vxge_hw_fifo_txd *)txdlh;

	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
		VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
		fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}
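
/*
 * Usage sketch (illustrative only): the reserve/fill/post sequence for a
 * single-fragment send. `fifo', `dma' and `len' are hypothetical locals;
 * error handling is elided.
 *
 *	void *txdlh, *txdl_priv;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) ==
 *	    VXGE_HW_OK) {
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma, len);
 *		vxge_hw_fifo_txdl_post(fifo, txdlh);
 *	}
 */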

/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = (struct vxge_hw_fifo_txd *)*txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether host owns it */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* no more completions */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
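
/*
 * Usage sketch (illustrative only): draining completions from a deferred
 * context. `fifo' is a hypothetical local; a real caller would also unmap
 * buffers and free skbs before releasing each descriptor.
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
 *			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */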

/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when it encounters a critical error.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
					      void *txdlh,
					      enum vxge_hw_fifo_tcode t_code)
{
	struct __vxge_hw_channel *channel;

	enum vxge_hw_status status = VXGE_HW_OK;
	channel = &fifo->channel;

	if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
	return status;
}

/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	u32 max_frags;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
			(struct vxge_hw_fifo_txd *)txdlh);

	max_frags = fifo->config->max_frags;

	vxge_hw_channel_dtr_free(channel, txdlh);
}

/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *               enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN],
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	switch (duplicate_mode) {
	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
		i = 0;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
		i = 1;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
		i = 2;
		break;
	default:
		i = 0;
		break;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
	return status;
}
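
/*
 * Usage sketch (illustrative only): adding a unicast entry. The address,
 * the all-0xFF mask and the `vp' local are hypothetical; the mask is
 * interpreted per the @macaddr_mask parameter described above.
 *
 *	u8 mac[ETH_ALEN]  = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *	u8 mask[ETH_ALEN] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
 *
 *	vxge_hw_vpath_mac_addr_add(vp, mac, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 */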

/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *               vpath from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}

exit:
	return status;
}
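
/*
 * Usage sketch (illustrative only): walking the whole DA table of a vpath
 * with the get/get_next pair. `vp' is a hypothetical local; iteration
 * stops at the first status other than VXGE_HW_OK.
 *
 *	u8 mac[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, mac, mask);
 *	while (status == VXGE_HW_OK) {
 *		... consume mac and mask ...
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, mac, mask);
 *	}
 */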

/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Delete the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the first vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
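
/*
 * Usage sketch (illustrative only): registering and later removing a VLAN
 * id on a vpath. `vp' is a hypothetical local and 100 an arbitrary id.
 *
 *	vxge_hw_vpath_vid_add(vp, 100);
 *	...
 *	vxge_hw_vpath_vid_delete(vp, 100);
 */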

/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
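
/*
 * Usage sketch (illustrative only): mirroring the netdev IFF_PROMISC flag
 * onto a vpath. `dev' and `vp' are hypothetical locals.
 *
 *	if (dev->flags & IFF_PROMISC)
 *		vxge_hw_vpath_promisc_enable(vp);
 *	else
 *		vxge_hw_vpath_promisc_disable(vp);
 */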

/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 *
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
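
/*
 * Usage sketch (illustrative only): mirroring the netdev IFF_ALLMULTI
 * flag onto a vpath. `dev' and `vp' are hypothetical locals.
 *
 *	if (dev->flags & IFF_ALLMULTI)
 *		vxge_hw_vpath_mcast_enable(vp);
 *	else
 *		vxge_hw_vpath_mcast_disable(vp);
 */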

/*
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Virtual Path handle.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}

/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *               alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *               interrupts (can be repeated). If fifo or ring are not enabled,
 *               the MSIX vector for that should be set to 0.
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API associates the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
		(vp_id * 4) + tim_msix_id[0]) |
		 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
		(vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
	    VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
				0, 32), &vp_reg->one_shot_vect0_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);
	}
}
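
/*
 * Usage sketch (illustrative only): one plausible vector layout, with the
 * fifo on vector 0, the ring on vector 1 and the alarm on vector 2. The
 * layout and the alarm id are assumptions, not mandated by this API;
 * `vp' is a hypothetical local.
 *
 *	int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0, 1, 0, 0};
 *
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, 2);
 */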

/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id.
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id.
 */
void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
	else
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id.
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
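
/*
 * Usage sketch (illustrative only): the mask/service/clear pattern of a
 * per-vector interrupt handler. `vp' and `msix_id' are hypothetical; per
 * the code above, vxge_hw_vpath_msix_clear() re-arms the vector according
 * to the configured intr_mode, so a separate unmask is not shown.
 *
 *	vxge_hw_vpath_msix_mask(vp, msix_id);
 *	... service the fifo or ring mapped to this vector ...
 *	vxge_hw_vpath_msix_clear(vp, msix_id);
 */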

/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}

/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			   tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}

/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	u64 val64 = 0;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			val64 =
			  readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
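
/*
 * Usage sketch (illustrative only): calling the Rx poll routine from a
 * deferred context and re-polling while completions remain, per the
 * Returns documented above. `ring' is a hypothetical local.
 *
 *	enum vxge_hw_status status;
 *
 *	do {
 *		status = vxge_hw_vpath_poll_rx(ring);
 *	} while (status == VXGE_HW_COMPLETIONS_REMAIN);
 */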

/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: Array through which completed skbs are returned to the
 *           caller for freeing.
 * @nr_skb: Number of entries in the @skb_ptr array.
 * @more: Set when further completions remain to be processed.
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					struct sk_buff ***skb_ptr, int nr_skb,
					int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}
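
/*
 * Usage sketch (illustrative only): polling Tx completions and freeing
 * the returned skbs outside the poll call. It assumes the fifo callback
 * advances the @skb_ptr cursor past each skb it hands back (the contract
 * implied by the sk_buff *** parameter); `fifo' is a hypothetical local
 * and 16 an arbitrary batch size.
 *
 *	struct sk_buff *completed[16];
 *	struct sk_buff **cursor = completed;
 *	int more = 0;
 *
 *	vxge_hw_vpath_poll_tx(fifo, &cursor, 16, &more);
 *	while (cursor != completed)
 *		dev_kfree_skb(*--cursor);
 */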