/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"

static enum vxge_hw_status
__vxge_hw_fifo_delete(
	struct __vxge_hw_vpath_handle *vpath_handle);

static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
			u32 size);

static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
			struct __vxge_hw_blockpool_entry *entry);

static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
					void *block_addr,
					u32 length,
					struct pci_dev *dma_h,
					struct pci_dev *acc_handle);

static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
			struct __vxge_hw_blockpool *blockpool,
			u32 pool_size,
			u32 pool_max);

static void
__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);

static void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
			u32 size,
			struct vxge_hw_mempool_dma *dma_object);

static void
__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
			void *memblock,
			u32 size,
			struct vxge_hw_mempool_dma *dma_object);

static void
__vxge_hw_channel_free(
	struct __vxge_hw_channel *channel);

static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);

static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);

static enum vxge_hw_status
__vxge_hw_device_register_poll(
	void __iomem *reg,
	u64 mask, u32 max_millis);

static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
			  u64 mask, u32 max_millis)
{
	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
	wmb();

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
	wmb();

	return __vxge_hw_device_register_poll(addr, mask, max_millis);
}
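
/*
 * Usage sketch (illustrative only): callers pair __vxge_hw_pio_mem_write64
 * with a strobe bit so the trailing poll confirms the hardware consumed the
 * command.  The firmware-API path later in this file does exactly this:
 *
 *	status = __vxge_hw_pio_mem_write64(val64,
 *			&vp_reg->rts_access_steer_ctrl,
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
 *			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 */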

static struct vxge_hw_mempool *
__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
			 u32 item_size, u32 private_size, u32 items_initial,
			 u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
			 void *userdata);

static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);

static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_vpath_stats_hw_info *hw_stats);

static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);

static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);

static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);

static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);

static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);

static void
vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
{
	u64 val64;

	val64 = readq(&vp_reg->rxmac_vcfg0);
	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	writeq(val64, &vp_reg->rxmac_vcfg0);
	val64 = readq(&vp_reg->rxmac_vcfg0);
}

/*
 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
 */
int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct __vxge_hw_virtualpath *vpath;
	u64 val64, rxd_count, rxd_spat;
	int count = 0, total_count = 0;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;

	vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);

	/* Check that the ring controller for this vpath has enough free RxDs
	 * to send frames to the host.  This is done by reading the
	 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
	 * RXD_SPAT value for the vpath.
	 */
	val64 = readq(&vp_reg->prc_cfg6);
	rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
	/* Use a factor of 2 when comparing rxd_count against rxd_spat for
	 * some legroom.
	 */
	rxd_spat *= 2;

	do {
		mdelay(1);

		rxd_count = readq(&vp_reg->prc_rxd_doorbell);

		/* Check that the ring controller for this vpath does
		 * not have any frame in its pipeline.
		 */
		val64 = readq(&vp_reg->frm_in_progress_cnt);
		if ((rxd_count <= rxd_spat) || (val64 > 0))
			count = 0;
		else
			count++;
		total_count++;
	} while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
			(total_count < VXGE_HW_MAX_POLLING_COUNT));

	if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
		printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
			__func__);

	return total_count;
}

/* vxge_hw_device_wait_receive_idle - This function waits until all frames
 * stored in the frame buffer for each vpath assigned to the given
 * function (hldev) have been sent to the host.
 */
void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
{
	int i, total_count = 0;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
		if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
			break;
	}
}

static enum vxge_hw_status
vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
		     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
		     u64 *steer_ctrl)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status;
	u64 val64;
	u32 retry = 0, max_retry = 100;

	vp_reg = vpath->vp_reg;

	if (vpath->vp_open) {
		max_retry = 3;
		spin_lock(&vpath->lock);
	}

	writeq(*data0, &vp_reg->rts_access_steer_data0);
	writeq(*data1, &vp_reg->rts_access_steer_data1);
	wmb();

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		*steer_ctrl;

	status = __vxge_hw_pio_mem_write64(val64,
					   &vp_reg->rts_access_steer_ctrl,
					   VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
					   VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	/* The __vxge_hw_device_register_poll can udelay for a significant
	 * amount of time, blocking other processes from the CPU.  If it
	 * delays for ~5 secs, an NMI error can occur.  A way around this is
	 * to give up the processor via msleep, but this is not allowed if
	 * under lock.  So, only allow it to sleep for ~4 secs if open.
	 * Otherwise, delay for 1 sec and sleep for 10 ms until the firmware
	 * operation has completed or timed out.
	 */
	while ((status != VXGE_HW_OK) && retry++ < max_retry) {
		if (!vpath->vp_open)
			msleep(20);
		status = __vxge_hw_device_register_poll(
				&vp_reg->rts_access_steer_ctrl,
				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	}

	if (status != VXGE_HW_OK)
		goto out;

	val64 = readq(&vp_reg->rts_access_steer_ctrl);
	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
		*data0 = readq(&vp_reg->rts_access_steer_data0);
		*data1 = readq(&vp_reg->rts_access_steer_data1);
		*steer_ctrl = val64;
	} else
		status = VXGE_HW_FAIL;

out:
	if (vpath->vp_open)
		spin_unlock(&vpath->lock);
	return status;
}
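
/*
 * Calling-convention sketch (illustrative only): data0/data1/steer_ctrl are
 * in/out arguments -- callers preload them with the request and read the
 * firmware's reply back from the same variables, as the routines below do:
 *
 *	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
 *
 *	status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_FW_UPGRADE_ACTION,
 *				      VXGE_HW_FW_UPGRADE_MEMO,
 *				      VXGE_HW_FW_UPGRADE_OFFSET_READ,
 *				      &data0, &data1, &steer_ctrl);
 */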

enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
			     u32 *minor, u32 *build)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_READ,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	*major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
	*minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
	*build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

	return status;
}

enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	u32 ret;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
		goto exit;
	}

	ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
	if (ret != 1) {
		vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
				__func__, ret);
		status = VXGE_HW_FAIL;
	}

exit:
	return status;
}

enum vxge_hw_status
vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	int ret_code, sec_code;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	/* send upgrade start command */
	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_START,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
				__func__);
		return status;
	}

	/* Transfer fw image to adapter 16 bytes at a time */
	for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
		steer_ctrl = 0;

		/* The next 128 bits of fwdata to be loaded onto the adapter */
		data0 = *((u64 *)fwdata);
		data1 = *((u64 *)fwdata + 1);

		status = vxge_hw_vpath_fw_api(vpath,
					      VXGE_HW_FW_UPGRADE_ACTION,
					      VXGE_HW_FW_UPGRADE_MEMO,
					      VXGE_HW_FW_UPGRADE_OFFSET_SEND,
					      &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
					__func__);
			goto out;
		}

		ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
		switch (ret_code) {
		case VXGE_HW_FW_UPGRADE_OK:
			/* All OK, send next 16 bytes. */
			break;
		case VXGE_FW_UPGRADE_BYTES2SKIP:
			/* skip bytes in the stream */
			fwdata += (data0 >> 8) & 0xFFFFFFFF;
			break;
		case VXGE_HW_FW_UPGRADE_DONE:
			goto out;
		case VXGE_HW_FW_UPGRADE_ERR:
			sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
			switch (sec_code) {
			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
				printk(KERN_ERR
				       "corrupted data from .ncf file\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
				printk(KERN_ERR "invalid .ncf file\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
				printk(KERN_ERR "buffer overflow\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
				printk(KERN_ERR "failed to flash the image\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
				printk(KERN_ERR
				       "generic error. Unknown error type\n");
				break;
			default:
				printk(KERN_ERR "Unknown error of type %d\n",
				       sec_code);
				break;
			}
			status = VXGE_HW_FAIL;
			goto out;
		default:
			printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
			status = VXGE_HW_FAIL;
			goto out;
		}
		/* point to next 16 bytes */
		fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
	}
out:
	return status;
}
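
/*
 * Illustrative upgrade sequence (assumed caller flow, not part of this
 * file): read the running version, stream the new image, then commit it
 * to flash:
 *
 *	status = vxge_hw_upgrade_read_version(hldev, &major, &minor, &build);
 *	if (status == VXGE_HW_OK)
 *		status = vxge_update_fw_image(hldev, fw->data, fw->size);
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_flash_fw(hldev);
 */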

enum vxge_hw_status
vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
				struct eprom_image *img)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	int i;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
		data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
		data1 = steer_ctrl = 0;

		status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			VXGE_HW_FW_API_GET_EPROM_REV,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			break;

		img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
		img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
		img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
		img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
	}

	return status;
}

/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
static struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
			   enum __vxge_hw_channel_type type,
			   u32 length, u32 per_dtr_space, void *userdata)
{
	struct __vxge_hw_channel *channel;
	struct __vxge_hw_device *hldev;
	int size = 0;
	u32 vp_id;

	hldev = vph->vpath->hldev;
	vp_id = vph->vpath->vp_id;

	switch (type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		size = sizeof(struct __vxge_hw_fifo);
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		size = sizeof(struct __vxge_hw_ring);
		break;
	default:
		break;
	}

	channel = kzalloc(size, GFP_KERNEL);
	if (channel == NULL)
		goto exit0;
	INIT_LIST_HEAD(&channel->item);

	channel->common_reg = hldev->common_reg;
	channel->first_vp_id = hldev->first_vp_id;
	channel->type = type;
	channel->devh = hldev;
	channel->vph = vph;
	channel->userdata = userdata;
	channel->per_dtr_space = per_dtr_space;
	channel->length = length;
	channel->vp_id = vp_id;

	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->work_arr == NULL)
		goto exit1;

	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->free_arr == NULL)
		goto exit1;
	channel->free_ptr = length;

	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->reserve_arr == NULL)
		goto exit1;
	channel->reserve_ptr = length;
	channel->reserve_top = 0;

	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->orig_arr == NULL)
		goto exit1;

	return channel;
exit1:
	__vxge_hw_channel_free(channel);

exit0:
	return NULL;
}
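
/*
 * Lifecycle sketch (illustrative only, error handling elided): a channel
 * built here is wired up once its reserve array is populated, and torn
 * down with __vxge_hw_channel_free():
 *
 *	channel = __vxge_hw_channel_allocate(vph, VXGE_HW_CHANNEL_TYPE_RING,
 *					     length, per_dtr_space, userdata);
 *	...fill channel->reserve_arr...
 *	__vxge_hw_channel_initialize(channel);
 *	...
 *	__vxge_hw_channel_free(channel);
 *
 * Note the error path above relies on kfree(NULL) being a no-op, so a
 * partially built channel can be handed straight to __vxge_hw_channel_free.
 */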

/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
	kfree(channel->work_arr);
	kfree(channel->free_arr);
	kfree(channel->reserve_arr);
	kfree(channel->orig_arr);
	kfree(channel);
}

/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
static enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
	u32 i;
	struct __vxge_hw_virtualpath *vpath;

	vpath = channel->vph->vpath;

	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
		for (i = 0; i < channel->length; i++)
			channel->orig_arr[i] = channel->reserve_arr[i];
	}

	switch (channel->type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
		channel->stats = &((struct __vxge_hw_fifo *)
				channel)->stats->common_stats;
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		vpath->ringh = (struct __vxge_hw_ring *)channel;
		channel->stats = &((struct __vxge_hw_ring *)
				channel)->stats->common_stats;
		break;
	default:
		break;
	}

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
static enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
	u32 i;

	for (i = 0; i < channel->length; i++) {
		if (channel->reserve_arr != NULL)
			channel->reserve_arr[i] = channel->orig_arr[i];
		if (channel->free_arr != NULL)
			channel->free_arr[i] = NULL;
		if (channel->work_arr != NULL)
			channel->work_arr[i] = NULL;
	}
	channel->free_ptr = channel->length;
	channel->reserve_ptr = channel->length;
	channel->reserve_top = 0;
	channel->post_index = 0;
	channel->compl_index = 0;

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values.  Save config space for future hw resets.
 */
static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
	u16 cmd = 0;

	/* Set the PErr Response and SERR bits in the PCI command register. */
	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
	cmd |= 0x140;
	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

	pci_save_state(hldev->pdev);
}

/*
 * __vxge_hw_device_register_poll
 * Will poll certain register for specified amount of time.
 * Polls until the masked bits are cleared or the timeout expires.
 */
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
	u64 val64;
	u32 i = 0;
	enum vxge_hw_status ret = VXGE_HW_FAIL;

	udelay(10);

	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		udelay(100);
	} while (++i <= 9);

	i = 0;
	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		mdelay(1);
	} while (++i <= max_millis);

	return ret;
}

/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * in progress
 * This routine checks that the vpath reset-in-progress register reads zero
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
	enum vxge_hw_status status;
	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	return status;
}

/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc = NULL;
	enum vxge_hw_status status;

	struct vxge_hw_legacy_reg __iomem *legacy_reg =
		(struct vxge_hw_legacy_reg __iomem *)bar0;

	status = __vxge_hw_legacy_swapper_set(legacy_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&legacy_reg->toc_first_pointer);
	toc = (struct vxge_hw_toc_reg __iomem *)(bar0 + val64);
exit:
	return toc;
}

/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object.  It waits until the ric is
 * completed initializing registers.
 */
static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
	if (hldev->toc_reg == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
		(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
			(struct vxge_hw_vpmgmt_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
				(hldev->bar0 + val64);
	}

	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	status = __vxge_hw_device_vpath_reset_in_prog_check(
			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}

/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

	switch (host_type) {
	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
		if (func_id == 0) {
			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
					 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		}
		break;
	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
		break;
	case VXGE_HW_SR_VH_FUNCTION0:
	case VXGE_HW_VH_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	}

	return access_rights;
}

/*
 * __vxge_hw_device_is_privilaged
 * This routine checks if the device function is privileged or not
 */
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
	if (__vxge_hw_device_access_rights_get(host_type,
			func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
		return VXGE_HW_OK;
	else
		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}

/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
 * Returns the function number of the vpath.
 */
static u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
	u64 val64;

	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

	return
	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}
40a3a915 RV |
816 | /* |
817 | * __vxge_hw_device_host_info_get | |
818 | * This routine returns the host type assignments | |
819 | */ | |
8424e00d | 820 | static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) |
40a3a915 RV |
821 | { |
822 | u64 val64; | |
823 | u32 i; | |
824 | ||
825 | val64 = readq(&hldev->common_reg->host_type_assignments); | |
826 | ||
827 | hldev->host_type = | |
828 | (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); | |
829 | ||
830 | hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); | |
831 | ||
832 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | |
40a3a915 RV |
833 | if (!(hldev->vpath_assignments & vxge_mBIT(i))) |
834 | continue; | |
835 | ||
836 | hldev->func_id = | |
8424e00d | 837 | __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]); |
40a3a915 RV |
838 | |
839 | hldev->access_rights = __vxge_hw_device_access_rights_get( | |
840 | hldev->host_type, hldev->func_id); | |
841 | ||
8424e00d JM |
842 | hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN; |
843 | hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i]; | |
844 | ||
40a3a915 RV |
845 | hldev->first_vp_id = i; |
846 | break; | |
847 | } | |
40a3a915 RV |
848 | } |
849 | ||
850 | /* | |
851 | * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as | |
852 | * link width and signalling rate. | |
853 | */ | |
854 | static enum vxge_hw_status | |
855 | __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev) | |
856 | { | |
857 | int exp_cap; | |
858 | u16 lnk; | |
859 | ||
860 | /* Get the negotiated link width and speed from PCI config space */ | |
861 | exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP); | |
862 | pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk); | |
863 | ||
864 | if ((lnk & PCI_EXP_LNKSTA_CLS) != 1) | |
865 | return VXGE_HW_ERR_INVALID_PCI_INFO; | |
866 | ||
867 | switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) { | |
868 | case PCIE_LNK_WIDTH_RESRV: | |
869 | case PCIE_LNK_X1: | |
870 | case PCIE_LNK_X2: | |
871 | case PCIE_LNK_X4: | |
872 | case PCIE_LNK_X8: | |
873 | break; | |
874 | default: | |
875 | return VXGE_HW_ERR_INVALID_PCI_INFO; | |
876 | } | |
877 | ||
878 | return VXGE_HW_OK; | |
879 | } | |
880 | ||
40a3a915 RV |
881 | /* |
882 | * __vxge_hw_device_initialize | |
883 | * Initialize Titan-V hardware. | |
884 | */ | |
2c91308f JM |
885 | static enum vxge_hw_status |
886 | __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) | |
40a3a915 RV |
887 | { |
888 | enum vxge_hw_status status = VXGE_HW_OK; | |
889 | ||
92cdd7c3 SH |
890 | if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type, |
891 | hldev->func_id)) { | |
5dbc9011 SS |
892 | /* Validate the pci-e link width and speed */ |
893 | status = __vxge_hw_verify_pci_e_info(hldev); | |
894 | if (status != VXGE_HW_OK) | |
895 | goto exit; | |
896 | } | |
40a3a915 | 897 | |
40a3a915 RV |
898 | exit: |
899 | return status; | |
900 | } | |

/*
 * __vxge_hw_vpath_fw_ver_get - Get the fw version
 * Returns FW Version
 */
static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
			   struct vxge_hw_device_hw_info *hw_info)
{
	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		goto exit;

	fw_date->day =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
	fw_date->month =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
	fw_date->year =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);

	snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
		 fw_date->month, fw_date->day, fw_date->year);

	fw_version->major =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
	fw_version->minor =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
	fw_version->build =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

	snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
		 fw_version->major, fw_version->minor, fw_version->build);

	flash_date->day =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
	flash_date->month =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
	flash_date->year =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);

	snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
		 flash_date->month, flash_date->day, flash_date->year);

	flash_version->major =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
	flash_version->minor =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
	flash_version->build =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);

	snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
		 flash_version->major, flash_version->minor,
		 flash_version->build);

exit:
	return status;
}

/*
 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
 * part number and product description.
 */
static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
			      struct vxge_hw_device_hw_info *hw_info)
{
	enum vxge_hw_status status;
	u64 data0, data1 = 0, steer_ctrl = 0;
	u8 *serial_number = hw_info->serial_number;
	u8 *part_number = hw_info->part_number;
	u8 *product_desc = hw_info->product_desc;
	u32 i, j = 0;

	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	((u64 *)serial_number)[0] = be64_to_cpu(data0);
	((u64 *)serial_number)[1] = be64_to_cpu(data1);

	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
	data1 = steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	((u64 *)part_number)[0] = be64_to_cpu(data0);
	((u64 *)part_number)[1] = be64_to_cpu(data1);

	for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
	     i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
		data0 = i;
		data1 = steer_ctrl = 0;

		status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			return status;

		((u64 *)product_desc)[j++] = be64_to_cpu(data0);
		((u64 *)product_desc)[j++] = be64_to_cpu(data1);
	}

	return status;
}

/*
 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
 * Returns pci function mode
 */
static enum vxge_hw_status
__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
				  struct vxge_hw_device_hw_info *hw_info)
{
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	data0 = 0;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_FW_API_GET_FUNC_MODE,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
	return status;
}

/*
 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
 * from MAC address table.
 */
static enum vxge_hw_status
__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
			 u8 *macaddr, u8 *macaddr_mask)
{
	u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
	    data0 = 0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;
	int i;

	do {
		status = vxge_hw_vpath_fw_api(vpath, action,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			goto exit;

		data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
							data1);

		for (i = ETH_ALEN; i > 0; i--) {
			macaddr[i - 1] = (u8) (data0 & 0xFF);
			data0 >>= 8;

			macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
			data1 >>= 8;
		}

		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
		data0 = 0, data1 = 0, steer_ctrl = 0;

	} while (!is_valid_ether_addr(macaddr));
exit:
	return status;
}

/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information, and the first MAC address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
			   struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status;
	struct __vxge_hw_virtualpath vpath;

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	toc = __vxge_hw_device_toc_get(bar0);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
			hw_info->func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
			wmb();
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
			       (bar0 + val64);
		vpath.vp_open = 0;

		status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
			       (bar0 + val64);
		vpath.vp_open = 0;

		status = __vxge_hw_vpath_addr_get(&vpath,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
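
/*
 * Usage sketch (illustrative only; assumes the caller has already mapped
 * BAR0): a probe routine can size up the adapter before building the full
 * device object:
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *
 *	status = vxge_hw_device_hw_info_get(bar0, &hw_info);
 *	if (status == VXGE_HW_OK)
 *		...use hw_info.vpath_mask, hw_info.fw_version,
 *		   hw_info.mac_addrs[]...
 */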

/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device.  Note that all the arguments of this public API
 * are 'IN', including @hldev.  Driver cooperates with
 * OS to find new Titan device, locate its PCI and memory spaces.
 *
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
 * to enable the latter to perform Titan hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config)
{
	u32 i;
	u32 nblocks = 0;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_config_check(device_config);
	if (status != VXGE_HW_OK)
		goto exit;

	hldev = (struct __vxge_hw_device *)
			vzalloc(sizeof(struct __vxge_hw_device));
	if (hldev == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	hldev->magic = VXGE_HW_DEVICE_MAGIC;

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

	/* apply config */
	memcpy(&hldev->config, device_config,
		sizeof(struct vxge_hw_device_config));

	hldev->bar0 = attr->bar0;
	hldev->pdev = attr->pdev;

	hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
	hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
	hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK) {
		vfree(hldev);
		goto exit;
	}

	__vxge_hw_device_host_info_get(hldev);

	/* Incrementing for stats blocks */
	nblocks++;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		if (device_config->vp_config[i].ring.enable ==
			VXGE_HW_RING_ENABLE)
			nblocks += device_config->vp_config[i].ring.ring_blocks;

		if (device_config->vp_config[i].fifo.enable ==
			VXGE_HW_FIFO_ENABLE)
			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
		nblocks++;
	}

	if (__vxge_hw_blockpool_create(hldev,
		&hldev->block_pool,
		device_config->dma_blockpool_initial + nblocks,
		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

		vxge_hw_device_terminate(hldev);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_device_initialize(hldev);
	if (status != VXGE_HW_OK) {
		vxge_hw_device_terminate(hldev);
		goto exit;
	}

	*devh = hldev;
exit:
	return status;
}
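
/*
 * Pairing sketch (illustrative only): every successful
 * vxge_hw_device_initialize() must eventually be balanced by
 * vxge_hw_device_terminate() below, which tears down the block pool and
 * frees the device object:
 *
 *	struct __vxge_hw_device *hldev;
 *
 *	status = vxge_hw_device_initialize(&hldev, &attr, device_config);
 *	...
 *	vxge_hw_device_terminate(hldev);
 */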

/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	__vxge_hw_blockpool_destroy(&hldev->block_pool);
	vfree(hldev);
}

/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
			 struct vxge_hw_device_stats_hw_info *hw_stats)
{
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
			(hldev->virtual_paths[i].vp_open ==
				VXGE_HW_VP_NOT_OPEN))
			continue;

		memcpy(hldev->virtual_paths[i].hw_stats_sav,
				hldev->virtual_paths[i].hw_stats,
				sizeof(struct vxge_hw_vpath_stats_hw_info));

		status = __vxge_hw_vpath_stats_get(
			&hldev->virtual_paths[i],
			hldev->virtual_paths[i].hw_stats);
	}

	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_hw_info));

	return status;
}

/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
			struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_sw_info *sw_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_sw_info));

	return status;
}

/*
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
 *                               and offset and perform an operation
 * Get the statistics from the given location and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
			    u32 operation, u32 location, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
				hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
	else
		*stat = 0;
exit:
	return status;
}

/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)aggr_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (104 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}

/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_port_stats *port_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = 0x0;
	val64 = (u64 *) port_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (608 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}

exit:
	return status;
}

/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
			      struct vxge_hw_xmac_stats *xmac_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 i;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					0, &xmac_stats->aggr_stats[0]);
	if (status != VXGE_HW_OK)
		goto exit;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					1, &xmac_stats->aggr_stats[1]);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		status = vxge_hw_device_xmac_port_stats_get(hldev,
					i, &xmac_stats->port_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		status = __vxge_hw_vpath_xmac_tx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_tx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_xmac_rx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_rx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}

/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
			      enum vxge_debug_level level, u32 mask)
{
	if (hldev == NULL)
		return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)
	hldev->debug_module_mask = mask;
	hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
	hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
	hldev->level_trace = level & VXGE_TRACE;
#endif
}

/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return VXGE_ERR;
	else
		return hldev->level_err;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
	if (hldev == NULL)
		return VXGE_TRACE;
	else
		return hldev->level_trace;
#else
	return 0;
#endif
}
40a3a915 RV |
1566 | |
1567 | /* | |
1568 | * vxge_hw_device_getpause_data - Get pause frame generation and reception. | |
1569 | * Returns the Pause frame generation and reception capability of the NIC. | |
1570 | */ | |
1571 | enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev, | |
1572 | u32 port, u32 *tx, u32 *rx) | |
1573 | { | |
1574 | u64 val64; | |
1575 | enum vxge_hw_status status = VXGE_HW_OK; | |
1576 | ||
1577 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { | |
1578 | status = VXGE_HW_ERR_INVALID_DEVICE; | |
1579 | goto exit; | |
1580 | } | |
1581 | ||
1582 | if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) { | |
1583 | status = VXGE_HW_ERR_INVALID_PORT; | |
1584 | goto exit; | |
1585 | } | |
1586 | ||
1587 | if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { | |
1588 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | |
1589 | goto exit; | |
1590 | } | |
1591 | ||
1592 | val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); | |
1593 | if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN) | |
1594 | *tx = 1; | |
1595 | if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN) | |
1596 | *rx = 1; | |
1597 | exit: | |
1598 | return status; | |
1599 | } | |
1600 | ||
1601 | /* | |
1602 | * vxge_hw_device_setpause_data - set/reset pause frame generation. | |
1603 | * It can be used to set or reset Pause frame generation or reception | |
1604 | * support of the NIC. | |
1605 | */ | |
40a3a915 RV |
1606 | enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, |
1607 | u32 port, u32 tx, u32 rx) | |
1608 | { | |
1609 | u64 val64; | |
1610 | enum vxge_hw_status status = VXGE_HW_OK; | |
1611 | ||
1612 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { | |
1613 | status = VXGE_HW_ERR_INVALID_DEVICE; | |
1614 | goto exit; | |
1615 | } | |
1616 | ||
1617 | if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) { | |
1618 | status = VXGE_HW_ERR_INVALID_PORT; | |
1619 | goto exit; | |
1620 | } | |
1621 | ||
92cdd7c3 SH |
1622 | status = __vxge_hw_device_is_privilaged(hldev->host_type, |
1623 | hldev->func_id); | |
40a3a915 RV |
1624 | if (status != VXGE_HW_OK) |
1625 | goto exit; | |
1626 | ||
1627 | val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); | |
1628 | if (tx) | |
1629 | val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN; | |
1630 | else | |
1631 | val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN; | |
1632 | if (rx) | |
1633 | val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN; | |
1634 | else | |
1635 | val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN; | |
1636 | ||
1637 | writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); | |
1638 | exit: | |
1639 | return status; | |
1640 | } | |
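/*
 * Illustrative sketch, not part of the driver: the get/set pair above is
 * how an ethtool pauseparam handler would typically use this API. Port 0
 * is assumed. Note the outputs are only written when the corresponding
 * bits are set, hence the zero initialization.
 */
static enum vxge_hw_status
vxge_example_toggle_pause(struct __vxge_hw_device *hldev)
{
	u32 tx = 0, rx = 0;
	enum vxge_hw_status status;

	status = vxge_hw_device_getpause_data(hldev, 0, &tx, &rx);
	if (status != VXGE_HW_OK)
		return status;

	/* invert the current generation/reception settings */
	return vxge_hw_device_setpause_data(hldev, 0, !tx, !rx);
}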
1641 | ||
1642 | u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev) | |
1643 | { | |
1644 | int link_width, exp_cap; | |
1645 | u16 lnk; | |
1646 | ||
1647 | exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP); | |
1648 | pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk); | |
1649 | link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4; | |
1650 | return link_width; | |
1651 | } | |
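/*
 * Worked example (assumed values): if LNKSTA reads 0x0081 and
 * VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH masks the negotiated-width field in
 * bits 9:4, the routine above computes (0x0081 & mask) >> 4 = 8, i.e.
 * it reports a x8 link.
 */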
1652 | ||
1653 | /* | |
1654 | * __vxge_hw_ring_block_memblock_idx - Return the memblock index | |
1655 | * This function returns the index of memory block | |
1656 | */ | |
1657 | static inline u32 | |
1658 | __vxge_hw_ring_block_memblock_idx(u8 *block) | |
1659 | { | |
1660 | return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)); | |
1661 | } | |
1662 | ||
1663 | /* | |
1664 | * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index | |
1665 | * This function sets the memblock index in a given RxD block | |
1666 | */ | |
1667 | static inline void | |
1668 | __vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx) | |
1669 | { | |
1670 | *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx; | |
1671 | } | |
1672 | ||
1673 | /* | |
1674 | * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer | |
1675 | * in RxD block | |
1676 | * Sets the next block pointer in RxD block | |
1677 | */ | |
1678 | static inline void | |
1679 | __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next) | |
1680 | { | |
1681 | *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next; | |
1682 | } | |
1683 | ||
1684 | /* | |
1685 | * __vxge_hw_ring_first_block_address_get - Returns the dma address of the | |
1686 | * first block | |
1687 | * Returns the dma address of the first RxD block | |
1688 | */ | |
42821a5b | 1689 | static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) |
40a3a915 RV |
1690 | { |
1691 | struct vxge_hw_mempool_dma *dma_object; | |
1692 | ||
1693 | dma_object = ring->mempool->memblocks_dma_arr; | |
1694 | vxge_assert(dma_object != NULL); | |
1695 | ||
1696 | return dma_object->addr; | |
1697 | } | |
1698 | ||
1699 | /* | |
1700 | * __vxge_hw_ring_item_dma_addr - Return the dma address of an item | |
1701 | * This function returns the dma address of a given item | |
1702 | */ | |
1703 | static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh, | |
1704 | void *item) | |
1705 | { | |
1706 | u32 memblock_idx; | |
1707 | void *memblock; | |
1708 | struct vxge_hw_mempool_dma *memblock_dma_object; | |
1709 | ptrdiff_t dma_item_offset; | |
1710 | ||
1711 | /* get owner memblock index */ | |
1712 | memblock_idx = __vxge_hw_ring_block_memblock_idx(item); | |
1713 | ||
1714 | /* get owner memblock by memblock index */ | |
1715 | memblock = mempoolh->memblocks_arr[memblock_idx]; | |
1716 | ||
1717 | /* get memblock DMA object by memblock index */ | |
1718 | memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx; | |
1719 | ||
1720 | /* calculate offset in the memblock of this item */ | |
1721 | dma_item_offset = (u8 *)item - (u8 *)memblock; | |
1722 | ||
1723 | return memblock_dma_object->addr + dma_item_offset; | |
1724 | } | |
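/*
 * Worked example (assumed values, for illustration only): if the item
 * belongs to memblock 2, memblocks_dma_arr[2].addr is 0x10000 and the
 * item starts 0x2000 bytes into that memblock, the routine above
 * returns 0x10000 + 0x2000 = 0x12000 as the item's DMA address.
 */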
1725 | ||
1726 | /* | |
1727 | * __vxge_hw_ring_rxdblock_link - Link the RxD blocks | |
1728 | * This function links a pair of RxD blocks via the next pointer | |
1729 | */ | |
1730 | static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh, | |
1731 | struct __vxge_hw_ring *ring, u32 from, | |
1732 | u32 to) | |
1733 | { | |
1734 | u8 *to_item, *from_item; | |
1735 | dma_addr_t to_dma; | |
1736 | ||
1737 | /* get "from" RxD block */ | |
1738 | from_item = mempoolh->items_arr[from]; | |
1739 | vxge_assert(from_item); | |
1740 | ||
1741 | /* get "to" RxD block */ | |
1742 | to_item = mempoolh->items_arr[to]; | |
1743 | vxge_assert(to_item); | |
1744 | ||
1745 | /* get the DMA start address of the "to" RxD block */ | |
1746 | to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item); | |
1747 | ||
1748 | /* set the next pointer of the "from" RxD block to point to | |
1749 | * the "to" block's DMA start address */ | |
1750 | __vxge_hw_ring_block_next_pointer_set(from_item, to_dma); | |
1751 | } | |
1752 | ||
1753 | /* | |
1754 | * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD | |
1755 | * block callback | |
1756 | * This function is the callback passed to __vxge_hw_mempool_create to | |
1757 | * create the memory pool for RxD blocks | |
1758 | */ | |
1759 | static void | |
1760 | __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh, | |
1761 | u32 memblock_index, | |
1762 | struct vxge_hw_mempool_dma *dma_object, | |
1763 | u32 index, u32 is_last) | |
1764 | { | |
1765 | u32 i; | |
1766 | void *item = mempoolh->items_arr[index]; | |
1767 | struct __vxge_hw_ring *ring = | |
1768 | (struct __vxge_hw_ring *)mempoolh->userdata; | |
1769 | ||
1770 | /* format rxds array */ | |
1771 | for (i = 0; i < ring->rxds_per_block; i++) { | |
1772 | void *rxdblock_priv; | |
1773 | void *uld_priv; | |
1774 | struct vxge_hw_ring_rxd_1 *rxdp; | |
1775 | ||
1776 | u32 reserve_index = ring->channel.reserve_ptr - | |
1777 | (index * ring->rxds_per_block + i + 1); | |
1778 | u32 memblock_item_idx; | |
1779 | ||
1780 | ring->channel.reserve_arr[reserve_index] = ((u8 *)item) + | |
1781 | i * ring->rxd_size; | |
1782 | ||
1783 | /* Note: memblock_item_idx is index of the item within | |
1784 | * the memblock. For instance, in case of three RxD-blocks | |
1785 | * per memblock this value can be 0, 1 or 2. */ | |
1786 | rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh, | |
1787 | memblock_index, item, | |
1788 | &memblock_item_idx); | |
1789 | ||
1790 | rxdp = (struct vxge_hw_ring_rxd_1 *) | |
1791 | ring->channel.reserve_arr[reserve_index]; | |
1792 | ||
1793 | uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i); | |
1794 | ||
1795 | /* pre-format Host_Control */ | |
1796 | rxdp->host_control = (u64)(size_t)uld_priv; | |
1797 | } | |
1798 | ||
1799 | __vxge_hw_ring_block_memblock_idx_set(item, memblock_index); | |
1800 | ||
1801 | if (is_last) { | |
1802 | /* link last one with first one */ | |
1803 | __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0); | |
1804 | } | |
1805 | ||
1806 | if (index > 0) { | |
1807 | /* link this RxD block with previous one */ | |
1808 | __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index); | |
1809 | } | |
40a3a915 RV |
1810 | } |
1811 | ||
1812 | /* | |
3363276f | 1813 | * vxge_hw_ring_replenish - Initial replenish of RxDs |
40a3a915 RV |
1814 | * This function replenishes the RxDs from reserve array to work array |
1815 | */ | |
1816 | enum vxge_hw_status | |
3363276f | 1817 | vxge_hw_ring_replenish(struct __vxge_hw_ring *ring) |
40a3a915 RV |
1818 | { |
1819 | void *rxd; | |
40a3a915 RV |
1820 | struct __vxge_hw_channel *channel; |
1821 | enum vxge_hw_status status = VXGE_HW_OK; | |
1822 | ||
1823 | channel = &ring->channel; | |
1824 | ||
1825 | while (vxge_hw_channel_dtr_count(channel) > 0) { | |
1826 | ||
1827 | status = vxge_hw_ring_rxd_reserve(ring, &rxd); | |
1828 | ||
1829 | vxge_assert(status == VXGE_HW_OK); | |
1830 | ||
1831 | if (ring->rxd_init) { | |
1832 | status = ring->rxd_init(rxd, channel->userdata); | |
1833 | if (status != VXGE_HW_OK) { | |
1834 | vxge_hw_ring_rxd_free(ring, rxd); | |
1835 | goto exit; | |
1836 | } | |
1837 | } | |
1838 | ||
1839 | vxge_hw_ring_rxd_post(ring, rxd); | |
40a3a915 RV |
1840 | } |
1841 | status = VXGE_HW_OK; | |
1842 | exit: | |
1843 | return status; | |
1844 | } | |
1845 | ||
1846 | /* | |
1847 | * __vxge_hw_ring_create - Create a Ring | |
1848 | * This function creates a Ring and initializes it. | |
40a3a915 | 1849 | */ |
42821a5b | 1850 | static enum vxge_hw_status |
40a3a915 RV |
1851 | __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, |
1852 | struct vxge_hw_ring_attr *attr) | |
1853 | { | |
1854 | enum vxge_hw_status status = VXGE_HW_OK; | |
1855 | struct __vxge_hw_ring *ring; | |
1856 | u32 ring_length; | |
1857 | struct vxge_hw_ring_config *config; | |
1858 | struct __vxge_hw_device *hldev; | |
1859 | u32 vp_id; | |
1860 | struct vxge_hw_mempool_cbs ring_mp_callback; | |
1861 | ||
1862 | if ((vp == NULL) || (attr == NULL)) { | |
1863 | status = VXGE_HW_FAIL; | |
1864 | goto exit; | |
1865 | } | |
1866 | ||
1867 | hldev = vp->vpath->hldev; | |
1868 | vp_id = vp->vpath->vp_id; | |
1869 | ||
1870 | config = &hldev->config.vp_config[vp_id].ring; | |
1871 | ||
1872 | ring_length = config->ring_blocks * | |
1873 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | |
1874 | ||
1875 | ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, | |
1876 | VXGE_HW_CHANNEL_TYPE_RING, | |
1877 | ring_length, | |
1878 | attr->per_rxd_space, | |
1879 | attr->userdata); | |
1880 | ||
1881 | if (ring == NULL) { | |
1882 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
1883 | goto exit; | |
1884 | } | |
1885 | ||
1886 | vp->vpath->ringh = ring; | |
1887 | ring->vp_id = vp_id; | |
1888 | ring->vp_reg = vp->vpath->vp_reg; | |
1889 | ring->common_reg = hldev->common_reg; | |
1890 | ring->stats = &vp->vpath->sw_stats->ring_stats; | |
1891 | ring->config = config; | |
1892 | ring->callback = attr->callback; | |
1893 | ring->rxd_init = attr->rxd_init; | |
1894 | ring->rxd_term = attr->rxd_term; | |
1895 | ring->buffer_mode = config->buffer_mode; | |
1896 | ring->rxds_limit = config->rxds_limit; | |
1897 | ||
1898 | ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); | |
1899 | ring->rxd_priv_size = | |
1900 | sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; | |
1901 | ring->per_rxd_space = attr->per_rxd_space; | |
1902 | ||
1903 | ring->rxd_priv_size = | |
1904 | ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / | |
1905 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; | |
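/* e.g. with a 64-byte cache line, a 68-byte private area rounds up to
 * ((68 + 63) / 64) * 64 = 128 bytes (illustrative numbers only) */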
1906 | ||
1907 | /* how many RxDs can fit into one block. Depends on configured | |
1908 | * buffer_mode. */ | |
1909 | ring->rxds_per_block = | |
1910 | vxge_hw_ring_rxds_per_block_get(config->buffer_mode); | |
1911 | ||
1912 | /* calculate actual RxD block private size */ | |
1913 | ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; | |
1914 | ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc; | |
1915 | ring->mempool = __vxge_hw_mempool_create(hldev, | |
1916 | VXGE_HW_BLOCK_SIZE, | |
1917 | VXGE_HW_BLOCK_SIZE, | |
1918 | ring->rxdblock_priv_size, | |
1919 | ring->config->ring_blocks, | |
1920 | ring->config->ring_blocks, | |
1921 | &ring_mp_callback, | |
1922 | ring); | |
1923 | ||
1924 | if (ring->mempool == NULL) { | |
1925 | __vxge_hw_ring_delete(vp); | |
1926 | return VXGE_HW_ERR_OUT_OF_MEMORY; | |
1927 | } | |
1928 | ||
1929 | status = __vxge_hw_channel_initialize(&ring->channel); | |
1930 | if (status != VXGE_HW_OK) { | |
1931 | __vxge_hw_ring_delete(vp); | |
1932 | goto exit; | |
1933 | } | |
1934 | ||
1935 | /* Note: | |
1936 | * Specifying rxd_init callback means two things: | |
1937 | * 1) rxds need to be initialized by driver at channel-open time; | |
1938 | * 2) rxds need to be posted at channel-open time | |
1939 | * (that's what the initial_replenish() below does) | |
1940 | * Currently we don't have a case when the 1) is done without the 2). | |
1941 | */ | |
1942 | if (ring->rxd_init) { | |
3363276f | 1943 | status = vxge_hw_ring_replenish(ring); |
40a3a915 RV |
1944 | if (status != VXGE_HW_OK) { |
1945 | __vxge_hw_ring_delete(vp); | |
1946 | goto exit; | |
1947 | } | |
1948 | } | |
1949 | ||
1950 | /* initial replenish will increment the counter in its post() routine, | |
1951 | * we have to reset it */ | |
1952 | ring->stats->common_stats.usage_cnt = 0; | |
1953 | exit: | |
1954 | return status; | |
1955 | } | |
1956 | ||
1957 | /* | |
1958 | * __vxge_hw_ring_abort - Aborts the ring | |
1959 | * This function terminates the posted RxDs of the ring | |
1960 | */ | |
42821a5b | 1961 | static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) |
40a3a915 RV |
1962 | { |
1963 | void *rxdh; | |
1964 | struct __vxge_hw_channel *channel; | |
1965 | ||
1966 | channel = &ring->channel; | |
1967 | ||
1968 | for (;;) { | |
1969 | vxge_hw_channel_dtr_try_complete(channel, &rxdh); | |
1970 | ||
1971 | if (rxdh == NULL) | |
1972 | break; | |
1973 | ||
1974 | vxge_hw_channel_dtr_complete(channel); | |
1975 | ||
1976 | if (ring->rxd_term) | |
1977 | ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, | |
1978 | channel->userdata); | |
1979 | ||
1980 | vxge_hw_channel_dtr_free(channel, rxdh); | |
1981 | } | |
1982 | ||
1983 | return VXGE_HW_OK; | |
1984 | } | |
1985 | ||
1986 | /* | |
1987 | * __vxge_hw_ring_reset - Resets the ring | |
1988 | * This function resets the ring during vpath reset operation | |
1989 | */ | |
42821a5b | 1990 | static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) |
40a3a915 RV |
1991 | { |
1992 | enum vxge_hw_status status = VXGE_HW_OK; | |
1993 | struct __vxge_hw_channel *channel; | |
1994 | ||
1995 | channel = &ring->channel; | |
1996 | ||
1997 | __vxge_hw_ring_abort(ring); | |
1998 | ||
1999 | status = __vxge_hw_channel_reset(channel); | |
2000 | ||
2001 | if (status != VXGE_HW_OK) | |
2002 | goto exit; | |
2003 | ||
2004 | if (ring->rxd_init) { | |
3363276f | 2005 | status = vxge_hw_ring_replenish(ring); |
40a3a915 RV |
2006 | if (status != VXGE_HW_OK) |
2007 | goto exit; | |
2008 | } | |
2009 | exit: | |
2010 | return status; | |
2011 | } | |
2012 | ||
2013 | /* | |
2014 | * __vxge_hw_ring_delete - Removes the ring | |
2015 | * This function frees up the memory pool and removes the ring | |
2016 | */ | |
42821a5b | 2017 | static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) |
40a3a915 RV |
2018 | { |
2019 | struct __vxge_hw_ring *ring = vp->vpath->ringh; | |
2020 | ||
2021 | __vxge_hw_ring_abort(ring); | |
2022 | ||
2023 | if (ring->mempool) | |
2024 | __vxge_hw_mempool_destroy(ring->mempool); | |
2025 | ||
2026 | vp->vpath->ringh = NULL; | |
2027 | __vxge_hw_channel_free(&ring->channel); | |
2028 | ||
2029 | return VXGE_HW_OK; | |
2030 | } | |
2031 | ||
2032 | /* | |
2033 | * __vxge_hw_mempool_grow | |
2034 | * Will grow the mempool by up to %num_allocate memblocks. | |
2035 | */ | |
42821a5b | 2036 | static enum vxge_hw_status |
40a3a915 RV |
2037 | __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, |
2038 | u32 *num_allocated) | |
2039 | { | |
2040 | u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0; | |
2041 | u32 n_items = mempool->items_per_memblock; | |
2042 | u32 start_block_idx = mempool->memblocks_allocated; | |
2043 | u32 end_block_idx = mempool->memblocks_allocated + num_allocate; | |
2044 | enum vxge_hw_status status = VXGE_HW_OK; | |
2045 | ||
2046 | *num_allocated = 0; | |
2047 | ||
2048 | if (end_block_idx > mempool->memblocks_max) { | |
2049 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
2050 | goto exit; | |
2051 | } | |
2052 | ||
2053 | for (i = start_block_idx; i < end_block_idx; i++) { | |
2054 | u32 j; | |
2055 | u32 is_last = ((end_block_idx - 1) == i); | |
2056 | struct vxge_hw_mempool_dma *dma_object = | |
2057 | mempool->memblocks_dma_arr + i; | |
2058 | void *the_memblock; | |
2059 | ||
2060 | /* allocate memblock's private part. Each DMA memblock | |
2061 | * has a space allocated for item's private usage upon | |
2062 | * mempool's user request. Each time mempool grows, it will | |
2063 | * allocate new memblock and its private part at once. | |
2064 | * This helps to minimize memory usage a lot. */ | |
2065 | mempool->memblocks_priv_arr[i] = | |
89bf67f1 | 2066 | vzalloc(mempool->items_priv_size * n_items); |
40a3a915 RV |
2067 | if (mempool->memblocks_priv_arr[i] == NULL) { |
2068 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
2069 | goto exit; | |
2070 | } | |
2071 | ||
40a3a915 RV |
2072 | /* allocate DMA-capable memblock */ |
2073 | mempool->memblocks_arr[i] = | |
2074 | __vxge_hw_blockpool_malloc(mempool->devh, | |
2075 | mempool->memblock_size, dma_object); | |
2076 | if (mempool->memblocks_arr[i] == NULL) { | |
2077 | vfree(mempool->memblocks_priv_arr[i]); | |
2078 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
2079 | goto exit; | |
2080 | } | |
2081 | ||
2082 | (*num_allocated)++; | |
2083 | mempool->memblocks_allocated++; | |
2084 | ||
2085 | memset(mempool->memblocks_arr[i], 0, mempool->memblock_size); | |
2086 | ||
2087 | the_memblock = mempool->memblocks_arr[i]; | |
2088 | ||
2089 | /* fill the items hash array */ | |
2090 | for (j = 0; j < n_items; j++) { | |
2091 | u32 index = i * n_items + j; | |
2092 | ||
2093 | if (first_time && index >= mempool->items_initial) | |
2094 | break; | |
2095 | ||
2096 | mempool->items_arr[index] = | |
2097 | ((char *)the_memblock + j*mempool->item_size); | |
2098 | ||
2099 | /* let caller to do more job on each item */ | |
2100 | if (mempool->item_func_alloc != NULL) | |
2101 | mempool->item_func_alloc(mempool, i, | |
2102 | dma_object, index, is_last); | |
2103 | ||
2104 | mempool->items_current = index + 1; | |
2105 | } | |
2106 | ||
2107 | if (first_time && mempool->items_current == | |
2108 | mempool->items_initial) | |
2109 | break; | |
2110 | } | |
2111 | exit: | |
2112 | return status; | |
2113 | } | |
2114 | ||
2115 | /* | |
2116 | * __vxge_hw_mempool_create | |
2117 | * This function creates a memory pool object. The pool may grow but will | |
2118 | * never shrink. It consists of a number of dynamically allocated blocks, | |
2119 | * sized to hold %items_initial items in total. Memory is DMA-able, but | |
2120 | * the client must map/unmap it before interoperating with the device. | |
2121 | */ | |
42821a5b | 2122 | static struct vxge_hw_mempool* |
40a3a915 RV |
2123 | __vxge_hw_mempool_create( |
2124 | struct __vxge_hw_device *devh, | |
2125 | u32 memblock_size, | |
2126 | u32 item_size, | |
2127 | u32 items_priv_size, | |
2128 | u32 items_initial, | |
2129 | u32 items_max, | |
2130 | struct vxge_hw_mempool_cbs *mp_callback, | |
2131 | void *userdata) | |
2132 | { | |
2133 | enum vxge_hw_status status = VXGE_HW_OK; | |
2134 | u32 memblocks_to_allocate; | |
2135 | struct vxge_hw_mempool *mempool = NULL; | |
2136 | u32 allocated; | |
2137 | ||
2138 | if (memblock_size < item_size) { | |
2139 | status = VXGE_HW_FAIL; | |
2140 | goto exit; | |
2141 | } | |
2142 | ||
2143 | mempool = (struct vxge_hw_mempool *) | |
89bf67f1 | 2144 | vzalloc(sizeof(struct vxge_hw_mempool)); |
40a3a915 RV |
2145 | if (mempool == NULL) { |
2146 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
2147 | goto exit; | |
2148 | } | |
40a3a915 RV |
2149 | |
2150 | mempool->devh = devh; | |
2151 | mempool->memblock_size = memblock_size; | |
2152 | mempool->items_max = items_max; | |
2153 | mempool->items_initial = items_initial; | |
2154 | mempool->item_size = item_size; | |
2155 | mempool->items_priv_size = items_priv_size; | |
2156 | mempool->item_func_alloc = mp_callback->item_func_alloc; | |
2157 | mempool->userdata = userdata; | |
2158 | ||
2159 | mempool->memblocks_allocated = 0; | |
2160 | ||
2161 | mempool->items_per_memblock = memblock_size / item_size; | |
2162 | ||
2163 | mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) / | |
2164 | mempool->items_per_memblock; | |
2165 | ||
2166 | /* allocate array of memblocks */ | |
2167 | mempool->memblocks_arr = | |
89bf67f1 | 2168 | (void **) vzalloc(sizeof(void *) * mempool->memblocks_max); |
40a3a915 RV |
2169 | if (mempool->memblocks_arr == NULL) { |
2170 | __vxge_hw_mempool_destroy(mempool); | |
2171 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
2172 | mempool = NULL; | |
2173 | goto exit; | |
2174 | } | |
40a3a915 RV |
2175 | |
2176 | /* allocate array of private parts of items per memblocks */ | |
2177 | mempool->memblocks_priv_arr = | |
89bf67f1 | 2178 | (void **) vzalloc(sizeof(void *) * mempool->memblocks_max); |
40a3a915 RV |
2179 | if (mempool->memblocks_priv_arr == NULL) { |
2180 | __vxge_hw_mempool_destroy(mempool); | |
2181 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
2182 | mempool = NULL; | |
2183 | goto exit; | |
2184 | } | |
40a3a915 RV |
2185 | |
2186 | /* allocate array of memblocks DMA objects */ | |
2187 | mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *) | |
89bf67f1 | 2188 | vzalloc(sizeof(struct vxge_hw_mempool_dma) * |
40a3a915 RV |
2189 | mempool->memblocks_max); |
2190 | ||
2191 | if (mempool->memblocks_dma_arr == NULL) { | |
2192 | __vxge_hw_mempool_destroy(mempool); | |
2193 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
2194 | mempool = NULL; | |
2195 | goto exit; | |
2196 | } | |
40a3a915 RV |
2197 | |
2198 | /* allocate hash array of items */ | |
2199 | mempool->items_arr = | |
89bf67f1 | 2200 | (void **) vzalloc(sizeof(void *) * mempool->items_max); |
40a3a915 RV |
2201 | if (mempool->items_arr == NULL) { |
2202 | __vxge_hw_mempool_destroy(mempool); | |
2203 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
2204 | mempool = NULL; | |
2205 | goto exit; | |
2206 | } | |
40a3a915 RV |
2207 | |
2208 | /* calculate initial number of memblocks */ | |
2209 | memblocks_to_allocate = (mempool->items_initial + | |
2210 | mempool->items_per_memblock - 1) / | |
2211 | mempool->items_per_memblock; | |
2212 | ||
2213 | /* pre-allocate the mempool */ | |
2214 | status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate, | |
2215 | &allocated); | |
2216 | if (status != VXGE_HW_OK) { | |
2217 | __vxge_hw_mempool_destroy(mempool); | |
2218 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
2219 | mempool = NULL; | |
2220 | goto exit; | |
2221 | } | |
2222 | ||
2223 | exit: | |
2224 | return mempool; | |
2225 | } | |
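/*
 * Illustrative sketch, not part of the driver: creating a small pool
 * with a per-item callback, mirroring what __vxge_hw_ring_create and
 * __vxge_hw_fifo_create do. The sizes below are example values only.
 */
static void
vxge_example_item_init(struct vxge_hw_mempool *mempoolh, u32 memblock_index,
		       struct vxge_hw_mempool_dma *dma_object,
		       u32 index, u32 is_last)
{
	/* example only: zero each item as it is carved out of a memblock */
	memset(mempoolh->items_arr[index], 0, mempoolh->item_size);
}

static struct vxge_hw_mempool *
vxge_example_pool_create(struct __vxge_hw_device *devh)
{
	struct vxge_hw_mempool_cbs cbs = {
		.item_func_alloc = vxge_example_item_init,
	};

	/* 4K memblocks, 256-byte items, no private area, 16 items */
	return __vxge_hw_mempool_create(devh, 4096, 256, 0, 16, 16,
					&cbs, NULL);
}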
2226 | ||
2227 | /* | |
2228 | * __vxge_hw_mempool_destroy | |
2229 | */ | |
42821a5b | 2230 | static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) |
40a3a915 RV |
2231 | { |
2232 | u32 i, j; | |
2233 | struct __vxge_hw_device *devh = mempool->devh; | |
2234 | ||
2235 | for (i = 0; i < mempool->memblocks_allocated; i++) { | |
2236 | struct vxge_hw_mempool_dma *dma_object; | |
2237 | ||
2238 | vxge_assert(mempool->memblocks_arr[i]); | |
2239 | vxge_assert(mempool->memblocks_dma_arr + i); | |
2240 | ||
2241 | dma_object = mempool->memblocks_dma_arr + i; | |
2242 | ||
2243 | for (j = 0; j < mempool->items_per_memblock; j++) { | |
2244 | u32 index = i * mempool->items_per_memblock + j; | |
2245 | ||
2246 | /* to skip the last partially filled (if any) memblock */ | |
2247 | if (index >= mempool->items_current) | |
2248 | break; | |
2249 | } | |
2250 | ||
2251 | vfree(mempool->memblocks_priv_arr[i]); | |
2252 | ||
2253 | __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], | |
2254 | mempool->memblock_size, dma_object); | |
2255 | } | |
2256 | ||
50d36a93 | 2257 | vfree(mempool->items_arr); |
40a3a915 | 2258 | |
50d36a93 | 2259 | vfree(mempool->memblocks_dma_arr); |
40a3a915 | 2260 | |
50d36a93 | 2261 | vfree(mempool->memblocks_priv_arr); |
40a3a915 | 2262 | |
50d36a93 | 2263 | vfree(mempool->memblocks_arr); |
40a3a915 RV |
2264 | |
2265 | vfree(mempool); | |
2266 | } | |
2267 | ||
2268 | /* | |
2269 | * __vxge_hw_device_fifo_config_check - Check fifo configuration. | |
2270 | * Check the fifo configuration | |
2271 | */ | |
2c91308f | 2272 | static enum vxge_hw_status |
40a3a915 RV |
2273 | __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) |
2274 | { | |
2275 | if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || | |
2276 | (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) | |
2277 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | |
2278 | ||
2279 | return VXGE_HW_OK; | |
2280 | } | |
2281 | ||
2282 | /* | |
2283 | * __vxge_hw_device_vpath_config_check - Check vpath configuration. | |
2284 | * Check the vpath configuration | |
2285 | */ | |
42821a5b | 2286 | static enum vxge_hw_status |
40a3a915 RV |
2287 | __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) |
2288 | { | |
2289 | enum vxge_hw_status status; | |
2290 | ||
2291 | if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || | |
2292 | (vp_config->min_bandwidth > | |
2293 | VXGE_HW_VPATH_BANDWIDTH_MAX)) | |
2294 | return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; | |
2295 | ||
2296 | status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); | |
2297 | if (status != VXGE_HW_OK) | |
2298 | return status; | |
2299 | ||
2300 | if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && | |
2301 | ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || | |
2302 | (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) | |
2303 | return VXGE_HW_BADCFG_VPATH_MTU; | |
2304 | ||
2305 | if ((vp_config->rpa_strip_vlan_tag != | |
2306 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && | |
2307 | (vp_config->rpa_strip_vlan_tag != | |
2308 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && | |
2309 | (vp_config->rpa_strip_vlan_tag != | |
2310 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) | |
2311 | return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; | |
2312 | ||
2313 | return VXGE_HW_OK; | |
2314 | } | |
2315 | ||
2316 | /* | |
2317 | * __vxge_hw_device_config_check - Check device configuration. | |
2318 | * Check the device configuration | |
2319 | */ | |
2c91308f | 2320 | static enum vxge_hw_status |
40a3a915 RV |
2321 | __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) |
2322 | { | |
2323 | u32 i; | |
2324 | enum vxge_hw_status status; | |
2325 | ||
2326 | if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && | |
2327 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && | |
2328 | (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && | |
2329 | (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) | |
2330 | return VXGE_HW_BADCFG_INTR_MODE; | |
2331 | ||
2332 | if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && | |
2333 | (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) | |
2334 | return VXGE_HW_BADCFG_RTS_MAC_EN; | |
2335 | ||
2336 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | |
2337 | status = __vxge_hw_device_vpath_config_check( | |
2338 | &new_config->vp_config[i]); | |
2339 | if (status != VXGE_HW_OK) | |
2340 | return status; | |
2341 | } | |
2342 | ||
2343 | return VXGE_HW_OK; | |
2344 | } | |
2345 | ||
2346 | /* | |
2347 | * vxge_hw_device_config_default_get - Initialize device config with defaults. | |
2348 | * Initialize Titan device config with default values. | |
2349 | */ | |
2350 | enum vxge_hw_status __devinit | |
2351 | vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) | |
2352 | { | |
2353 | u32 i; | |
2354 | ||
2355 | device_config->dma_blockpool_initial = | |
2356 | VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE; | |
2357 | device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE; | |
2358 | device_config->intr_mode = VXGE_HW_INTR_MODE_DEF; | |
2359 | device_config->rth_en = VXGE_HW_RTH_DEFAULT; | |
2360 | device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT; | |
2361 | device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS; | |
2362 | device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; | |
2363 | ||
2364 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | |
2365 | ||
2366 | device_config->vp_config[i].vp_id = i; | |
2367 | ||
2368 | device_config->vp_config[i].min_bandwidth = | |
2369 | VXGE_HW_VPATH_BANDWIDTH_DEFAULT; | |
2370 | ||
2371 | device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT; | |
2372 | ||
2373 | device_config->vp_config[i].ring.ring_blocks = | |
2374 | VXGE_HW_DEF_RING_BLOCKS; | |
2375 | ||
2376 | device_config->vp_config[i].ring.buffer_mode = | |
2377 | VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT; | |
2378 | ||
2379 | device_config->vp_config[i].ring.scatter_mode = | |
2380 | VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT; | |
2381 | ||
2382 | device_config->vp_config[i].ring.rxds_limit = | |
2383 | VXGE_HW_DEF_RING_RXDS_LIMIT; | |
2384 | ||
2385 | device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE; | |
2386 | ||
2387 | device_config->vp_config[i].fifo.fifo_blocks = | |
2388 | VXGE_HW_MIN_FIFO_BLOCKS; | |
2389 | ||
2390 | device_config->vp_config[i].fifo.max_frags = | |
2391 | VXGE_HW_MAX_FIFO_FRAGS; | |
2392 | ||
2393 | device_config->vp_config[i].fifo.memblock_size = | |
2394 | VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE; | |
2395 | ||
2396 | device_config->vp_config[i].fifo.alignment_size = | |
2397 | VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE; | |
2398 | ||
2399 | device_config->vp_config[i].fifo.intr = | |
2400 | VXGE_HW_FIFO_QUEUE_INTR_DEFAULT; | |
2401 | ||
2402 | device_config->vp_config[i].fifo.no_snoop_bits = | |
2403 | VXGE_HW_FIFO_NO_SNOOP_DEFAULT; | |
2404 | device_config->vp_config[i].tti.intr_enable = | |
2405 | VXGE_HW_TIM_INTR_DEFAULT; | |
2406 | ||
2407 | device_config->vp_config[i].tti.btimer_val = | |
2408 | VXGE_HW_USE_FLASH_DEFAULT; | |
2409 | ||
2410 | device_config->vp_config[i].tti.timer_ac_en = | |
2411 | VXGE_HW_USE_FLASH_DEFAULT; | |
2412 | ||
2413 | device_config->vp_config[i].tti.timer_ci_en = | |
2414 | VXGE_HW_USE_FLASH_DEFAULT; | |
2415 | ||
2416 | device_config->vp_config[i].tti.timer_ri_en = | |
2417 | VXGE_HW_USE_FLASH_DEFAULT; | |
2418 | ||
2419 | device_config->vp_config[i].tti.rtimer_val = | |
2420 | VXGE_HW_USE_FLASH_DEFAULT; | |
2421 | ||
2422 | device_config->vp_config[i].tti.util_sel = | |
2423 | VXGE_HW_USE_FLASH_DEFAULT; | |
2424 | ||
2425 | device_config->vp_config[i].tti.ltimer_val = | |
2426 | VXGE_HW_USE_FLASH_DEFAULT; | |
2427 | ||
2428 | device_config->vp_config[i].tti.urange_a = | |
2429 | VXGE_HW_USE_FLASH_DEFAULT; | |
2430 | ||
2431 | device_config->vp_config[i].tti.uec_a = | |
2432 | VXGE_HW_USE_FLASH_DEFAULT; | |
2433 | ||
2434 | device_config->vp_config[i].tti.urange_b = | |
2435 | VXGE_HW_USE_FLASH_DEFAULT; | |
2436 | ||
2437 | device_config->vp_config[i].tti.uec_b = | |
2438 | VXGE_HW_USE_FLASH_DEFAULT; | |
2439 | ||
2440 | device_config->vp_config[i].tti.urange_c = | |
2441 | VXGE_HW_USE_FLASH_DEFAULT; | |
2442 | ||
2443 | device_config->vp_config[i].tti.uec_c = | |
2444 | VXGE_HW_USE_FLASH_DEFAULT; | |
2445 | ||
2446 | device_config->vp_config[i].tti.uec_d = | |
2447 | VXGE_HW_USE_FLASH_DEFAULT; | |
2448 | ||
2449 | device_config->vp_config[i].rti.intr_enable = | |
2450 | VXGE_HW_TIM_INTR_DEFAULT; | |
2451 | ||
2452 | device_config->vp_config[i].rti.btimer_val = | |
2453 | VXGE_HW_USE_FLASH_DEFAULT; | |
2454 | ||
2455 | device_config->vp_config[i].rti.timer_ac_en = | |
2456 | VXGE_HW_USE_FLASH_DEFAULT; | |
2457 | ||
2458 | device_config->vp_config[i].rti.timer_ci_en = | |
2459 | VXGE_HW_USE_FLASH_DEFAULT; | |
2460 | ||
2461 | device_config->vp_config[i].rti.timer_ri_en = | |
2462 | VXGE_HW_USE_FLASH_DEFAULT; | |
2463 | ||
2464 | device_config->vp_config[i].rti.rtimer_val = | |
2465 | VXGE_HW_USE_FLASH_DEFAULT; | |
2466 | ||
2467 | device_config->vp_config[i].rti.util_sel = | |
2468 | VXGE_HW_USE_FLASH_DEFAULT; | |
2469 | ||
2470 | device_config->vp_config[i].rti.ltimer_val = | |
2471 | VXGE_HW_USE_FLASH_DEFAULT; | |
2472 | ||
2473 | device_config->vp_config[i].rti.urange_a = | |
2474 | VXGE_HW_USE_FLASH_DEFAULT; | |
2475 | ||
2476 | device_config->vp_config[i].rti.uec_a = | |
2477 | VXGE_HW_USE_FLASH_DEFAULT; | |
2478 | ||
2479 | device_config->vp_config[i].rti.urange_b = | |
2480 | VXGE_HW_USE_FLASH_DEFAULT; | |
2481 | ||
2482 | device_config->vp_config[i].rti.uec_b = | |
2483 | VXGE_HW_USE_FLASH_DEFAULT; | |
2484 | ||
2485 | device_config->vp_config[i].rti.urange_c = | |
2486 | VXGE_HW_USE_FLASH_DEFAULT; | |
2487 | ||
2488 | device_config->vp_config[i].rti.uec_c = | |
2489 | VXGE_HW_USE_FLASH_DEFAULT; | |
2490 | ||
2491 | device_config->vp_config[i].rti.uec_d = | |
2492 | VXGE_HW_USE_FLASH_DEFAULT; | |
2493 | ||
2494 | device_config->vp_config[i].mtu = | |
2495 | VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU; | |
2496 | ||
2497 | device_config->vp_config[i].rpa_strip_vlan_tag = | |
2498 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT; | |
2499 | } | |
2500 | ||
2501 | return VXGE_HW_OK; | |
2502 | } | |
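/*
 * Illustrative sketch, not part of the driver: typical probe-time usage
 * is to fetch the defaults above, apply any local overrides, and then
 * validate the result with __vxge_hw_device_config_check(). Error
 * handling is elided.
 */
static enum vxge_hw_status
vxge_example_build_config(struct vxge_hw_device_config *cfg)
{
	vxge_hw_device_config_default_get(cfg);

	/* example override: request MSI-X interrupts */
	cfg->intr_mode = VXGE_HW_INTR_MODE_MSIX;

	return __vxge_hw_device_config_check(cfg);
}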
2503 | ||
2504 | /* | |
2505 | * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section. | |
2506 | * Set the swapper bits appropriately for the legacy section. | |
2507 | */ | |
42821a5b | 2508 | static enum vxge_hw_status |
40a3a915 RV |
2509 | __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) |
2510 | { | |
2511 | u64 val64; | |
2512 | enum vxge_hw_status status = VXGE_HW_OK; | |
2513 | ||
2514 | val64 = readq(&legacy_reg->toc_swapper_fb); | |
2515 | ||
2516 | wmb(); | |
2517 | ||
2518 | switch (val64) { | |
2519 | ||
2520 | case VXGE_HW_SWAPPER_INITIAL_VALUE: | |
2521 | return status; | |
2522 | ||
2523 | case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: | |
2524 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | |
2525 | &legacy_reg->pifm_rd_swap_en); | |
2526 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | |
2527 | &legacy_reg->pifm_rd_flip_en); | |
2528 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | |
2529 | &legacy_reg->pifm_wr_swap_en); | |
2530 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | |
2531 | &legacy_reg->pifm_wr_flip_en); | |
2532 | break; | |
2533 | ||
2534 | case VXGE_HW_SWAPPER_BYTE_SWAPPED: | |
2535 | writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, | |
2536 | &legacy_reg->pifm_rd_swap_en); | |
2537 | writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, | |
2538 | &legacy_reg->pifm_wr_swap_en); | |
2539 | break; | |
2540 | ||
2541 | case VXGE_HW_SWAPPER_BIT_FLIPPED: | |
2542 | writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, | |
2543 | &legacy_reg->pifm_rd_flip_en); | |
2544 | writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, | |
2545 | &legacy_reg->pifm_wr_flip_en); | |
2546 | break; | |
2547 | } | |
2548 | ||
2549 | wmb(); | |
2550 | ||
2551 | val64 = readq(&legacy_reg->toc_swapper_fb); | |
2552 | ||
2553 | if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) | |
2554 | status = VXGE_HW_ERR_SWAPPER_CTRL; | |
2555 | ||
2556 | return status; | |
2557 | } | |
2558 | ||
2559 | /* | |
2560 | * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. | |
2561 | * Set the swapper bits appropriately for the vpath. | |
2562 | */ | |
42821a5b | 2563 | static enum vxge_hw_status |
40a3a915 RV |
2564 | __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) |
2565 | { | |
2566 | #ifndef __BIG_ENDIAN | |
2567 | u64 val64; | |
2568 | ||
2569 | val64 = readq(&vpath_reg->vpath_general_cfg1); | |
2570 | wmb(); | |
2571 | val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN; | |
2572 | writeq(val64, &vpath_reg->vpath_general_cfg1); | |
2573 | wmb(); | |
2574 | #endif | |
2575 | return VXGE_HW_OK; | |
2576 | } | |
2577 | ||
2578 | /* | |
2579 | * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc. | |
2580 | * Set the swapper bits appropriately for the kdfc. | |
2581 | */ | |
42821a5b | 2582 | static enum vxge_hw_status |
40a3a915 RV |
2583 | __vxge_hw_kdfc_swapper_set( |
2584 | struct vxge_hw_legacy_reg __iomem *legacy_reg, | |
2585 | struct vxge_hw_vpath_reg __iomem *vpath_reg) | |
2586 | { | |
2587 | u64 val64; | |
2588 | ||
2589 | val64 = readq(&legacy_reg->pifm_wr_swap_en); | |
2590 | ||
2591 | if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) { | |
2592 | val64 = readq(&vpath_reg->kdfcctl_cfg0); | |
2593 | wmb(); | |
2594 | ||
2595 | val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 | | |
2596 | VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 | | |
2597 | VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2; | |
2598 | ||
2599 | writeq(val64, &vpath_reg->kdfcctl_cfg0); | |
2600 | wmb(); | |
2601 | } | |
2602 | ||
2603 | return VXGE_HW_OK; | |
2604 | } | |
2605 | ||
40a3a915 RV |
2606 | /* |
2607 | * vxge_hw_mgmt_reg_read - Read Titan register. | |
2608 | */ | |
2609 | enum vxge_hw_status | |
2610 | vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev, | |
2611 | enum vxge_hw_mgmt_reg_type type, | |
2612 | u32 index, u32 offset, u64 *value) | |
2613 | { | |
2614 | enum vxge_hw_status status = VXGE_HW_OK; | |
2615 | ||
2616 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { | |
2617 | status = VXGE_HW_ERR_INVALID_DEVICE; | |
2618 | goto exit; | |
2619 | } | |
2620 | ||
2621 | switch (type) { | |
2622 | case vxge_hw_mgmt_reg_type_legacy: | |
2623 | if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) { | |
2624 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2625 | break; | |
2626 | } | |
2627 | *value = readq((void __iomem *)hldev->legacy_reg + offset); | |
2628 | break; | |
2629 | case vxge_hw_mgmt_reg_type_toc: | |
2630 | if (offset > sizeof(struct vxge_hw_toc_reg) - 8) { | |
2631 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2632 | break; | |
2633 | } | |
2634 | *value = readq((void __iomem *)hldev->toc_reg + offset); | |
2635 | break; | |
2636 | case vxge_hw_mgmt_reg_type_common: | |
2637 | if (offset > sizeof(struct vxge_hw_common_reg) - 8) { | |
2638 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2639 | break; | |
2640 | } | |
2641 | *value = readq((void __iomem *)hldev->common_reg + offset); | |
2642 | break; | |
2643 | case vxge_hw_mgmt_reg_type_mrpcim: | |
2644 | if (!(hldev->access_rights & | |
2645 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { | |
2646 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | |
2647 | break; | |
2648 | } | |
2649 | if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { | |
2650 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2651 | break; | |
2652 | } | |
2653 | *value = readq((void __iomem *)hldev->mrpcim_reg + offset); | |
2654 | break; | |
2655 | case vxge_hw_mgmt_reg_type_srpcim: | |
2656 | if (!(hldev->access_rights & | |
2657 | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { | |
2658 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | |
2659 | break; | |
2660 | } | |
2661 | if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { | |
2662 | status = VXGE_HW_ERR_INVALID_INDEX; | |
2663 | break; | |
2664 | } | |
2665 | if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) { | |
2666 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2667 | break; | |
2668 | } | |
2669 | *value = readq((void __iomem *)hldev->srpcim_reg[index] + | |
2670 | offset); | |
2671 | break; | |
2672 | case vxge_hw_mgmt_reg_type_vpmgmt: | |
2673 | if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) || | |
2674 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | |
2675 | status = VXGE_HW_ERR_INVALID_INDEX; | |
2676 | break; | |
2677 | } | |
2678 | if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) { | |
2679 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2680 | break; | |
2681 | } | |
2682 | *value = readq((void __iomem *)hldev->vpmgmt_reg[index] + | |
2683 | offset); | |
2684 | break; | |
2685 | case vxge_hw_mgmt_reg_type_vpath: | |
2686 | if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) || | |
2687 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | |
2688 | status = VXGE_HW_ERR_INVALID_INDEX; | |
2689 | break; | |
2690 | } | |
2695 | if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) { | |
2696 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2697 | break; | |
2698 | } | |
2699 | *value = readq((void __iomem *)hldev->vpath_reg[index] + | |
2700 | offset); | |
2701 | break; | |
2702 | default: | |
2703 | status = VXGE_HW_ERR_INVALID_TYPE; | |
2704 | break; | |
2705 | } | |
2706 | ||
2707 | exit: | |
2708 | return status; | |
2709 | } | |
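/*
 * Illustrative sketch, not part of the driver: reading the first 64-bit
 * word of the common register space through the management API above.
 * The index argument is unused for non-replicated spaces such as this.
 */
static u64 vxge_example_read_common_reg0(struct __vxge_hw_device *hldev)
{
	u64 val = 0;

	if (vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
				  0, 0, &val) != VXGE_HW_OK)
		return 0;

	return val;
}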
2710 | ||
fa41fd10 SH |
2711 | /* |
2712 | * vxge_hw_vpath_strip_fcs_check - Check for FCS strip. | |
2713 | */ | |
2714 | enum vxge_hw_status | |
2715 | vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask) | |
2716 | { | |
2717 | struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; | |
2718 | enum vxge_hw_status status = VXGE_HW_OK; | |
2719 | int i = 0, j = 0; | |
2720 | ||
2721 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | |
2722 | if (!((vpath_mask) & vxge_mBIT(i))) | |
2723 | continue; | |
2724 | vpmgmt_reg = hldev->vpmgmt_reg[i]; | |
2725 | for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) { | |
2726 | if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j]) | |
2727 | & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS) | |
2728 | return VXGE_HW_FAIL; | |
2729 | } | |
2730 | } | |
2731 | return status; | |
2732 | } | |
40a3a915 RV |
2733 | /* |
2734 | * vxge_hw_mgmt_reg_write - Write Titan register. | |
2735 | */ | |
2736 | enum vxge_hw_status | |
2737 | vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev, | |
2738 | enum vxge_hw_mgmt_reg_type type, | |
2739 | u32 index, u32 offset, u64 value) | |
2740 | { | |
2741 | enum vxge_hw_status status = VXGE_HW_OK; | |
2742 | ||
2743 | if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { | |
2744 | status = VXGE_HW_ERR_INVALID_DEVICE; | |
2745 | goto exit; | |
2746 | } | |
2747 | ||
2748 | switch (type) { | |
2749 | case vxge_hw_mgmt_reg_type_legacy: | |
2750 | if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) { | |
2751 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2752 | break; | |
2753 | } | |
2754 | writeq(value, (void __iomem *)hldev->legacy_reg + offset); | |
2755 | break; | |
2756 | case vxge_hw_mgmt_reg_type_toc: | |
2757 | if (offset > sizeof(struct vxge_hw_toc_reg) - 8) { | |
2758 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2759 | break; | |
2760 | } | |
2761 | writeq(value, (void __iomem *)hldev->toc_reg + offset); | |
2762 | break; | |
2763 | case vxge_hw_mgmt_reg_type_common: | |
2764 | if (offset > sizeof(struct vxge_hw_common_reg) - 8) { | |
2765 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2766 | break; | |
2767 | } | |
2768 | writeq(value, (void __iomem *)hldev->common_reg + offset); | |
2769 | break; | |
2770 | case vxge_hw_mgmt_reg_type_mrpcim: | |
2771 | if (!(hldev->access_rights & | |
2772 | VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { | |
2773 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | |
2774 | break; | |
2775 | } | |
2776 | if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { | |
2777 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2778 | break; | |
2779 | } | |
2780 | writeq(value, (void __iomem *)hldev->mrpcim_reg + offset); | |
2781 | break; | |
2782 | case vxge_hw_mgmt_reg_type_srpcim: | |
2783 | if (!(hldev->access_rights & | |
2784 | VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { | |
2785 | status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; | |
2786 | break; | |
2787 | } | |
2788 | if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { | |
2789 | status = VXGE_HW_ERR_INVALID_INDEX; | |
2790 | break; | |
2791 | } | |
2792 | if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) { | |
2793 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2794 | break; | |
2795 | } | |
2796 | writeq(value, (void __iomem *)hldev->srpcim_reg[index] + | |
2797 | offset); | |
2798 | ||
2799 | break; | |
2800 | case vxge_hw_mgmt_reg_type_vpmgmt: | |
2801 | if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) || | |
2802 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | |
2803 | status = VXGE_HW_ERR_INVALID_INDEX; | |
2804 | break; | |
2805 | } | |
2806 | if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) { | |
2807 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2808 | break; | |
2809 | } | |
2810 | writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] + | |
2811 | offset); | |
2812 | break; | |
2813 | case vxge_hw_mgmt_reg_type_vpath: | |
2814 | if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) || | |
2815 | (!(hldev->vpath_assignments & vxge_mBIT(index)))) { | |
2816 | status = VXGE_HW_ERR_INVALID_INDEX; | |
2817 | break; | |
2818 | } | |
2819 | if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) { | |
2820 | status = VXGE_HW_ERR_INVALID_OFFSET; | |
2821 | break; | |
2822 | } | |
2823 | writeq(value, (void __iomem *)hldev->vpath_reg[index] + | |
2824 | offset); | |
2825 | break; | |
2826 | default: | |
2827 | status = VXGE_HW_ERR_INVALID_TYPE; | |
2828 | break; | |
2829 | } | |
2830 | exit: | |
2831 | return status; | |
2832 | } | |
2833 | ||
2834 | /* | |
2835 | * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD | |
2836 | * list callback | |
2837 | * This function is the callback passed to __vxge_hw_mempool_create to | |
2838 | * create the memory pool for TxD lists | |
2839 | */ | |
2840 | static void | |
2841 | __vxge_hw_fifo_mempool_item_alloc( | |
2842 | struct vxge_hw_mempool *mempoolh, | |
2843 | u32 memblock_index, struct vxge_hw_mempool_dma *dma_object, | |
2844 | u32 index, u32 is_last) | |
2845 | { | |
2846 | u32 memblock_item_idx; | |
2847 | struct __vxge_hw_fifo_txdl_priv *txdl_priv; | |
2848 | struct vxge_hw_fifo_txd *txdp = | |
2849 | (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index]; | |
2850 | struct __vxge_hw_fifo *fifo = | |
2851 | (struct __vxge_hw_fifo *)mempoolh->userdata; | |
2852 | void *memblock = mempoolh->memblocks_arr[memblock_index]; | |
2853 | ||
2854 | vxge_assert(txdp); | |
2855 | ||
2856 | txdp->host_control = (u64) (size_t) | |
2857 | __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp, | |
2858 | &memblock_item_idx); | |
2859 | ||
2860 | txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp); | |
2861 | ||
2862 | vxge_assert(txdl_priv); | |
2863 | ||
2864 | fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp; | |
2865 | ||
2866 | /* pre-format HW's TxDL's private */ | |
2867 | txdl_priv->dma_offset = (char *)txdp - (char *)memblock; | |
2868 | txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset; | |
2869 | txdl_priv->dma_handle = dma_object->handle; | |
2870 | txdl_priv->memblock = memblock; | |
2871 | txdl_priv->first_txdp = txdp; | |
2872 | txdl_priv->next_txdl_priv = NULL; | |
2873 | txdl_priv->alloc_frags = 0; | |
40a3a915 RV |
2874 | } |
2875 | ||
2876 | /* | |
2877 | * __vxge_hw_fifo_create - Create a FIFO | |
2878 | * This function creates a FIFO and initializes it. | |
2879 | */ | |
2c91308f | 2880 | static enum vxge_hw_status |
40a3a915 RV |
2881 | __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, |
2882 | struct vxge_hw_fifo_attr *attr) | |
2883 | { | |
2884 | enum vxge_hw_status status = VXGE_HW_OK; | |
2885 | struct __vxge_hw_fifo *fifo; | |
2886 | struct vxge_hw_fifo_config *config; | |
2887 | u32 txdl_size, txdl_per_memblock; | |
2888 | struct vxge_hw_mempool_cbs fifo_mp_callback; | |
2889 | struct __vxge_hw_virtualpath *vpath; | |
2890 | ||
2891 | if ((vp == NULL) || (attr == NULL)) { | |
2892 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
2893 | goto exit; | |
2894 | } | |
2895 | vpath = vp->vpath; | |
2896 | config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo; | |
2897 | ||
2898 | txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd); | |
2899 | ||
2900 | txdl_per_memblock = config->memblock_size / txdl_size; | |
2901 | ||
2902 | fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp, | |
2903 | VXGE_HW_CHANNEL_TYPE_FIFO, | |
2904 | config->fifo_blocks * txdl_per_memblock, | |
2905 | attr->per_txdl_space, attr->userdata); | |
2906 | ||
2907 | if (fifo == NULL) { | |
2908 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
2909 | goto exit; | |
2910 | } | |
2911 | ||
2912 | vpath->fifoh = fifo; | |
2913 | fifo->nofl_db = vpath->nofl_db; | |
2914 | ||
2915 | fifo->vp_id = vpath->vp_id; | |
2916 | fifo->vp_reg = vpath->vp_reg; | |
2917 | fifo->stats = &vpath->sw_stats->fifo_stats; | |
2918 | ||
2919 | fifo->config = config; | |
2920 | ||
2921 | /* apply "interrupts per txdl" attribute */ | |
2922 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; | |
2923 | ||
2924 | if (fifo->config->intr) | |
2925 | fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; | |
2926 | ||
2927 | fifo->no_snoop_bits = config->no_snoop_bits; | |
2928 | ||
2929 | /* | |
2930 | * FIFO memory management strategy: | |
2931 | * | |
2932 | * TxDL split into three independent parts: | |
2933 | * - set of TxD's | |
2934 | * - TxD HW private part | |
2935 | * - driver private part | |
2936 | * | |
2937 | * Adaptive memory allocation is used, i.e. memory is allocated on | |
2938 | * demand, with a size that fits into one memory block. | |
2939 | * One memory block may contain more than one TxDL. | |
2940 | * | |
2941 | * During "reserve" operations more memory can be allocated on demand | |
2942 | * for example due to FIFO full condition. | |
2943 | * | |
2944 | * The pool of memblocks never shrinks except in __vxge_hw_fifo_close | |
2945 | * routine which will essentially stop the channel and free resources. | |
2946 | */ | |
2947 | ||
2948 | /* TxDL common private size == TxDL private + driver private */ | |
2949 | fifo->priv_size = | |
2950 | sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space; | |
2951 | fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) / | |
2952 | VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; | |
2953 | ||
2954 | fifo->per_txdl_space = attr->per_txdl_space; | |
2955 | ||
2956 | /* store the TxDL size and the number of TxDLs per memblock */ | |
2957 | fifo->txdl_size = txdl_size; | |
2958 | fifo->txdl_per_memblock = txdl_per_memblock; | |
2959 | ||
2960 | fifo->txdl_term = attr->txdl_term; | |
2961 | fifo->callback = attr->callback; | |
2962 | ||
2963 | if (fifo->txdl_per_memblock == 0) { | |
2964 | __vxge_hw_fifo_delete(vp); | |
2965 | status = VXGE_HW_ERR_INVALID_BLOCK_SIZE; | |
2966 | goto exit; | |
2967 | } | |
2968 | ||
2969 | fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc; | |
2970 | ||
2971 | fifo->mempool = | |
2972 | __vxge_hw_mempool_create(vpath->hldev, | |
2973 | fifo->config->memblock_size, | |
2974 | fifo->txdl_size, | |
2975 | fifo->priv_size, | |
2976 | (fifo->config->fifo_blocks * fifo->txdl_per_memblock), | |
2977 | (fifo->config->fifo_blocks * fifo->txdl_per_memblock), | |
2978 | &fifo_mp_callback, | |
2979 | fifo); | |
2980 | ||
2981 | if (fifo->mempool == NULL) { | |
2982 | __vxge_hw_fifo_delete(vp); | |
2983 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
2984 | goto exit; | |
2985 | } | |
2986 | ||
2987 | status = __vxge_hw_channel_initialize(&fifo->channel); | |
2988 | if (status != VXGE_HW_OK) { | |
2989 | __vxge_hw_fifo_delete(vp); | |
2990 | goto exit; | |
2991 | } | |
2992 | ||
2993 | vxge_assert(fifo->channel.reserve_ptr); | |
2994 | exit: | |
2995 | return status; | |
2996 | } | |
2997 | ||
2998 | /* | |
2999 | * __vxge_hw_fifo_abort - Aborts the fifo | |
3000 | * This function terminates the posted TxDs of the fifo | |
3001 | */ | |
42821a5b | 3002 | static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) |
40a3a915 RV |
3003 | { |
3004 | void *txdlh; | |
3005 | ||
3006 | for (;;) { | |
3007 | vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); | |
3008 | ||
3009 | if (txdlh == NULL) | |
3010 | break; | |
3011 | ||
3012 | vxge_hw_channel_dtr_complete(&fifo->channel); | |
3013 | ||
3014 | if (fifo->txdl_term) { | |
3015 | fifo->txdl_term(txdlh, | |
3016 | VXGE_HW_TXDL_STATE_POSTED, | |
3017 | fifo->channel.userdata); | |
3018 | } | |
3019 | ||
3020 | vxge_hw_channel_dtr_free(&fifo->channel, txdlh); | |
3021 | } | |
3022 | ||
3023 | return VXGE_HW_OK; | |
3024 | } | |
3025 | ||
3026 | /* | |
3027 | * __vxge_hw_fifo_reset - Resets the fifo | |
3028 | * This function resets the fifo during vpath reset operation | |
3029 | */ | |
42821a5b | 3030 | static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) |
40a3a915 RV |
3031 | { |
3032 | enum vxge_hw_status status = VXGE_HW_OK; | |
3033 | ||
3034 | __vxge_hw_fifo_abort(fifo); | |
3035 | status = __vxge_hw_channel_reset(&fifo->channel); | |
3036 | ||
3037 | return status; | |
3038 | } | |
3039 | ||
3040 | /* | |
3041 | * __vxge_hw_fifo_delete - Removes the FIFO | |
3042 | * This function frees up the memory pool and removes the FIFO | |
3043 | */ | |
2c91308f JM |
3044 | static enum vxge_hw_status |
3045 | __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) | |
40a3a915 RV |
3046 | { |
3047 | struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; | |
3048 | ||
3049 | __vxge_hw_fifo_abort(fifo); | |
3050 | ||
3051 | if (fifo->mempool) | |
3052 | __vxge_hw_mempool_destroy(fifo->mempool); | |
3053 | ||
3054 | vp->vpath->fifoh = NULL; | |
3055 | ||
3056 | __vxge_hw_channel_free(&fifo->channel); | |
3057 | ||
3058 | return VXGE_HW_OK; | |
3059 | } | |
3060 | ||
3061 | /* | |
3062 | * __vxge_hw_vpath_pci_read - Read the content of given address | |
3063 | * in pci config space. | |
3064 | * Read from the vpath pci config space. | |
3065 | */ | |
42821a5b | 3066 | static enum vxge_hw_status |
40a3a915 RV |
3067 | __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, |
3068 | u32 phy_func_0, u32 offset, u32 *val) | |
3069 | { | |
3070 | u64 val64; | |
3071 | enum vxge_hw_status status = VXGE_HW_OK; | |
3072 | struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; | |
3073 | ||
3074 | val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset); | |
3075 | ||
3076 | if (phy_func_0) | |
3077 | val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0; | |
3078 | ||
3079 | writeq(val64, &vp_reg->pci_config_access_cfg1); | |
3080 | wmb(); | |
3081 | writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ, | |
3082 | &vp_reg->pci_config_access_cfg2); | |
3083 | wmb(); | |
3084 | ||
3085 | status = __vxge_hw_device_register_poll( | |
3086 | &vp_reg->pci_config_access_cfg2, | |
3087 | VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS); | |
3088 | ||
3089 | if (status != VXGE_HW_OK) | |
3090 | goto exit; | |
3091 | ||
3092 | val64 = readq(&vp_reg->pci_config_access_status); | |
3093 | ||
3094 | if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) { | |
3095 | status = VXGE_HW_FAIL; | |
3096 | *val = 0; | |
3097 | } else | |
3098 | *val = (u32)vxge_bVALn(val64, 32, 32); | |
3099 | exit: | |
3100 | return status; | |
3101 | } | |
3102 | ||
40a3a915 RV |
3103 | /** |
3104 | * vxge_hw_device_flick_link_led - Flick (blink) link LED. | |
3105 | * @hldev: HW device. | |
3106 | * @on_off: TRUE if flickering to be on, FALSE to be off | |
3107 | * | |
3108 | * Flicker the link LED. | |
3109 | */ | |
3110 | enum vxge_hw_status | |
8424e00d | 3111 | vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off) |
40a3a915 | 3112 | { |
8424e00d JM |
3113 | struct __vxge_hw_virtualpath *vpath; |
3114 | u64 data0, data1 = 0, steer_ctrl = 0; | |
3115 | enum vxge_hw_status status; | |
40a3a915 RV |
3116 | |
3117 | if (hldev == NULL) { | |
3118 | status = VXGE_HW_ERR_INVALID_DEVICE; | |
3119 | goto exit; | |
3120 | } | |
3121 | ||
8424e00d | 3122 | vpath = &hldev->virtual_paths[hldev->first_vp_id]; |
40a3a915 | 3123 | |
8424e00d JM |
3124 | data0 = on_off; |
3125 | status = vxge_hw_vpath_fw_api(vpath, | |
3126 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL, | |
3127 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, | |
3128 | 0, &data0, &data1, &steer_ctrl); | |
40a3a915 RV |
3129 | exit: |
3130 | return status; | |
3131 | } | |
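/*
 * Usage sketch (editorial): an ethtool-style "identify adapter"
 * callback could blink the LED with this API.  The 1/0 encoding of
 * @on_off and the msleep() pacing are assumptions, not mandated here.
 *
 *	vxge_hw_device_flick_link_led(hldev, 1);	start blinking
 *	msleep(500);
 *	vxge_hw_device_flick_link_led(hldev, 0);	stop blinking
 */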
3132 | ||
3133 | /* | |
3134 | * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables | |
3135 | */ | |
3136 | enum vxge_hw_status | |
8424e00d JM |
3137 | __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp, |
3138 | u32 action, u32 rts_table, u32 offset, | |
3139 | u64 *data0, u64 *data1) | |
40a3a915 | 3140 | { |
8424e00d JM |
3141 | enum vxge_hw_status status; |
3142 | u64 steer_ctrl = 0; | |
40a3a915 RV |
3143 | |
3144 | if (vp == NULL) { | |
3145 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
3146 | goto exit; | |
3147 | } | |
3148 | ||
40a3a915 | 3149 | if ((rts_table == |
8424e00d | 3150 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || |
40a3a915 | 3151 | (rts_table == |
8424e00d | 3152 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || |
40a3a915 | 3153 | (rts_table == |
8424e00d | 3154 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || |
40a3a915 | 3155 | (rts_table == |
8424e00d JM |
3156 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { |
3157 | steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; | |
40a3a915 RV |
3158 | } |
3159 | ||
8424e00d JM |
3160 | status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, |
3161 | data0, data1, &steer_ctrl); | |
40a3a915 RV |
3162 | if (status != VXGE_HW_OK) |
3163 | goto exit; | |
3164 | ||
8424e00d JM |
3165 | if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) && | |
3166 | (rts_table != | |
3167 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) | |
3168 | *data1 = 0; | |
40a3a915 RV |
3169 | exit: |
3170 | return status; | |
3171 | } | |
3172 | ||
3173 | /* | |
3174 | * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables | |
3175 | */ | |
3176 | enum vxge_hw_status | |
8424e00d JM |
3177 | __vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action, |
3178 | u32 rts_table, u32 offset, u64 steer_data0, | |
3179 | u64 steer_data1) | |
40a3a915 | 3180 | { |
8424e00d JM |
3181 | u64 data0, data1 = 0, steer_ctrl = 0; |
3182 | enum vxge_hw_status status; | |
40a3a915 RV |
3183 | |
3184 | if (vp == NULL) { | |
3185 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
3186 | goto exit; | |
3187 | } | |
3188 | ||
8424e00d | 3189 | data0 = steer_data0; |
40a3a915 RV |
3190 | |
3191 | if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || | |
3192 | (rts_table == | |
8424e00d JM |
3193 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) |
3194 | data1 = steer_data1; | |
40a3a915 | 3195 | |
8424e00d JM |
3196 | status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, |
3197 | &data0, &data1, &steer_ctrl); | |
40a3a915 RV |
3198 | exit: |
3199 | return status; | |
3200 | } | |
3201 | ||
3202 | /* | |
3203 | * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing. | |
3204 | */ | |
3205 | enum vxge_hw_status vxge_hw_vpath_rts_rth_set( | |
3206 | struct __vxge_hw_vpath_handle *vp, | |
3207 | enum vxge_hw_rth_algoritms algorithm, | |
3208 | struct vxge_hw_rth_hash_types *hash_type, | |
3209 | u16 bucket_size) | |
3210 | { | |
3211 | u64 data0, data1; | |
3212 | enum vxge_hw_status status = VXGE_HW_OK; | |
3213 | ||
3214 | if (vp == NULL) { | |
3215 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
3216 | goto exit; | |
3217 | } | |
3218 | ||
3219 | status = __vxge_hw_vpath_rts_table_get(vp, | |
3220 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, | |
3221 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, | |
3222 | 0, &data0, &data1); | |
47f01db4 JM |
3223 | if (status != VXGE_HW_OK) |
3224 | goto exit; | |
40a3a915 RV |
3225 | |
3226 | data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | | |
3227 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); | |
3228 | ||
3229 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN | | |
3230 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) | | |
3231 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm); | |
3232 | ||
3233 | if (hash_type->hash_type_tcpipv4_en) | |
3234 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN; | |
3235 | ||
3236 | if (hash_type->hash_type_ipv4_en) | |
3237 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN; | |
3238 | ||
3239 | if (hash_type->hash_type_tcpipv6_en) | |
3240 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN; | |
3241 | ||
3242 | if (hash_type->hash_type_ipv6_en) | |
3243 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN; | |
3244 | ||
3245 | if (hash_type->hash_type_tcpipv6ex_en) | |
3246 | data0 |= | |
3247 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN; | |
3248 | ||
3249 | if (hash_type->hash_type_ipv6ex_en) | |
3250 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN; | |
3251 | ||
3252 | if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0)) | |
3253 | data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE; | |
3254 | else | |
3255 | data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE; | |
3256 | ||
3257 | status = __vxge_hw_vpath_rts_table_set(vp, | |
3258 | VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY, | |
3259 | VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, | |
3260 | 0, data0, 0); | |
3261 | exit: | |
3262 | return status; | |
3263 | } | |
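/*
 * Usage sketch (editorial): enabling RTH with TCP/IPv4 and IPv4
 * hashing on a vpath handle.  RTH_ALG_JENKINS is the Jenkins-hash
 * selector of enum vxge_hw_rth_algoritms and the bucket size of 3
 * (2^3 = 8 buckets) is illustrative; treat both as assumptions.
 *
 *	struct vxge_hw_rth_hash_types hash = { 0 };
 *
 *	hash.hash_type_tcpipv4_en = 1;
 *	hash.hash_type_ipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &hash, 3);
 */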
3264 | ||
3265 | static void | |
3266 | vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1, | |
3267 | u16 flag, u8 *itable) | |
3268 | { | |
3269 | switch (flag) { | |
3270 | case 1: | |
3271 | *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)| | |
3272 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN | | |
3273 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(itable[j]); | |
3274 | break; | |
3275 | case 2: | |
3276 | *data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)| | |
3277 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN | | |
3278 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(itable[j]); | |
3279 | break; | |
3280 | case 3: | |
3281 | *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)| | |
3282 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN | | |
3283 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(itable[j]); | |
3284 | break; | |
3285 | case 4: | |
3286 | *data1 |= VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)| | |
3287 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN | | |
3288 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(itable[j]); | |
3289 | break; | |
3290 | default: | |
3291 | return; | |
3292 | } | |
3295 | } | |
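/*
 * Note on the helper above: one RTH_MULTI_IT steering write carries up
 * to four indirection-table entries - ITEM0/ITEM1 of data0 plus
 * ITEM0/ITEM1 of data1.  @flag (1..4) selects which of the four slots
 * entry @j is packed into, and the break after each case keeps one
 * call filling exactly one slot; the caller below invokes the helper
 * up to four times with increasing j before issuing a single
 * __vxge_hw_vpath_rts_table_set().
 */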
3296 | /* | |
3297 | * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT). | |
3298 | */ | |
3299 | enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set( | |
3300 | struct __vxge_hw_vpath_handle **vpath_handles, | |
3301 | u32 vpath_count, | |
3302 | u8 *mtable, | |
3303 | u8 *itable, | |
3304 | u32 itable_size) | |
3305 | { | |
3306 | u32 i, j, action, rts_table; | |
3307 | u64 data0; | |
3308 | u64 data1; | |
3309 | u32 max_entries; | |
3310 | enum vxge_hw_status status = VXGE_HW_OK; | |
3311 | struct __vxge_hw_vpath_handle *vp = vpath_handles[0]; | |
3312 | ||
3313 | if (vp == NULL) { | |
3314 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
3315 | goto exit; | |
3316 | } | |
3317 | ||
3318 | max_entries = (((u32)1) << itable_size); | |
3319 | ||
3320 | if (vp->vpath->hldev->config.rth_it_type | |
3321 | == VXGE_HW_RTH_IT_TYPE_SOLO_IT) { | |
3322 | action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; | |
3323 | rts_table = | |
3324 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT; | |
3325 | ||
3326 | for (j = 0; j < max_entries; j++) { | |
3327 | ||
3328 | data1 = 0; | |
3329 | ||
3330 | data0 = | |
3331 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( | |
3332 | itable[j]); | |
3333 | ||
3334 | status = __vxge_hw_vpath_rts_table_set(vpath_handles[0], | |
3335 | action, rts_table, j, data0, data1); | |
3336 | ||
3337 | if (status != VXGE_HW_OK) | |
3338 | goto exit; | |
3339 | } | |
3340 | ||
3341 | for (j = 0; j < max_entries; j++) { | |
3342 | ||
3343 | data1 = 0; | |
3344 | ||
3345 | data0 = | |
3346 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN | | |
3347 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( | |
3348 | itable[j]); | |
3349 | ||
3350 | status = __vxge_hw_vpath_rts_table_set( | |
3351 | vpath_handles[mtable[itable[j]]], action, | |
3352 | rts_table, j, data0, data1); | |
3353 | ||
3354 | if (status != VXGE_HW_OK) | |
3355 | goto exit; | |
3356 | } | |
3357 | } else { | |
3358 | action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; | |
3359 | rts_table = | |
3360 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT; | |
3361 | for (i = 0; i < vpath_count; i++) { | |
3362 | ||
3363 | for (j = 0; j < max_entries;) { | |
3364 | ||
3365 | data0 = 0; | |
3366 | data1 = 0; | |
3367 | ||
3368 | while (j < max_entries) { | |
3369 | if (mtable[itable[j]] != i) { | |
3370 | j++; | |
3371 | continue; | |
3372 | } | |
3373 | vxge_hw_rts_rth_data0_data1_get(j, | |
3374 | &data0, &data1, 1, itable); | |
3375 | j++; | |
3376 | break; | |
3377 | } | |
3378 | ||
3379 | while (j < max_entries) { | |
3380 | if (mtable[itable[j]] != i) { | |
3381 | j++; | |
3382 | continue; | |
3383 | } | |
3384 | vxge_hw_rts_rth_data0_data1_get(j, | |
3385 | &data0, &data1, 2, itable); | |
3386 | j++; | |
3387 | break; | |
3388 | } | |
3389 | ||
3390 | while (j < max_entries) { | |
3391 | if (mtable[itable[j]] != i) { | |
3392 | j++; | |
3393 | continue; | |
3394 | } | |
3395 | vxge_hw_rts_rth_data0_data1_get(j, | |
3396 | &data0, &data1, 3, itable); | |
3397 | j++; | |
3398 | break; | |
3399 | } | |
3400 | ||
3401 | while (j < max_entries) { | |
3402 | if (mtable[itable[j]] != i) { | |
3403 | j++; | |
3404 | continue; | |
3405 | } | |
3406 | vxge_hw_rts_rth_data0_data1_get(j, | |
3407 | &data0, &data1, 4, itable); | |
3408 | j++; | |
3409 | break; | |
3410 | } | |
3411 | ||
3412 | if (data0 != 0) { | |
3413 | status = __vxge_hw_vpath_rts_table_set( | |
3414 | vpath_handles[i], | |
3415 | action, rts_table, | |
3416 | 0, data0, data1); | |
3417 | ||
3418 | if (status != VXGE_HW_OK) | |
3419 | goto exit; | |
3420 | } | |
3421 | } | |
3422 | } | |
3423 | } | |
3424 | exit: | |
3425 | return status; | |
3426 | } | |
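/*
 * Usage sketch (editorial): spreading an 8-entry indirection table
 * round-robin over the open vpaths.  The identity itable and the
 * modulo mtable are illustrative; any mapping works as long as
 * mtable[bucket] is a valid index into vpath_handles[].
 *
 *	u8 mtable[8], itable[8];
 *	u32 j;
 *
 *	for (j = 0; j < 8; j++) {
 *		itable[j] = j;
 *		mtable[j] = j % vpath_count;
 *	}
 *	status = vxge_hw_vpath_rts_rth_itable_set(vpath_handles,
 *				vpath_count, mtable, itable, 3);
 */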
3427 | ||
3428 | /** | |
3429 | * vxge_hw_vpath_check_leak - Check for memory leak | |
3430 | * @ring: Handle to the ring object used for receive | |
3431 | * | |
3432 | * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to | |
3433 | * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred. | |
3434 | * Returns: VXGE_HW_FAIL, if leak has occurred. | |
3435 | * | |
3436 | */ | |
3437 | enum vxge_hw_status | |
3438 | vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring) | |
3439 | { | |
3440 | enum vxge_hw_status status = VXGE_HW_OK; | |
3441 | u64 rxd_new_count, rxd_spat; | |
3442 | ||
3443 | if (ring == NULL) | |
3444 | return status; | |
3445 | ||
3446 | rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell); | |
3447 | rxd_spat = readq(&ring->vp_reg->prc_cfg6); | |
3448 | rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat); | |
3449 | ||
3450 | if (rxd_new_count >= rxd_spat) | |
3451 | status = VXGE_HW_FAIL; | |
3452 | ||
3453 | return status; | |
3454 | } | |
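/*
 * Usage sketch (editorial): a watchdog or poll routine might pair the
 * leak check with a vpath reset.  Having both the ring and a vpath
 * handle in scope is an assumption about the caller.
 *
 *	if (vxge_hw_vpath_check_leak(ring) == VXGE_HW_FAIL)
 *		status = vxge_hw_vpath_reset(vp);
 */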
3455 | ||
3456 | /* | |
3457 | * __vxge_hw_vpath_mgmt_read | |
3458 | * This routine reads the vpath_mgmt registers | |
3459 | */ | |
3460 | static enum vxge_hw_status | |
3461 | __vxge_hw_vpath_mgmt_read( | |
3462 | struct __vxge_hw_device *hldev, | |
3463 | struct __vxge_hw_virtualpath *vpath) | |
3464 | { | |
3465 | u32 i, mtu = 0, max_pyld = 0; | |
3466 | u64 val64; | |
3467 | enum vxge_hw_status status = VXGE_HW_OK; | |
3468 | ||
3469 | for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { | |
3470 | ||
3471 | val64 = readq(&vpath->vpmgmt_reg-> | |
3472 | rxmac_cfg0_port_vpmgmt_clone[i]); | |
3473 | max_pyld = | |
3474 | (u32) | |
3475 | VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN | |
3476 | (val64); | |
3477 | if (mtu < max_pyld) | |
3478 | mtu = max_pyld; | |
3479 | } | |
3480 | ||
3481 | vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE; | |
3482 | ||
3483 | val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp); | |
3484 | ||
3485 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | |
3486 | if (val64 & vxge_mBIT(i)) | |
3487 | vpath->vsport_number = i; | |
3488 | } | |
3489 | ||
3490 | val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone); | |
3491 | ||
3492 | if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK) | |
3493 | VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP); | |
3494 | else | |
3495 | VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN); | |
3496 | ||
3497 | return status; | |
3498 | } | |
3499 | ||
3500 | /* | |
3501 | * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed | |
3502 | * This routine checks the vpath_rst_in_prog register to see if the | |
3503 | * adapter has completed the reset process for the vpath | |
3504 | */ | |
42821a5b | 3505 | static enum vxge_hw_status |
40a3a915 RV |
3506 | __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) |
3507 | { | |
3508 | enum vxge_hw_status status; | |
3509 | ||
3510 | status = __vxge_hw_device_register_poll( | |
3511 | &vpath->hldev->common_reg->vpath_rst_in_prog, | |
3512 | VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG( | |
3513 | 1 << (16 - vpath->vp_id)), | |
3514 | vpath->hldev->config.device_poll_millis); | |
3515 | ||
3516 | return status; | |
3517 | } | |
3518 | ||
3519 | /* | |
3520 | * __vxge_hw_vpath_reset | |
3521 | * This routine resets the vpath on the device | |
3522 | */ | |
42821a5b | 3523 | static enum vxge_hw_status |
40a3a915 RV |
3524 | __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) |
3525 | { | |
3526 | u64 val64; | |
3527 | enum vxge_hw_status status = VXGE_HW_OK; | |
3528 | ||
3529 | val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id)); | |
3530 | ||
3531 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), | |
3532 | &hldev->common_reg->cmn_rsthdlr_cfg0); | |
3533 | ||
3534 | return status; | |
3535 | } | |
3536 | ||
3537 | /* | |
3538 | * __vxge_hw_vpath_sw_reset | |
3539 | * This routine resets the vpath structures | |
3540 | */ | |
42821a5b | 3541 | static enum vxge_hw_status |
40a3a915 RV |
3542 | __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) |
3543 | { | |
3544 | enum vxge_hw_status status = VXGE_HW_OK; | |
3545 | struct __vxge_hw_virtualpath *vpath; | |
3546 | ||
3547 | vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id]; | |
3548 | ||
3549 | if (vpath->ringh) { | |
3550 | status = __vxge_hw_ring_reset(vpath->ringh); | |
3551 | if (status != VXGE_HW_OK) | |
3552 | goto exit; | |
3553 | } | |
3554 | ||
3555 | if (vpath->fifoh) | |
3556 | status = __vxge_hw_fifo_reset(vpath->fifoh); | |
3557 | exit: | |
3558 | return status; | |
3559 | } | |
3560 | ||
3561 | /* | |
3562 | * __vxge_hw_vpath_prc_configure | |
3563 | * This routine configures the prc registers of virtual path using the config | |
3564 | * passed | |
3565 | */ | |
42821a5b | 3566 | static void |
40a3a915 RV |
3567 | __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3568 | { | |
3569 | u64 val64; | |
3570 | struct __vxge_hw_virtualpath *vpath; | |
3571 | struct vxge_hw_vp_config *vp_config; | |
3572 | struct vxge_hw_vpath_reg __iomem *vp_reg; | |
3573 | ||
3574 | vpath = &hldev->virtual_paths[vp_id]; | |
3575 | vp_reg = vpath->vp_reg; | |
3576 | vp_config = vpath->vp_config; | |
3577 | ||
3578 | if (vp_config->ring.enable == VXGE_HW_RING_DISABLE) | |
3579 | return; | |
3580 | ||
3581 | val64 = readq(&vp_reg->prc_cfg1); | |
3582 | val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE; | |
3583 | writeq(val64, &vp_reg->prc_cfg1); | |
3584 | ||
3585 | val64 = readq(&vpath->vp_reg->prc_cfg6); | |
3586 | val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN; | |
3587 | writeq(val64, &vpath->vp_reg->prc_cfg6); | |
3588 | ||
3589 | val64 = readq(&vp_reg->prc_cfg7); | |
3590 | ||
3591 | if (vpath->vp_config->ring.scatter_mode != | |
3592 | VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) { | |
3593 | ||
3594 | val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3); | |
3595 | ||
3596 | switch (vpath->vp_config->ring.scatter_mode) { | |
3597 | case VXGE_HW_RING_SCATTER_MODE_A: | |
3598 | val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( | |
3599 | VXGE_HW_PRC_CFG7_SCATTER_MODE_A); | |
3600 | break; | |
3601 | case VXGE_HW_RING_SCATTER_MODE_B: | |
3602 | val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( | |
3603 | VXGE_HW_PRC_CFG7_SCATTER_MODE_B); | |
3604 | break; | |
3605 | case VXGE_HW_RING_SCATTER_MODE_C: | |
3606 | val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( | |
3607 | VXGE_HW_PRC_CFG7_SCATTER_MODE_C); | |
3608 | break; | |
3609 | } | |
3610 | } | |
3611 | ||
3612 | writeq(val64, &vp_reg->prc_cfg7); | |
3613 | ||
3614 | writeq(VXGE_HW_PRC_CFG5_RXD0_ADD( | |
3615 | __vxge_hw_ring_first_block_address_get( | |
3616 | vpath->ringh) >> 3), &vp_reg->prc_cfg5); | |
3617 | ||
3618 | val64 = readq(&vp_reg->prc_cfg4); | |
3619 | val64 |= VXGE_HW_PRC_CFG4_IN_SVC; | |
3620 | val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3); | |
3621 | ||
3622 | val64 |= VXGE_HW_PRC_CFG4_RING_MODE( | |
3623 | VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER); | |
3624 | ||
3625 | if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE) | |
3626 | val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE; | |
3627 | else | |
3628 | val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE; | |
3629 | ||
3630 | writeq(val64, &vp_reg->prc_cfg4); | |
40a3a915 RV |
3631 | } |
3632 | ||
3633 | /* | |
3634 | * __vxge_hw_vpath_kdfc_configure | |
3635 | * This routine configures the kdfc registers of the virtual path using | |
3636 | * the config passed | |
3637 | */ | |
42821a5b | 3638 | static enum vxge_hw_status |
40a3a915 RV |
3639 | __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3640 | { | |
3641 | u64 val64; | |
3642 | u64 vpath_stride; | |
3643 | enum vxge_hw_status status = VXGE_HW_OK; | |
3644 | struct __vxge_hw_virtualpath *vpath; | |
3645 | struct vxge_hw_vpath_reg __iomem *vp_reg; | |
3646 | ||
3647 | vpath = &hldev->virtual_paths[vp_id]; | |
3648 | vp_reg = vpath->vp_reg; | |
3649 | status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg); | |
3650 | ||
3651 | if (status != VXGE_HW_OK) | |
3652 | goto exit; | |
3653 | ||
3654 | val64 = readq(&vp_reg->kdfc_drbl_triplet_total); | |
3655 | ||
3656 | vpath->max_kdfc_db = | |
3657 | (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE( | |
3658 | val64+1)/2; | |
3659 | ||
3660 | if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | |
3661 | ||
3662 | vpath->max_nofl_db = vpath->max_kdfc_db; | |
3663 | ||
3664 | if (vpath->max_nofl_db < | |
3665 | ((vpath->vp_config->fifo.memblock_size / | |
3666 | (vpath->vp_config->fifo.max_frags * | |
3667 | sizeof(struct vxge_hw_fifo_txd))) * | |
3668 | vpath->vp_config->fifo.fifo_blocks)) { | |
3669 | ||
3670 | return VXGE_HW_BADCFG_FIFO_BLOCKS; | |
3671 | } | |
3672 | val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0( | |
3673 | (vpath->max_nofl_db*2)-1); | |
3674 | } | |
3675 | ||
3676 | writeq(val64, &vp_reg->kdfc_fifo_trpl_partition); | |
3677 | ||
3678 | writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE, | |
3679 | &vp_reg->kdfc_fifo_trpl_ctrl); | |
3680 | ||
3681 | val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl); | |
3682 | ||
3683 | val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) | | |
3684 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF)); | |
3685 | ||
3686 | val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE( | |
3687 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) | | |
3688 | #ifndef __BIG_ENDIAN | |
3689 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN | | |
3690 | #endif | |
3691 | VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0); | |
3692 | ||
3693 | writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl); | |
3694 | writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address); | |
3695 | wmb(); | |
3696 | vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride); | |
3697 | ||
3698 | vpath->nofl_db = | |
3699 | (struct __vxge_hw_non_offload_db_wrapper __iomem *) | |
3700 | (hldev->kdfc + (vp_id * | |
3701 | VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE( | |
3702 | vpath_stride))); | |
3703 | exit: | |
3704 | return status; | |
3705 | } | |
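/*
 * Worked example (editorial, illustrative numbers): the fifo_blocks
 * check above guarantees the non-offload doorbell FIFO can cover every
 * TxDL the fifo may post.  With memblock_size = 65536, max_frags = 64
 * and a 32-byte (four-qword) TxD, one memblock holds
 * 65536 / (64 * 32) = 32 TxDLs; with fifo_blocks = 8 the fifo needs
 * 32 * 8 = 256 doorbells, so a max_nofl_db below 256 would return
 * VXGE_HW_BADCFG_FIFO_BLOCKS.
 */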
3706 | ||
3707 | /* | |
3708 | * __vxge_hw_vpath_mac_configure | |
3709 | * This routine configures the MAC of the virtual path using the config passed | |
3710 | */ | |
42821a5b | 3711 | static enum vxge_hw_status |
40a3a915 RV |
3712 | __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3713 | { | |
3714 | u64 val64; | |
3715 | enum vxge_hw_status status = VXGE_HW_OK; | |
3716 | struct __vxge_hw_virtualpath *vpath; | |
3717 | struct vxge_hw_vp_config *vp_config; | |
3718 | struct vxge_hw_vpath_reg __iomem *vp_reg; | |
3719 | ||
3720 | vpath = &hldev->virtual_paths[vp_id]; | |
3721 | vp_reg = vpath->vp_reg; | |
3722 | vp_config = vpath->vp_config; | |
3723 | ||
3724 | writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER( | |
3725 | vpath->vsport_number), &vp_reg->xmac_vsport_choice); | |
3726 | ||
3727 | if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) { | |
3728 | ||
3729 | val64 = readq(&vp_reg->xmac_rpa_vcfg); | |
3730 | ||
3731 | if (vp_config->rpa_strip_vlan_tag != | |
3732 | VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) { | |
3733 | if (vp_config->rpa_strip_vlan_tag) | |
3734 | val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; | |
3735 | else | |
3736 | val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; | |
3737 | } | |
3738 | ||
3739 | writeq(val64, &vp_reg->xmac_rpa_vcfg); | |
3740 | val64 = readq(&vp_reg->rxmac_vcfg0); | |
3741 | ||
3742 | if (vp_config->mtu != | |
3743 | VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) { | |
3744 | val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); | |
3745 | if ((vp_config->mtu + | |
3746 | VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu) | |
3747 | val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( | |
3748 | vp_config->mtu + | |
3749 | VXGE_HW_MAC_HEADER_MAX_SIZE); | |
3750 | else | |
3751 | val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( | |
3752 | vpath->max_mtu); | |
3753 | } | |
3754 | ||
3755 | writeq(val64, &vp_reg->rxmac_vcfg0); | |
3756 | ||
3757 | val64 = readq(&vp_reg->rxmac_vcfg1); | |
3758 | ||
3759 | val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) | | |
3760 | VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE); | |
3761 | ||
3762 | if (hldev->config.rth_it_type == | |
3763 | VXGE_HW_RTH_IT_TYPE_MULTI_IT) { | |
3764 | val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE( | |
3765 | 0x2) | | |
3766 | VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE; | |
3767 | } | |
3768 | ||
3769 | writeq(val64, &vp_reg->rxmac_vcfg1); | |
3770 | } | |
3771 | return status; | |
3772 | } | |
3773 | ||
3774 | /* | |
3775 | * __vxge_hw_vpath_tim_configure | |
3776 | * This routine configures the tim registers of the virtual path using the | |
3777 | * config passed | |
3778 | */ | |
42821a5b | 3779 | static enum vxge_hw_status |
40a3a915 RV |
3780 | __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) |
3781 | { | |
3782 | u64 val64; | |
3783 | enum vxge_hw_status status = VXGE_HW_OK; | |
3784 | struct __vxge_hw_virtualpath *vpath; | |
3785 | struct vxge_hw_vpath_reg __iomem *vp_reg; | |
3786 | struct vxge_hw_vp_config *config; | |
3787 | ||
3788 | vpath = &hldev->virtual_paths[vp_id]; | |
3789 | vp_reg = vpath->vp_reg; | |
3790 | config = vpath->vp_config; | |
3791 | ||
3792 | writeq((u64)0, &vp_reg->tim_dest_addr); | |
3793 | writeq((u64)0, &vp_reg->tim_vpath_map); | |
3794 | writeq((u64)0, &vp_reg->tim_bitmap); | |
3795 | writeq((u64)0, &vp_reg->tim_remap); | |
3796 | ||
3797 | if (config->ring.enable == VXGE_HW_RING_ENABLE) | |
3798 | writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( | |
3799 | (vp_id * VXGE_HW_MAX_INTR_PER_VP) + | |
3800 | VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn); | |
3801 | ||
3802 | val64 = readq(&vp_reg->tim_pci_cfg); | |
3803 | val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD; | |
3804 | writeq(val64, &vp_reg->tim_pci_cfg); | |
3805 | ||
3806 | if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | |
3807 | ||
3808 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | |
3809 | ||
3810 | if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | |
3811 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | |
3812 | 0x3ffffff); | |
3813 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | |
3814 | config->tti.btimer_val); | |
3815 | } | |
3816 | ||
3817 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; | |
3818 | ||
3819 | if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { | |
3820 | if (config->tti.timer_ac_en) | |
3821 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | |
3822 | else | |
3823 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | |
3824 | } | |
3825 | ||
3826 | if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { | |
3827 | if (config->tti.timer_ci_en) | |
3828 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | |
3829 | else | |
3830 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | |
3831 | } | |
3832 | ||
3833 | if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { | |
3834 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); | |
3835 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( | |
3836 | config->tti.urange_a); | |
3837 | } | |
3838 | ||
3839 | if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { | |
3840 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); | |
3841 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( | |
3842 | config->tti.urange_b); | |
3843 | } | |
3844 | ||
3845 | if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { | |
3846 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); | |
3847 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( | |
3848 | config->tti.urange_c); | |
3849 | } | |
3850 | ||
3851 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | |
3852 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); | |
3853 | ||
3854 | if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { | |
3855 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); | |
3856 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( | |
3857 | config->tti.uec_a); | |
3858 | } | |
3859 | ||
3860 | if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { | |
3861 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); | |
3862 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( | |
3863 | config->tti.uec_b); | |
3864 | } | |
3865 | ||
3866 | if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { | |
3867 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); | |
3868 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( | |
3869 | config->tti.uec_c); | |
3870 | } | |
3871 | ||
3872 | if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { | |
3873 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); | |
3874 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( | |
3875 | config->tti.uec_d); | |
3876 | } | |
3877 | ||
3878 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); | |
3879 | val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); | |
3880 | ||
3881 | if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { | |
3882 | if (config->tti.timer_ri_en) | |
3883 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | |
3884 | else | |
3885 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | |
3886 | } | |
3887 | ||
3888 | if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | |
3889 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | |
3890 | 0x3ffffff); | |
3891 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | |
3892 | config->tti.rtimer_val); | |
3893 | } | |
3894 | ||
3895 | if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { | |
3896 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); | |
3897 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( | |
3898 | config->tti.util_sel); | |
3899 | } | |
3900 | ||
3901 | if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | |
3902 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | |
3903 | 0x3ffffff); | |
3904 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | |
3905 | config->tti.ltimer_val); | |
3906 | } | |
3907 | ||
3908 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); | |
3909 | } | |
3910 | ||
3911 | if (config->ring.enable == VXGE_HW_RING_ENABLE) { | |
3912 | ||
3913 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); | |
3914 | ||
3915 | if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | |
3916 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | |
3917 | 0x3ffffff); | |
3918 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( | |
3919 | config->rti.btimer_val); | |
3920 | } | |
3921 | ||
3922 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; | |
3923 | ||
3924 | if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { | |
3925 | if (config->rti.timer_ac_en) | |
3926 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | |
3927 | else | |
3928 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; | |
3929 | } | |
3930 | ||
3931 | if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { | |
3932 | if (config->rti.timer_ci_en) | |
3933 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | |
3934 | else | |
3935 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | |
3936 | } | |
3937 | ||
3938 | if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { | |
3939 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); | |
3940 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( | |
3941 | config->rti.urange_a); | |
3942 | } | |
3943 | ||
3944 | if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { | |
3945 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); | |
3946 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( | |
3947 | config->rti.urange_b); | |
3948 | } | |
3949 | ||
3950 | if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { | |
3951 | val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); | |
3952 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( | |
3953 | config->rti.urange_c); | |
3954 | } | |
3955 | ||
3956 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); | |
3957 | val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); | |
3958 | ||
3959 | if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { | |
3960 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); | |
3961 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( | |
3962 | config->rti.uec_a); | |
3963 | } | |
3964 | ||
3965 | if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { | |
3966 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); | |
3967 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( | |
3968 | config->rti.uec_b); | |
3969 | } | |
3970 | ||
3971 | if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { | |
3972 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); | |
3973 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( | |
3974 | config->rti.uec_c); | |
3975 | } | |
3976 | ||
3977 | if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { | |
3978 | val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); | |
3979 | val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( | |
3980 | config->rti.uec_d); | |
3981 | } | |
3982 | ||
3983 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); | |
3984 | val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); | |
3985 | ||
3986 | if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { | |
3987 | if (config->rti.timer_ri_en) | |
3988 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | |
3989 | else | |
3990 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; | |
3991 | } | |
3992 | ||
3993 | if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | |
3994 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | |
3995 | 0x3ffffff); | |
3996 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( | |
3997 | config->rti.rtimer_val); | |
3998 | } | |
3999 | ||
4000 | if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { | |
4001 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); | |
4002 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( | |
4003 | config->rti.util_sel); | |
4004 | } | |
4005 | ||
4006 | if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { | |
4007 | val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | |
4008 | 0x3ffffff); | |
4009 | val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( | |
4010 | config->rti.ltimer_val); | |
4011 | } | |
4012 | ||
4013 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); | |
4014 | } | |
4015 | ||
4016 | val64 = 0; | |
4017 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]); | |
4018 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]); | |
4019 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]); | |
4020 | writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]); | |
4021 | writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); | |
4022 | writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); | |
4023 | ||
4024 | return status; | |
4025 | } | |
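/*
 * Configuration sketch (editorial): the TTI/RTI fields consumed above
 * come from the per-vpath config.  Leaving a field at
 * VXGE_HW_USE_FLASH_DEFAULT keeps the flash value; explicit values
 * override it.  The numbers below are illustrative only.
 *
 *	config->tti.btimer_val = 250;	base timer value
 *	config->tti.timer_ac_en = 1;	enable timer auto-cancel
 *	config->tti.urange_a = 10;	utilization range A threshold
 */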
4026 | ||
eb5f10c2 SH |
4027 | void |
4028 | vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id) | |
4029 | { | |
4030 | struct __vxge_hw_virtualpath *vpath; | |
4031 | struct vxge_hw_vpath_reg __iomem *vp_reg; | |
4032 | struct vxge_hw_vp_config *config; | |
4033 | u64 val64; | |
4034 | ||
4035 | vpath = &hldev->virtual_paths[vp_id]; | |
4036 | vp_reg = vpath->vp_reg; | |
4037 | config = vpath->vp_config; | |
4038 | ||
4039 | if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | |
4040 | val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | |
4041 | ||
4042 | if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { | |
4043 | config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; | |
4044 | val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; | |
4045 | writeq(val64, | |
4046 | &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); | |
4047 | } | |
4048 | } | |
eb5f10c2 | 4049 | } |
40a3a915 RV |
4050 | /* |
4051 | * __vxge_hw_vpath_initialize | |
4052 | * This routine is the final phase of init which initializes the | |
4053 | * registers of the vpath using the configuration passed. | |
4054 | */ | |
42821a5b | 4055 | static enum vxge_hw_status |
40a3a915 RV |
4056 | __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) |
4057 | { | |
4058 | u64 val64; | |
4059 | u32 val32; | |
4060 | enum vxge_hw_status status = VXGE_HW_OK; | |
4061 | struct __vxge_hw_virtualpath *vpath; | |
4062 | struct vxge_hw_vpath_reg __iomem *vp_reg; | |
4063 | ||
4064 | vpath = &hldev->virtual_paths[vp_id]; | |
4065 | ||
4066 | if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) { | |
4067 | status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE; | |
4068 | goto exit; | |
4069 | } | |
4070 | vp_reg = vpath->vp_reg; | |
4071 | ||
4072 | status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); | |
4073 | ||
4074 | if (status != VXGE_HW_OK) | |
4075 | goto exit; | |
4076 | ||
4077 | status = __vxge_hw_vpath_mac_configure(hldev, vp_id); | |
4078 | ||
4079 | if (status != VXGE_HW_OK) | |
4080 | goto exit; | |
4081 | ||
4082 | status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); | |
4083 | ||
4084 | if (status != VXGE_HW_OK) | |
4085 | goto exit; | |
4086 | ||
4087 | status = __vxge_hw_vpath_tim_configure(hldev, vp_id); | |
4088 | ||
4089 | if (status != VXGE_HW_OK) | |
4090 | goto exit; | |
4091 | ||
40a3a915 RV |
4092 | val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl); |
4093 | ||
4094 | /* Get MRRS value from device control */ | |
4095 | status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); | |
4096 | ||
4097 | if (status == VXGE_HW_OK) { | |
4098 | val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; | |
4099 | val64 &= | |
4100 | ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7)); | |
4101 | val64 |= | |
4102 | VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32); | |
4103 | ||
4104 | val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE; | |
4105 | } | |
4106 | ||
4107 | val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7)); | |
4108 | val64 |= | |
4109 | VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY( | |
4110 | VXGE_HW_MAX_PAYLOAD_SIZE_512); | |
4111 | ||
4112 | val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN; | |
4113 | writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl); | |
4114 | ||
4115 | exit: | |
4116 | return status; | |
4117 | } | |
4118 | ||
4119 | /* | |
4120 | * __vxge_hw_vp_initialize - Initialize Virtual Path structure | |
4121 | * This routine is the initial phase of init which resets the vpath and | |
4122 | * initializes the software support structures. | |
4123 | */ | |
42821a5b | 4124 | static enum vxge_hw_status |
40a3a915 RV |
4125 | __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, |
4126 | struct vxge_hw_vp_config *config) | |
4127 | { | |
4128 | struct __vxge_hw_virtualpath *vpath; | |
4129 | enum vxge_hw_status status = VXGE_HW_OK; | |
4130 | ||
4131 | if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) { | |
4132 | status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE; | |
4133 | goto exit; | |
4134 | } | |
4135 | ||
4136 | vpath = &hldev->virtual_paths[vp_id]; | |
4137 | ||
8424e00d | 4138 | spin_lock_init(&hldev->virtual_paths[vp_id].lock); |
40a3a915 RV |
4139 | vpath->vp_id = vp_id; |
4140 | vpath->vp_open = VXGE_HW_VP_OPEN; | |
4141 | vpath->hldev = hldev; | |
4142 | vpath->vp_config = config; | |
4143 | vpath->vp_reg = hldev->vpath_reg[vp_id]; | |
4144 | vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id]; | |
4145 | ||
4146 | __vxge_hw_vpath_reset(hldev, vp_id); | |
4147 | ||
4148 | status = __vxge_hw_vpath_reset_check(vpath); | |
40a3a915 RV |
4149 | if (status != VXGE_HW_OK) { |
4150 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | |
4151 | goto exit; | |
4152 | } | |
4153 | ||
4154 | status = __vxge_hw_vpath_mgmt_read(hldev, vpath); | |
40a3a915 RV |
4155 | if (status != VXGE_HW_OK) { |
4156 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | |
4157 | goto exit; | |
4158 | } | |
4159 | ||
4160 | INIT_LIST_HEAD(&vpath->vpath_handles); | |
4161 | ||
4162 | vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id]; | |
4163 | ||
4164 | VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0, | |
4165 | hldev->tim_int_mask1, vp_id); | |
4166 | ||
4167 | status = __vxge_hw_vpath_initialize(hldev, vp_id); | |
40a3a915 RV |
4168 | if (status != VXGE_HW_OK) |
4169 | __vxge_hw_vp_terminate(hldev, vp_id); | |
4170 | exit: | |
4171 | return status; | |
4172 | } | |
4173 | ||
4174 | /* | |
4175 | * __vxge_hw_vp_terminate - Terminate Virtual Path structure | |
4176 | * This routine closes all channels it opened and frees up memory | |
4177 | */ | |
42821a5b | 4178 | static void |
40a3a915 RV |
4179 | __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) |
4180 | { | |
4181 | struct __vxge_hw_virtualpath *vpath; | |
4182 | ||
4183 | vpath = &hldev->virtual_paths[vp_id]; | |
4184 | ||
4185 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) | |
4186 | goto exit; | |
4187 | ||
4188 | VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, | |
4189 | vpath->hldev->tim_int_mask1, vpath->vp_id); | |
4190 | hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; | |
4191 | ||
4192 | memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); | |
4193 | exit: | |
4194 | return; | |
4195 | } | |
4196 | ||
4197 | /* | |
4198 | * vxge_hw_vpath_mtu_set - Set MTU. | |
4199 | * Set a new MTU value. For example, to use jumbo frames: | |
4200 | * vxge_hw_vpath_mtu_set(my_device, 9600); | |
4201 | */ | |
4202 | enum vxge_hw_status | |
4203 | vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu) | |
4204 | { | |
4205 | u64 val64; | |
4206 | enum vxge_hw_status status = VXGE_HW_OK; | |
4207 | struct __vxge_hw_virtualpath *vpath; | |
4208 | ||
4209 | if (vp == NULL) { | |
4210 | status = VXGE_HW_ERR_INVALID_HANDLE; | |
4211 | goto exit; | |
4212 | } | |
4213 | vpath = vp->vpath; | |
4214 | ||
4215 | new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE; | |
4216 | ||
4217 | if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) { | |
4218 | status = VXGE_HW_ERR_INVALID_MTU_SIZE; | |
4218 | /* do not program an out-of-range frame length */ | |
4218 | goto exit; | |
4218 | } | |
4219 | ||
4220 | val64 = readq(&vpath->vp_reg->rxmac_vcfg0); | |
4221 | ||
4222 | val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); | |
4223 | val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu); | |
4224 | ||
4225 | writeq(val64, &vpath->vp_reg->rxmac_vcfg0); | |
4226 | ||
4227 | vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE; | |
4228 | ||
4229 | exit: | |
4230 | return status; | |
4231 | } | |
4232 | ||
4233 | /* | |
4234 | * vxge_hw_vpath_open - Open a virtual path on a given adapter | |
4235 | * This function is used to open access to a virtual path of an | |
4236 | * adapter for offload and GRO operations. The function returns | |
4237 | * synchronously. | |
4238 | */ | |
4239 | enum vxge_hw_status | |
4240 | vxge_hw_vpath_open(struct __vxge_hw_device *hldev, | |
4241 | struct vxge_hw_vpath_attr *attr, | |
4242 | struct __vxge_hw_vpath_handle **vpath_handle) | |
4243 | { | |
4244 | struct __vxge_hw_virtualpath *vpath; | |
4245 | struct __vxge_hw_vpath_handle *vp; | |
4246 | enum vxge_hw_status status; | |
4247 | ||
4248 | vpath = &hldev->virtual_paths[attr->vp_id]; | |
4249 | ||
4250 | if (vpath->vp_open == VXGE_HW_VP_OPEN) { | |
4251 | status = VXGE_HW_ERR_INVALID_STATE; | |
4252 | goto vpath_open_exit1; | |
4253 | } | |
4254 | ||
4255 | status = __vxge_hw_vp_initialize(hldev, attr->vp_id, | |
4256 | &hldev->config.vp_config[attr->vp_id]); | |
4257 | ||
4258 | if (status != VXGE_HW_OK) | |
4259 | goto vpath_open_exit1; | |
4260 | ||
4261 | vp = (struct __vxge_hw_vpath_handle *) | |
89bf67f1 | 4262 | vzalloc(sizeof(struct __vxge_hw_vpath_handle)); |
40a3a915 RV |
4263 | if (vp == NULL) { |
4264 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
4265 | goto vpath_open_exit2; | |
4266 | } | |
4267 | ||
40a3a915 RV |
4268 | vp->vpath = vpath; |
4269 | ||
4270 | if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { | |
4271 | status = __vxge_hw_fifo_create(vp, &attr->fifo_attr); | |
4272 | if (status != VXGE_HW_OK) | |
4273 | goto vpath_open_exit6; | |
4274 | } | |
4275 | ||
4276 | if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) { | |
4277 | status = __vxge_hw_ring_create(vp, &attr->ring_attr); | |
4278 | if (status != VXGE_HW_OK) | |
4279 | goto vpath_open_exit7; | |
4280 | ||
4281 | __vxge_hw_vpath_prc_configure(hldev, attr->vp_id); | |
4282 | } | |
4283 | ||
4284 | vpath->fifoh->tx_intr_num = | |
4285 | (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) + | |
4286 | VXGE_HW_VPATH_INTR_TX; | |
4287 | ||
4288 | vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, | |
4289 | VXGE_HW_BLOCK_SIZE); | |
4290 | ||
4291 | if (vpath->stats_block == NULL) { | |
4292 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
4293 | goto vpath_open_exit8; | |
4294 | } | |
4295 | ||
4296 | vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath-> | |
4297 | stats_block->memblock; | |
4298 | memset(vpath->hw_stats, 0, | |
4299 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | |
4300 | ||
4301 | hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] = | |
4302 | vpath->hw_stats; | |
4303 | ||
4304 | vpath->hw_stats_sav = | |
4305 | &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id]; | |
4306 | memset(vpath->hw_stats_sav, 0, | |
4307 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | |
4308 | ||
4309 | writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg); | |
4310 | ||
4311 | status = vxge_hw_vpath_stats_enable(vp); | |
4312 | if (status != VXGE_HW_OK) | |
4313 | goto vpath_open_exit8; | |
4314 | ||
4315 | list_add(&vp->item, &vpath->vpath_handles); | |
4316 | ||
4317 | hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id); | |
4318 | ||
4319 | *vpath_handle = vp; | |
4320 | ||
4321 | attr->fifo_attr.userdata = vpath->fifoh; | |
4322 | attr->ring_attr.userdata = vpath->ringh; | |
4323 | ||
4324 | return VXGE_HW_OK; | |
4325 | ||
4326 | vpath_open_exit8: | |
4327 | if (vpath->ringh != NULL) | |
4328 | __vxge_hw_ring_delete(vp); | |
4329 | vpath_open_exit7: | |
4330 | if (vpath->fifoh != NULL) | |
4331 | __vxge_hw_fifo_delete(vp); | |
4332 | vpath_open_exit6: | |
4333 | vfree(vp); | |
4334 | vpath_open_exit2: | |
4335 | __vxge_hw_vp_terminate(hldev, attr->vp_id); | |
4336 | vpath_open_exit1: | |
4337 | ||
4338 | return status; | |
4339 | } | |
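/*
 * Usage sketch (editorial): minimal open/close pairing.  The fifo and
 * ring attribute/callback setup is elided and must match the
 * __vxge_hw_fifo_create()/__vxge_hw_ring_create() requirements.
 *
 *	struct vxge_hw_vpath_attr attr = { 0 };
 *	struct __vxge_hw_vpath_handle *vp;
 *
 *	attr.vp_id = id;
 *	... fill attr.fifo_attr and attr.ring_attr ...
 *	status = vxge_hw_vpath_open(hldev, &attr, &vp);
 *	if (status == VXGE_HW_OK) {
 *		... traffic ...
 *		status = vxge_hw_vpath_close(vp);
 *	}
 */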
4340 | ||
4341 | /** | |
4342 | * vxge_hw_vpath_rx_doorbell_init - Program the initial RxD doorbell count | |
4343 | * for a vpath | |
4344 | * @vp: Handle got from previous vpath open | |
4345 | * | |
4346 | * This function posts the initial new-qword count to the PRC RxD doorbell | |
4347 | * and derives the ring's rxds_limit from the RxD memory size. | |
4348 | */ | |
4349 | void | |
4350 | vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) | |
4351 | { | |
e7935c96 JM |
4352 | struct __vxge_hw_virtualpath *vpath = vp->vpath; |
4353 | struct __vxge_hw_ring *ring = vpath->ringh; | |
4354 | struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev); | |
40a3a915 | 4355 | u64 new_count, val64, val164; |
40a3a915 | 4356 | |
e7935c96 JM |
4357 | if (vdev->titan1) { |
4358 | new_count = readq(&vpath->vp_reg->rxdmem_size); | |
4359 | new_count &= 0x1fff; | |
4360 | } else | |
4361 | new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8; | |
40a3a915 | 4362 | |
e7935c96 | 4363 | val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count); |
40a3a915 RV |
4364 | |
4365 | writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), | |
4366 | &vpath->vp_reg->prc_rxd_doorbell); | |
4367 | readl(&vpath->vp_reg->prc_rxd_doorbell); | |
4368 | ||
4369 | val164 /= 2; | |
4370 | val64 = readq(&vpath->vp_reg->prc_cfg6); | |
4371 | val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64); | |
4372 | val64 &= 0x1ff; | |
4373 | ||
4374 | /* | |
4375 | * Each RxD is of 4 qwords | |
4376 | */ | |
4377 | new_count -= (val64 + 1); | |
4378 | val64 = min(val164, new_count) / 4; | |
4379 | ||
4380 | ring->rxds_limit = min(ring->rxds_limit, val64); | |
4381 | if (ring->rxds_limit < 4) | |
4382 | ring->rxds_limit = 4; | |
4383 | } | |
4384 | ||
4385 | /* | |
4386 | * vxge_hw_vpath_close - Close the handle got from a previous vpath open | |
4387 | * This function is used to close access to a virtual path opened | |
4388 | * earlier. | |
4389 | */ | |
4390 | enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp) | |
4391 | { | |
4392 | struct __vxge_hw_virtualpath *vpath = NULL; | |
4393 | struct __vxge_hw_device *devh = NULL; | |
4394 | u32 vp_id = vp->vpath->vp_id; | |
4395 | u32 is_empty = TRUE; | |
4396 | enum vxge_hw_status status = VXGE_HW_OK; | |
4397 | ||
4398 | vpath = vp->vpath; | |
4399 | devh = vpath->hldev; | |
4400 | ||
4401 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | |
4402 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | |
4403 | goto vpath_close_exit; | |
4404 | } | |
4405 | ||
4406 | list_del(&vp->item); | |
4407 | ||
4408 | if (!list_empty(&vpath->vpath_handles)) { | |
4409 | list_add(&vp->item, &vpath->vpath_handles); | |
4410 | is_empty = FALSE; | |
4411 | } | |
4412 | ||
4413 | if (!is_empty) { | |
4414 | status = VXGE_HW_FAIL; | |
4415 | goto vpath_close_exit; | |
4416 | } | |
4417 | ||
4418 | devh->vpaths_deployed &= ~vxge_mBIT(vp_id); | |
4419 | ||
4420 | if (vpath->ringh != NULL) | |
4421 | __vxge_hw_ring_delete(vp); | |
4422 | ||
4423 | if (vpath->fifoh != NULL) | |
4424 | __vxge_hw_fifo_delete(vp); | |
4425 | ||
4426 | if (vpath->stats_block != NULL) | |
4427 | __vxge_hw_blockpool_block_free(devh, vpath->stats_block); | |
4428 | ||
4429 | vfree(vp); | |
4430 | ||
4431 | __vxge_hw_vp_terminate(devh, vp_id); | |
4432 | ||
8424e00d | 4433 | spin_lock(&vpath->lock); |
40a3a915 | 4434 | vpath->vp_open = VXGE_HW_VP_NOT_OPEN; |
8424e00d | 4435 | spin_unlock(&vpath->lock); |
40a3a915 RV |
4436 | |
4437 | vpath_close_exit: | |
4438 | return status; | |
4439 | } | |
4440 | ||
4441 | /* | |
4442 | * vxge_hw_vpath_reset - Resets vpath | |
4443 | * This function is used to request a reset of the vpath | |
4444 | */ | |
4445 | enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp) | |
4446 | { | |
4447 | enum vxge_hw_status status; | |
4448 | u32 vp_id; | |
4449 | struct __vxge_hw_virtualpath *vpath = vp->vpath; | |
4450 | ||
4451 | vp_id = vpath->vp_id; | |
4452 | ||
4453 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | |
4454 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | |
4455 | goto exit; | |
4456 | } | |
4457 | ||
4458 | status = __vxge_hw_vpath_reset(vpath->hldev, vp_id); | |
4459 | if (status == VXGE_HW_OK) | |
4460 | vpath->sw_stats->soft_reset_cnt++; | |
4461 | exit: | |
4462 | return status; | |
4463 | } | |
4464 | ||
4465 | /* | |
4466 | * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize. | |
4467 | * This function polls for the vpath reset completion and re-initializes | |
4468 | * the vpath. | |
4469 | */ | |
4470 | enum vxge_hw_status | |
4471 | vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp) | |
4472 | { | |
4473 | struct __vxge_hw_virtualpath *vpath = NULL; | |
4474 | enum vxge_hw_status status; | |
4475 | struct __vxge_hw_device *hldev; | |
4476 | u32 vp_id; | |
4477 | ||
4478 | vp_id = vp->vpath->vp_id; | |
4479 | vpath = vp->vpath; | |
4480 | hldev = vpath->hldev; | |
4481 | ||
4482 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | |
4483 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | |
4484 | goto exit; | |
4485 | } | |
4486 | ||
4487 | status = __vxge_hw_vpath_reset_check(vpath); | |
4488 | if (status != VXGE_HW_OK) | |
4489 | goto exit; | |
4490 | ||
4491 | status = __vxge_hw_vpath_sw_reset(hldev, vp_id); | |
4492 | if (status != VXGE_HW_OK) | |
4493 | goto exit; | |
4494 | ||
4495 | status = __vxge_hw_vpath_initialize(hldev, vp_id); | |
4496 | if (status != VXGE_HW_OK) | |
4497 | goto exit; | |
4498 | ||
4499 | if (vpath->ringh != NULL) | |
4500 | __vxge_hw_vpath_prc_configure(hldev, vp_id); | |
4501 | ||
4502 | memset(vpath->hw_stats, 0, | |
4503 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | |
4504 | ||
4505 | memset(vpath->hw_stats_sav, 0, | |
4506 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | |
4507 | ||
4508 | writeq(vpath->stats_block->dma_addr, | |
4509 | &vpath->vp_reg->stats_cfg); | |
4510 | ||
4511 | status = vxge_hw_vpath_stats_enable(vp); | |
4512 | ||
4513 | exit: | |
4514 | return status; | |
4515 | } | |
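/*
 * Sequence sketch (editorial): a full vpath reset cycle composed from
 * the APIs in this file.
 *
 *	status = vxge_hw_vpath_reset(vp);		request the reset
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_vpath_recover_from_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		vxge_hw_vpath_enable(vp);		clear reset, resume
 */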
4516 | ||
4517 | /* | |
4518 | * vxge_hw_vpath_enable - Enable vpath. | |
4519 | * This routine clears the vpath reset thereby enabling a vpath | |
4520 | * to start forwarding frames and generating interrupts. | |
4521 | */ | |
4522 | void | |
4523 | vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp) | |
4524 | { | |
4525 | struct __vxge_hw_device *hldev; | |
4526 | u64 val64; | |
4527 | ||
4528 | hldev = vp->vpath->hldev; | |
4529 | ||
4530 | val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET( | |
4531 | 1 << (16 - vp->vpath->vp_id)); | |
4532 | ||
4533 | __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), | |
4534 | &hldev->common_reg->cmn_rsthdlr_cfg1); | |
4535 | } | |
4536 | ||
4537 | /* | |
4538 | * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics. | |
4539 | * Enable the DMA vpath statistics. Call this function to re-enable the | |
4540 | * adapter to update stats into the host memory | |
4541 | */ | |
42821a5b | 4542 | static enum vxge_hw_status |
40a3a915 RV |
4543 | vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) |
4544 | { | |
4545 | enum vxge_hw_status status = VXGE_HW_OK; | |
4546 | struct __vxge_hw_virtualpath *vpath; | |
4547 | ||
4548 | vpath = vp->vpath; | |
4549 | ||
4550 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | |
4551 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | |
4552 | goto exit; | |
4553 | } | |
4554 | ||
4555 | memcpy(vpath->hw_stats_sav, vpath->hw_stats, | |
4556 | sizeof(struct vxge_hw_vpath_stats_hw_info)); | |
4557 | ||
4558 | status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); | |
4559 | exit: | |
4560 | return status; | |
4561 | } | |
4562 | ||
4563 | /* | |
4564 | * __vxge_hw_vpath_stats_access - Get the statistics from the given location | |
4565 | * and offset and perform an operation | |
4566 | */ | |
42821a5b | 4567 | static enum vxge_hw_status |
40a3a915 RV |
4568 | __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, |
4569 | u32 operation, u32 offset, u64 *stat) | |
4570 | { | |
4571 | u64 val64; | |
4572 | enum vxge_hw_status status = VXGE_HW_OK; | |
4573 | struct vxge_hw_vpath_reg __iomem *vp_reg; | |
4574 | ||
4575 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | |
4576 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | |
4577 | goto vpath_stats_access_exit; | |
4578 | } | |
4579 | ||
4580 | vp_reg = vpath->vp_reg; | |
4581 | ||
4582 | val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | | |
4583 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | | |
4584 | VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); | |
4585 | ||
4586 | status = __vxge_hw_pio_mem_write64(val64, | |
4587 | &vp_reg->xmac_stats_access_cmd, | |
4588 | VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, | |
4589 | vpath->hldev->config.device_poll_millis); | |
4590 | ||
4591 | if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) | |
4592 | *stat = readq(&vp_reg->xmac_stats_access_data); | |
4593 | else | |
4594 | *stat = 0; | |
4595 | ||
4596 | vpath_stats_access_exit: | |
4597 | return status; | |
4598 | } | |
4599 | ||
4600 | /* | |
4601 | * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath | |
4602 | */ | |
42821a5b | 4603 | static enum vxge_hw_status |
40a3a915 RV |
4604 | __vxge_hw_vpath_xmac_tx_stats_get( |
4605 | struct __vxge_hw_virtualpath *vpath, | |
4606 | struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) | |
4607 | { | |
4608 | u64 *val64; | |
4609 | int i; | |
4610 | u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; | |
4611 | enum vxge_hw_status status = VXGE_HW_OK; | |
4612 | ||
4613 | val64 = (u64 *) vpath_tx_stats; | |
4614 | ||
4615 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | |
4616 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | |
4617 | goto exit; | |
4618 | } | |
4619 | ||
4620 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { | |
4621 | status = __vxge_hw_vpath_stats_access(vpath, | |
4622 | VXGE_HW_STATS_OP_READ, | |
4623 | offset, val64); | |
4624 | if (status != VXGE_HW_OK) | |
4625 | goto exit; | |
4626 | offset++; | |
4627 | val64++; | |
4628 | } | |
4629 | exit: | |
4630 | return status; | |
4631 | } | |
4632 | ||
4633 | /* | |
4634 | * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath | |
4635 | */ | |
42821a5b | 4636 | static enum vxge_hw_status |
40a3a915 | 4637 | __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, |
42821a5b | 4638 | struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) |
40a3a915 RV |
4639 | { |
4640 | u64 *val64; | |
4641 | enum vxge_hw_status status = VXGE_HW_OK; | |
4642 | int i; | |
4643 | u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; | |
4644 | val64 = (u64 *) vpath_rx_stats; | |
4645 | ||
4646 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | |
4647 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | |
4648 | goto exit; | |
4649 | } | |
4650 | for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { | |
4651 | status = __vxge_hw_vpath_stats_access(vpath, | |
4652 | VXGE_HW_STATS_OP_READ, | |
4653 | offset >> 3, val64); | |
4654 | if (status != VXGE_HW_OK) | |
4655 | goto exit; | |
4656 | ||
4657 | offset += 8; | |
4658 | val64++; | |
4659 | } | |
4660 | exit: | |
4661 | return status; | |
4662 | } | |
4663 | ||
4664 | /* | |
4665 | * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. | |
4666 | */ | |
42821a5b | 4667 | static enum vxge_hw_status |
4668 | __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, | |
4669 | struct vxge_hw_vpath_stats_hw_info *hw_stats) | |
40a3a915 RV |
4670 | { |
4671 | u64 val64; | |
4672 | enum vxge_hw_status status = VXGE_HW_OK; | |
4673 | struct vxge_hw_vpath_reg __iomem *vp_reg; | |
4674 | ||
4675 | if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { | |
4676 | status = VXGE_HW_ERR_VPATH_NOT_OPEN; | |
4677 | goto exit; | |
4678 | } | |
4679 | vp_reg = vpath->vp_reg; | |
4680 | ||
4681 | val64 = readq(&vp_reg->vpath_debug_stats0); | |
4682 | hw_stats->ini_num_mwr_sent = | |
4683 | (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); | |
4684 | ||
4685 | val64 = readq(&vp_reg->vpath_debug_stats1); | |
4686 | hw_stats->ini_num_mrd_sent = | |
4687 | (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); | |
4688 | ||
4689 | val64 = readq(&vp_reg->vpath_debug_stats2); | |
4690 | hw_stats->ini_num_cpl_rcvd = | |
4691 | (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); | |
4692 | ||
4693 | val64 = readq(&vp_reg->vpath_debug_stats3); | |
4694 | hw_stats->ini_num_mwr_byte_sent = | |
4695 | VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); | |
4696 | ||
4697 | val64 = readq(&vp_reg->vpath_debug_stats4); | |
4698 | hw_stats->ini_num_cpl_byte_rcvd = | |
4699 | VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); | |
4700 | ||
4701 | val64 = readq(&vp_reg->vpath_debug_stats5); | |
4702 | hw_stats->wrcrdtarb_xoff = | |
4703 | (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); | |
4704 | ||
4705 | val64 = readq(&vp_reg->vpath_debug_stats6); | |
4706 | hw_stats->rdcrdtarb_xoff = | |
4707 | (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); | |
4708 | ||
4709 | val64 = readq(&vp_reg->vpath_genstats_count01); | |
4710 | hw_stats->vpath_genstats_count0 = | |
4711 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( | |
4712 | val64); | |
4713 | ||
4714 | val64 = readq(&vp_reg->vpath_genstats_count01); | |
4715 | hw_stats->vpath_genstats_count1 = | |
4716 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( | |
4717 | val64); | |
4718 | ||
4719 | val64 = readq(&vp_reg->vpath_genstats_count23); | |
4720 | hw_stats->vpath_genstats_count2 = | |
4721 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( | |
4722 | val64); | |
4723 | ||
4724 | val64 = readq(&vp_reg->vpath_genstats_count23); | |
4725 | hw_stats->vpath_genstats_count3 = | |
4726 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( | |
4727 | val64); | |
4728 | ||
4729 | val64 = readq(&vp_reg->vpath_genstats_count4); | |
4730 | hw_stats->vpath_genstats_count4 = | |
4731 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( | |
4732 | val64); | |
4733 | ||
4734 | val64 = readq(&vp_reg->vpath_genstats_count5); | |
4735 | hw_stats->vpath_genstats_count5 = | |
4736 | (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( | |
4737 | val64); | |
4738 | ||
4739 | status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); | |
4740 | if (status != VXGE_HW_OK) | |
4741 | goto exit; | |
4742 | ||
4743 | status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); | |
4744 | if (status != VXGE_HW_OK) | |
4745 | goto exit; | |
4746 | ||
4747 | VXGE_HW_VPATH_STATS_PIO_READ( | |
4748 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); | |
4749 | ||
4750 | hw_stats->prog_event_vnum0 = | |
4751 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); | |
4752 | ||
4753 | hw_stats->prog_event_vnum1 = | |
4754 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); | |
4755 | ||
4756 | VXGE_HW_VPATH_STATS_PIO_READ( | |
4757 | VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); | |
4758 | ||
4759 | hw_stats->prog_event_vnum2 = | |
4760 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); | |
4761 | ||
4762 | hw_stats->prog_event_vnum3 = | |
4763 | (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); | |
4764 | ||
4765 | val64 = readq(&vp_reg->rx_multi_cast_stats); | |
4766 | hw_stats->rx_multi_cast_frame_discard = | |
4767 | (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); | |
4768 | ||
4769 | val64 = readq(&vp_reg->rx_frm_transferred); | |
4770 | hw_stats->rx_frm_transferred = | |
4771 | (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); | |
4772 | ||
4773 | val64 = readq(&vp_reg->rxd_returned); | |
4774 | hw_stats->rxd_returned = | |
4775 | (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); | |
4776 | ||
4777 | val64 = readq(&vp_reg->dbg_stats_rx_mpa); | |
4778 | hw_stats->rx_mpa_len_fail_frms = | |
4779 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); | |
4780 | hw_stats->rx_mpa_mrk_fail_frms = | |
4781 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); | |
4782 | hw_stats->rx_mpa_crc_fail_frms = | |
4783 | (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); | |
4784 | ||
4785 | val64 = readq(&vp_reg->dbg_stats_rx_fau); | |
4786 | hw_stats->rx_permitted_frms = | |
4787 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); | |
4788 | hw_stats->rx_vp_reset_discarded_frms = | |
4789 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); | |
4790 | hw_stats->rx_wol_frms = | |
4791 | (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); | |
4792 | ||
4793 | val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); | |
4794 | hw_stats->tx_vp_reset_discarded_frms = | |
4795 | (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( | |
4796 | val64); | |
4797 | exit: | |
4798 | return status; | |
4799 | } | |
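VXGE_HW_VPATH_STATS_PIO_READ() is defined earlier in this file; the bare uses of val64 after each invocation only make sense if the macro reads into the local val64 and bails to exit on error. Its presumed shape, reconstructed from these call sites rather than quoted:

	/* Presumed expansion (an assumption inferred from the call sites
	 * above): read one stats word into the caller's val64, and
	 * propagate failure through the caller's status/exit convention. */
	#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {			\
		status = __vxge_hw_vpath_stats_access(vpath,		\
						VXGE_HW_STATS_OP_READ,	\
						(offset), &val64);	\
		if (status != VXGE_HW_OK)				\
			goto exit;					\
	}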
4800 | ||
42821a5b | 4801 | |
4802 | static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, | |
4803 | unsigned long size) | |
4804 | { | |
4805 | gfp_t flags; | |
4806 | void *vaddr; | |
4807 | ||
4808 | if (in_interrupt()) | |
4809 | flags = GFP_ATOMIC | GFP_DMA; | |
4810 | else | |
4811 | flags = GFP_KERNEL | GFP_DMA; | |
4812 | ||
4813 | vaddr = kmalloc(size, flags); | |
4814 | ||
4815 | vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); | |
4816 | } | |
4817 | ||
4818 | static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr, | |
4819 | struct pci_dev **p_dma_acch) | |
4820 | { | |
4821 | unsigned long misaligned = *(unsigned long *)p_dma_acch; | |
4822 | u8 *tmp = (u8 *)vaddr; | |
4823 | tmp -= misaligned; | |
4824 | kfree((void *)tmp); | |
4825 | } | |
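The *(unsigned long *)p_dma_acch cast above only works because the matching allocator repurposes the access-handle slot to record how far it shifted the pointer for cache-line alignment. A sketch of that allocator side, assuming VXGE_CACHE_LINE_SIZE from the driver's headers; the real vxge_os_dma_malloc() lives in a header and is not shown here, so treat the details as illustrative:

	/* Sketch of the allocator this free routine pairs with.
	 * Assumption: the alignment slack is parked in *p_dma_acch so
	 * that vxge_os_dma_free() can recover the kmalloc() pointer. */
	static inline void *vxge_os_dma_malloc_sketch(struct pci_dev *pdev,
						unsigned long size,
						struct pci_dev **p_dmah,
						struct pci_dev **p_dma_acch)
	{
		void *vaddr = kmalloc(size + VXGE_CACHE_LINE_SIZE, GFP_KERNEL);
		unsigned long misaligned;

		if (vaddr == NULL)
			return NULL;

		misaligned = ALIGN((unsigned long)vaddr, VXGE_CACHE_LINE_SIZE)
				- (unsigned long)vaddr;
		*(unsigned long *)p_dma_acch = misaligned; /* slack for free() */
		*p_dmah = pdev;
		return (u8 *)vaddr + misaligned;
	}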
4826 | ||
40a3a915 RV |
4827 | /* |
4828 | * __vxge_hw_blockpool_create - Create block pool | |
4829 | */ | |
4830 | ||
2c91308f | 4831 | static enum vxge_hw_status |
40a3a915 RV |
4832 | __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, |
4833 | struct __vxge_hw_blockpool *blockpool, | |
4834 | u32 pool_size, | |
4835 | u32 pool_max) | |
4836 | { | |
4837 | u32 i; | |
4838 | struct __vxge_hw_blockpool_entry *entry = NULL; | |
4839 | void *memblock; | |
4840 | dma_addr_t dma_addr; | |
4841 | struct pci_dev *dma_handle; | |
4842 | struct pci_dev *acc_handle; | |
4843 | enum vxge_hw_status status = VXGE_HW_OK; | |
4844 | ||
4845 | if (blockpool == NULL) { | |
4846 | status = VXGE_HW_FAIL; | |
4847 | goto blockpool_create_exit; | |
4848 | } | |
4849 | ||
4850 | blockpool->hldev = hldev; | |
4851 | blockpool->block_size = VXGE_HW_BLOCK_SIZE; | |
4852 | blockpool->pool_size = 0; | |
4853 | blockpool->pool_max = pool_max; | |
4854 | blockpool->req_out = 0; | |
4855 | ||
4856 | INIT_LIST_HEAD(&blockpool->free_block_list); | |
4857 | INIT_LIST_HEAD(&blockpool->free_entry_list); | |
4858 | ||
4859 | for (i = 0; i < pool_size + pool_max; i++) { | |
4860 | entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | |
4861 | GFP_KERNEL); | |
4862 | if (entry == NULL) { | |
4863 | __vxge_hw_blockpool_destroy(blockpool); | |
4864 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
4865 | goto blockpool_create_exit; | |
4866 | } | |
4867 | list_add(&entry->item, &blockpool->free_entry_list); | |
4868 | } | |
4869 | ||
4870 | for (i = 0; i < pool_size; i++) { | |
4871 | ||
4872 | memblock = vxge_os_dma_malloc( | |
4873 | hldev->pdev, | |
4874 | VXGE_HW_BLOCK_SIZE, | |
4875 | &dma_handle, | |
4876 | &acc_handle); | |
4877 | ||
4878 | if (memblock == NULL) { | |
4879 | __vxge_hw_blockpool_destroy(blockpool); | |
4880 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
4881 | goto blockpool_create_exit; | |
4882 | } | |
4883 | ||
4884 | dma_addr = pci_map_single(hldev->pdev, memblock, | |
4885 | VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); | |
4886 | ||
4887 | if (unlikely(pci_dma_mapping_error(hldev->pdev, | |
4888 | dma_addr))) { | |
4889 | ||
4890 | vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); | |
4891 | __vxge_hw_blockpool_destroy(blockpool); | |
4892 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
4893 | goto blockpool_create_exit; | |
4894 | } | |
4895 | ||
4896 | entry = NULL; | |
4897 | if (!list_empty(&blockpool->free_entry_list)) | |
4898 | entry = (struct __vxge_hw_blockpool_entry *) | |
4899 | list_first_entry(&blockpool->free_entry_list, | |
4900 | struct __vxge_hw_blockpool_entry, | |
4901 | item); | |
4902 | ||
4903 | if (entry == NULL) | |
4904 | entry = | |
4905 | kzalloc(sizeof(struct __vxge_hw_blockpool_entry), | |
4906 | GFP_KERNEL); | |
4907 | else | |
4908 | list_del(&entry->item); | |
4909 | if (entry != NULL) { | |
4908 | entry->length = VXGE_HW_BLOCK_SIZE; | |
4909 | entry->memblock = memblock; | |
4910 | entry->dma_addr = dma_addr; | |
4911 | entry->acc_handle = acc_handle; | |
4912 | entry->dma_handle = dma_handle; | |
4913 | list_add(&entry->item, | |
4914 | &blockpool->free_block_list); | |
4915 | blockpool->pool_size++; | |
4916 | } else { | |
4917 | __vxge_hw_blockpool_destroy(blockpool); | |
4918 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
4919 | goto blockpool_create_exit; | |
4920 | } | |
4921 | } | |
4922 | ||
4923 | blockpool_create_exit: | |
4924 | return status; | |
4925 | } | |
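Creation both seeds pool_size DMA-mapped blocks onto free_block_list and pre-allocates pool_size + pool_max spare descriptors on free_entry_list, so later block adds need not allocate a descriptor on a hot path. A hedged call-site sketch: VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE is used in the replenish path below, while its MAX counterpart and the surrounding code are assumed for illustration:

	/* Illustrative init-time call (not the driver's verbatim init path). */
	static enum vxge_hw_status
	vxge_pool_init_sketch(struct __vxge_hw_device *hldev)
	{
		enum vxge_hw_status status;

		status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
					VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE,
					VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE);

		/* On failure the pool has already been torn down internally,
		 * so the caller only propagates the status. */
		return status;
	}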
4926 | ||
4927 | /* | |
4928 | * __vxge_hw_blockpool_destroy - Deallocates the block pool | |
4929 | */ | |
4930 | ||
2c91308f | 4931 | static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) |
40a3a915 RV |
4932 | { | |
4933 | struct __vxge_hw_device *hldev; | |
4934 | struct list_head *p, *n; | |
4935 | ||
4936 | if (blockpool == NULL) | |
4937 | return; | |
4942 | ||
4943 | hldev = blockpool->hldev; | |
4944 | ||
4945 | list_for_each_safe(p, n, &blockpool->free_block_list) { | |
4946 | ||
4947 | pci_unmap_single(hldev->pdev, | |
4948 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | |
4949 | ((struct __vxge_hw_blockpool_entry *)p)->length, | |
4950 | PCI_DMA_BIDIRECTIONAL); | |
4951 | ||
4952 | vxge_os_dma_free(hldev->pdev, | |
4953 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | |
4954 | &((struct __vxge_hw_blockpool_entry *) p)->acc_handle); | |
4955 | ||
4956 | list_del( | |
4957 | &((struct __vxge_hw_blockpool_entry *)p)->item); | |
4958 | kfree(p); | |
4959 | blockpool->pool_size--; | |
4960 | } | |
4961 | ||
4962 | list_for_each_safe(p, n, &blockpool->free_entry_list) { | |
4963 | list_del( | |
4964 | &((struct __vxge_hw_blockpool_entry *)p)->item); | |
4965 | kfree((void *)p); | |
4966 | } | |
4970 | } | |
4971 | ||
4972 | /* | |
4973 | * __vxge_hw_blockpool_blocks_add - Request additional blocks | |
4974 | */ | |
4975 | static | |
4976 | void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) | |
4977 | { | |
4978 | u32 nreq = 0, i; | |
4979 | ||
4980 | if ((blockpool->pool_size + blockpool->req_out) < | |
4981 | VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { | |
4982 | nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; | |
4983 | blockpool->req_out += nreq; | |
4984 | } | |
4985 | ||
4986 | for (i = 0; i < nreq; i++) | |
4987 | vxge_os_dma_malloc_async( | |
4988 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | |
4989 | blockpool->hldev, VXGE_HW_BLOCK_SIZE); | |
4990 | } | |
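The replenish guard counts outstanding requests (req_out) toward the minimum, so overlapping calls do not each request a full increment. A runnable toy model of that arithmetic, with made-up constants:

	#include <stdio.h>

	#define MIN_POOL 8	/* stands in for VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE */
	#define INCR     4	/* stands in for VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE */

	int main(void)
	{
		unsigned int pool_size = 5, req_out = 0, nreq;

		nreq = (pool_size + req_out < MIN_POOL) ? INCR : 0; /* -> 4 */
		req_out += nreq;
		printf("first call requests %u (req_out=%u)\n", nreq, req_out);

		nreq = (pool_size + req_out < MIN_POOL) ? INCR : 0; /* -> 0 */
		printf("second call requests %u\n", nreq);
		return 0;
	}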
4991 | ||
4992 | /* | |
4993 | * __vxge_hw_blockpool_blocks_remove - Free additional blocks | |
4994 | */ | |
4995 | static | |
4996 | void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) | |
4997 | { | |
4998 | struct list_head *p, *n; | |
4999 | ||
5000 | list_for_each_safe(p, n, &blockpool->free_block_list) { | |
5001 | ||
5002 | if (blockpool->pool_size < blockpool->pool_max) | |
5003 | break; | |
5004 | ||
5005 | pci_unmap_single( | |
5006 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | |
5007 | ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, | |
5008 | ((struct __vxge_hw_blockpool_entry *)p)->length, | |
5009 | PCI_DMA_BIDIRECTIONAL); | |
5010 | ||
5011 | vxge_os_dma_free( | |
5012 | ((struct __vxge_hw_device *)blockpool->hldev)->pdev, | |
5013 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, | |
5014 | &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); | |
5015 | ||
5016 | list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); | |
5017 | ||
5018 | list_add(p, &blockpool->free_entry_list); | |
5019 | ||
5020 | blockpool->pool_size--; | |
5021 | ||
5022 | } | |
5023 | } | |
5024 | ||
5025 | /* | |
5026 | * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async | |
5027 | * Adds a block to block pool | |
5028 | */ | |
42821a5b | 5029 | static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, |
5030 | void *block_addr, | |
5031 | u32 length, | |
5032 | struct pci_dev *dma_h, | |
5033 | struct pci_dev *acc_handle) | |
40a3a915 RV |
5034 | { |
5035 | struct __vxge_hw_blockpool *blockpool; | |
5036 | struct __vxge_hw_blockpool_entry *entry = NULL; | |
5037 | dma_addr_t dma_addr; | |
5038 | enum vxge_hw_status status = VXGE_HW_OK; | |
5040 | ||
5041 | blockpool = &devh->block_pool; | |
5042 | ||
5043 | if (block_addr == NULL) { | |
5044 | blockpool->req_out--; | |
5045 | status = VXGE_HW_FAIL; | |
5046 | goto exit; | |
5047 | } | |
5048 | ||
5049 | dma_addr = pci_map_single(devh->pdev, block_addr, length, | |
5050 | PCI_DMA_BIDIRECTIONAL); | |
5051 | ||
5052 | if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { | |
5053 | ||
5054 | vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); | |
5055 | blockpool->req_out--; | |
5056 | status = VXGE_HW_FAIL; | |
5057 | goto exit; | |
5058 | } | |
5059 | ||
5060 | ||
5061 | if (!list_empty(&blockpool->free_entry_list)) | |
5062 | entry = (struct __vxge_hw_blockpool_entry *) | |
5063 | list_first_entry(&blockpool->free_entry_list, | |
5064 | struct __vxge_hw_blockpool_entry, | |
5065 | item); | |
5066 | ||
5067 | if (entry == NULL) | |
5068 | entry = (struct __vxge_hw_blockpool_entry *) | |
5069 | vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); | |
5070 | else | |
5071 | list_del(&entry->item); | |
5072 | ||
5073 | if (entry != NULL) { | |
5074 | entry->length = length; | |
5075 | entry->memblock = block_addr; | |
5076 | entry->dma_addr = dma_addr; | |
5077 | entry->acc_handle = acc_handle; | |
5078 | entry->dma_handle = dma_h; | |
5079 | list_add(&entry->item, &blockpool->free_block_list); | |
5080 | blockpool->pool_size++; | |
5081 | status = VXGE_HW_OK; | |
5082 | } else | |
5083 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
5084 | ||
5085 | blockpool->req_out--; | |
5086 | ||
5088 | exit: | |
5089 | return; | |
5090 | } | |
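Taken together with vxge_os_dma_malloc_async() above, the implied contract is: every async request produces exactly one callback here, even when the kmalloc() or the DMA mapping fails, so req_out is decremented exactly once per request. A toy userspace model of that accounting, an illustration only:

	#include <assert.h>

	struct pool { unsigned int pool_size, req_out; };

	/* Models vxge_hw_blockpool_block_add(): one decrement per async
	 * request, whether or not the block materializes. */
	static void block_add(struct pool *p, void *block)
	{
		if (block)
			p->pool_size++;	/* mapped and queued on free list */
		p->req_out--;		/* always balanced with the request */
	}

	int main(void)
	{
		struct pool p = { .pool_size = 0, .req_out = 2 };
		int buf;

		block_add(&p, &buf);	/* success path */
		block_add(&p, 0);	/* failure path */
		assert(p.req_out == 0 && p.pool_size == 1);
		return 0;
	}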
5091 | ||
5092 | /* | |
5093 | * __vxge_hw_blockpool_malloc - Allocate a memory block from pool | |
5094 | * Allocates a block of memory of given size, either from block pool | |
5095 | * or by calling vxge_os_dma_malloc() | |
5096 | */ | |
2c91308f | 5097 | static void * |
40a3a915 RV |
5098 | __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, |
5099 | struct vxge_hw_mempool_dma *dma_object) | |
5100 | { | |
5101 | struct __vxge_hw_blockpool_entry *entry = NULL; | |
5102 | struct __vxge_hw_blockpool *blockpool; | |
5103 | void *memblock = NULL; | |
5104 | enum vxge_hw_status status = VXGE_HW_OK; | |
5105 | ||
5106 | blockpool = &devh->block_pool; | |
5107 | ||
5108 | if (size != blockpool->block_size) { | |
5109 | ||
5110 | memblock = vxge_os_dma_malloc(devh->pdev, size, | |
5111 | &dma_object->handle, | |
5112 | &dma_object->acc_handle); | |
5113 | ||
5114 | if (memblock == NULL) { | |
5115 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
5116 | goto exit; | |
5117 | } | |
5118 | ||
5119 | dma_object->addr = pci_map_single(devh->pdev, memblock, size, | |
5120 | PCI_DMA_BIDIRECTIONAL); | |
5121 | ||
5122 | if (unlikely(pci_dma_mapping_error(devh->pdev, | |
5123 | dma_object->addr))) { | |
5124 | vxge_os_dma_free(devh->pdev, memblock, | |
5125 | &dma_object->acc_handle); | |
5126 | memblock = NULL; | |
5127 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
5128 | goto exit; | |
5129 | } | |
5129 | ||
5130 | } else { | |
5131 | ||
5132 | if (!list_empty(&blockpool->free_block_list)) | |
5133 | entry = (struct __vxge_hw_blockpool_entry *) | |
5134 | list_first_entry(&blockpool->free_block_list, | |
5135 | struct __vxge_hw_blockpool_entry, | |
5136 | item); | |
5137 | ||
5138 | if (entry != NULL) { | |
5139 | list_del(&entry->item); | |
5140 | dma_object->addr = entry->dma_addr; | |
5141 | dma_object->handle = entry->dma_handle; | |
5142 | dma_object->acc_handle = entry->acc_handle; | |
5143 | memblock = entry->memblock; | |
5144 | ||
5145 | list_add(&entry->item, | |
5146 | &blockpool->free_entry_list); | |
5147 | blockpool->pool_size--; | |
5148 | } | |
5149 | ||
5150 | if (memblock != NULL) | |
5151 | __vxge_hw_blockpool_blocks_add(blockpool); | |
5152 | } | |
5153 | exit: | |
5154 | return memblock; | |
5155 | } | |
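Only requests of exactly blockpool->block_size touch the pool; every other size goes straight to a fresh allocation and mapping. A hypothetical call-site sketch showing the intended pairing with __vxge_hw_blockpool_free(), defined next:

	/* Hypothetical caller (illustrative only): borrow one block-sized,
	 * DMA-mapped buffer from the pool and return it when done. */
	static enum vxge_hw_status
	vxge_pool_roundtrip_sketch(struct __vxge_hw_device *devh)
	{
		struct vxge_hw_mempool_dma dma_object;
		void *buf;

		buf = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE,
						 &dma_object);
		if (buf == NULL)
			return VXGE_HW_ERR_OUT_OF_MEMORY;

		/* hardware uses dma_object.addr, the CPU view is buf */

		__vxge_hw_blockpool_free(devh, buf, VXGE_HW_BLOCK_SIZE,
					 &dma_object);
		return VXGE_HW_OK;
	}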
5156 | ||
5157 | /* | |
5158 | * __vxge_hw_blockpool_free - Frees the memory allocated with | |
5159 | * __vxge_hw_blockpool_malloc | |
5160 | */ | |
2c91308f | 5161 | static void |
40a3a915 RV |
5162 | __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, |
5163 | void *memblock, u32 size, | |
5164 | struct vxge_hw_mempool_dma *dma_object) | |
5165 | { | |
5166 | struct __vxge_hw_blockpool_entry *entry = NULL; | |
5167 | struct __vxge_hw_blockpool *blockpool; | |
5168 | enum vxge_hw_status status = VXGE_HW_OK; | |
5169 | ||
5170 | blockpool = &devh->block_pool; | |
5171 | ||
5172 | if (size != blockpool->block_size) { | |
5173 | pci_unmap_single(devh->pdev, dma_object->addr, size, | |
5174 | PCI_DMA_BIDIRECTIONAL); | |
5175 | vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); | |
5176 | } else { | |
5177 | ||
5178 | if (!list_empty(&blockpool->free_entry_list)) | |
5179 | entry = (struct __vxge_hw_blockpool_entry *) | |
5180 | list_first_entry(&blockpool->free_entry_list, | |
5181 | struct __vxge_hw_blockpool_entry, | |
5182 | item); | |
5183 | ||
5184 | if (entry == NULL) | |
5185 | entry = (struct __vxge_hw_blockpool_entry *) | |
5186 | vmalloc(sizeof( | |
5187 | struct __vxge_hw_blockpool_entry)); | |
5188 | else | |
5189 | list_del(&entry->item); | |
5190 | ||
5191 | if (entry != NULL) { | |
5192 | entry->length = size; | |
5193 | entry->memblock = memblock; | |
5194 | entry->dma_addr = dma_object->addr; | |
5195 | entry->acc_handle = dma_object->acc_handle; | |
5196 | entry->dma_handle = dma_object->handle; | |
5197 | list_add(&entry->item, | |
5198 | &blockpool->free_block_list); | |
5199 | blockpool->pool_size++; | |
5200 | status = VXGE_HW_OK; | |
5201 | } else | |
5202 | status = VXGE_HW_ERR_OUT_OF_MEMORY; | |
5203 | ||
5204 | if (status == VXGE_HW_OK) | |
5205 | __vxge_hw_blockpool_blocks_remove(blockpool); | |
5206 | } | |
40a3a915 RV |
5207 | } |
5208 | ||
5209 | /* | |
5210 | * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool | |
5211 | * This function allocates a block from the block pool; it returns NULL | |
5212 | * if the pool is empty or the size is not the pool's block size. | |
5212 | */ | |
2c91308f | 5213 | static struct __vxge_hw_blockpool_entry * |
40a3a915 RV |
5214 | __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) |
5215 | { | |
5216 | struct __vxge_hw_blockpool_entry *entry = NULL; | |
5217 | struct __vxge_hw_blockpool *blockpool; | |
5218 | ||
5219 | blockpool = &devh->block_pool; | |
5220 | ||
5221 | if (size == blockpool->block_size) { | |
5222 | ||
5223 | if (!list_empty(&blockpool->free_block_list)) | |
5224 | entry = (struct __vxge_hw_blockpool_entry *) | |
5225 | list_first_entry(&blockpool->free_block_list, | |
5226 | struct __vxge_hw_blockpool_entry, | |
5227 | item); | |
5228 | ||
5229 | if (entry != NULL) { | |
5230 | list_del(&entry->item); | |
5231 | blockpool->pool_size--; | |
5232 | } | |
5233 | } | |
5234 | ||
5235 | if (entry != NULL) | |
5236 | __vxge_hw_blockpool_blocks_add(blockpool); | |
5237 | ||
5238 | return entry; | |
5239 | } | |
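Unlike __vxge_hw_blockpool_malloc(), this variant hands the caller the whole pool entry, so the descriptor (dma_addr, memblock, handles) travels with the block and comes back intact via __vxge_hw_blockpool_block_free(), defined next. A hypothetical paired use:

	/* Hypothetical borrower (illustrative only). */
	static enum vxge_hw_status
	vxge_block_borrow_sketch(struct __vxge_hw_device *devh)
	{
		struct __vxge_hw_blockpool_entry *blk;

		blk = __vxge_hw_blockpool_block_allocate(devh,
							 VXGE_HW_BLOCK_SIZE);
		if (blk == NULL)
			return VXGE_HW_ERR_OUT_OF_MEMORY;

		/* program hardware with blk->dma_addr; the CPU view is
		 * blk->memblock */

		__vxge_hw_blockpool_block_free(devh, blk);
		return VXGE_HW_OK;
	}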
5240 | ||
5241 | /* | |
5242 | * __vxge_hw_blockpool_block_free - Frees a block from block pool | |
5243 | * @devh: Hal device | |
5244 | * @entry: Entry of block to be freed | |
5245 | * | |
5246 | * This function frees a block from block pool | |
5247 | */ | |
2c91308f | 5248 | static void |
40a3a915 RV |
5249 | __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, |
5250 | struct __vxge_hw_blockpool_entry *entry) | |
5251 | { | |
5252 | struct __vxge_hw_blockpool *blockpool; | |
5253 | ||
5254 | blockpool = &devh->block_pool; | |
5255 | ||
5256 | if (entry->length == blockpool->block_size) { | |
5257 | list_add(&entry->item, &blockpool->free_block_list); | |
5258 | blockpool->pool_size++; | |
5259 | } | |
5260 | ||
5261 | __vxge_hw_blockpool_blocks_remove(blockpool); | |
40a3a915 | 5262 | } |