ceph/src/dpdk/drivers/net/ixgbe/ixgbe_bypass.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass_api.h"

#define BYPASS_STATUS_OFF_MASK	3

/* Macros to check for invalid function pointers. */
#define FUNC_PTR_OR_ERR_RET(func, retval) do {                   \
	if ((func) == NULL) {                                    \
		PMD_DRV_LOG(ERR, "%s:%d function not supported", \
			    __func__, __LINE__);                 \
		return retval;                                   \
	}                                                        \
} while (0)

#define FUNC_PTR_OR_RET(func) do {                               \
	if ((func) == NULL) {                                    \
		PMD_DRV_LOG(ERR, "%s:%d function not supported", \
			    __func__, __LINE__);                 \
		return;                                          \
	}                                                        \
} while (0)

/**
 * ixgbe_bypass_set_time - Set bypass FW time epoch.
 *
 * @adapter: pointer to ixgbe_adapter structure
 *
 * This function will sync the FW date stamp with that of the
 * system clock.
 **/
static void
ixgbe_bypass_set_time(struct ixgbe_adapter *adapter)
{
	u32 mask, value;
	u32 sec;
	struct ixgbe_hw *hw = &adapter->hw;

	sec = 0;

	/*
	 * Send the FW our current time and turn on time_valid and
	 * timer_reset bits.
	 */
	mask = BYPASS_CTL1_TIME_M |
	       BYPASS_CTL1_VALID_M |
	       BYPASS_CTL1_OFFTRST_M;
	value = (sec & BYPASS_CTL1_TIME_M) |
		BYPASS_CTL1_VALID |
		BYPASS_CTL1_OFFTRST;

	FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);

	/* Store FW reset time (in seconds from epoch). */
	adapter->bps.reset_tm = time(NULL);

	/* reset FW timer. */
	adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value);
}

/**
 * ixgbe_bypass_init - Make some environment changes for bypass
 *
 * @dev: pointer to rte_eth_dev structure used to reach the adapter state
 *
 * This function collects all the modifications needed by the bypass
 * driver.
 **/
void
ixgbe_bypass_init(struct rte_eth_dev *dev)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;

	adapter = IXGBE_DEV_TO_ADPATER(dev);
	hw = &adapter->hw;

	/* Only allow BYPASS ops on the first port */
	if (hw->device_id != IXGBE_DEV_ID_82599_BYPASS ||
	    hw->bus.func != 0) {
		PMD_DRV_LOG(ERR, "bypass function is not supported on that device");
		return;
	}

	/* set bypass ops. */
	adapter->bps.ops.bypass_rw = &ixgbe_bypass_rw_generic;
	adapter->bps.ops.bypass_valid_rd = &ixgbe_bypass_valid_rd_generic;
	adapter->bps.ops.bypass_set = &ixgbe_bypass_set_generic;
	adapter->bps.ops.bypass_rd_eep = &ixgbe_bypass_rd_eep_generic;

	/* set the time for logging. */
	ixgbe_bypass_set_time(adapter);

	/* Don't have the SDP to the laser */
	hw->mac.ops.disable_tx_laser = NULL;
	hw->mac.ops.enable_tx_laser = NULL;
	hw->mac.ops.flap_tx_laser = NULL;
}

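/**
 * ixgbe_bypass_state_show - Read the current bypass state from the FW.
 *
 * @dev: pointer to rte_eth_dev structure
 * @state: receives the bypass status field of BYPASS_PAGE_CTL0
 *
 * Reads the CTL0 page through the bypass_rw op and extracts the
 * status bits into *state; the bypass_rw return code is passed back
 * to the caller.
 **/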
s32
ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state)
{
	struct ixgbe_hw *hw;
	s32 ret_val;
	u32 cmd;
	u32 by_ctl = 0;
	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);

	hw = &adapter->hw;
	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);

	cmd = BYPASS_PAGE_CTL0;
	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);

	/* Assume bypass_rw didn't error out; if it did, state will
	 * be ignored anyway.
	 */
	*state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) & BYPASS_STATUS_OFF_MASK;

	return ret_val;
}

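/**
 * ixgbe_bypass_state_store - Program a new bypass state into the FW.
 *
 * @dev: pointer to rte_eth_dev structure
 * @new_state: bypass mode to write into BYPASS_PAGE_CTL0
 *
 * Writes the requested mode and then switches the mode field back to
 * BYPASS_AUTO so the FW can keep receiving events.
 **/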
s32
ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state)
{
	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
	struct ixgbe_hw *hw;
	s32 ret_val;

	hw = &adapter->hw;
	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);

	/* Set the new state */
	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
					      BYPASS_MODE_OFF_M, *new_state);
	if (ret_val)
		goto exit;

	/* Set AUTO back on so FW can receive events */
	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
					      BYPASS_MODE_OFF_M, BYPASS_AUTO);

exit:
	return ret_val;
}

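/**
 * ixgbe_bypass_event_show - Read the state configured for a bypass event.
 *
 * @dev: pointer to rte_eth_dev structure
 * @event: event to query (watchdog timeout, main/aux power on/off)
 * @state: receives the two status bits recorded for that event
 *
 * Reads BYPASS_PAGE_CTL0 and shifts out the field corresponding to
 * the requested event.
 **/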
s32
ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event,
			u32 *state)
{
	struct ixgbe_hw *hw;
	s32 ret_val;
	u32 shift;
	u32 cmd;
	u32 by_ctl = 0;
	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);

	hw = &adapter->hw;
	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);

	cmd = BYPASS_PAGE_CTL0;
	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);

	/* Assume bypass_rw didn't error out; if it did, event will
	 * be ignored anyway.
	 */
	switch (event) {
	case BYPASS_EVENT_WDT_TO:
		shift = BYPASS_WDTIMEOUT_SHIFT;
		break;
	case BYPASS_EVENT_MAIN_ON:
		shift = BYPASS_MAIN_ON_SHIFT;
		break;
	case BYPASS_EVENT_MAIN_OFF:
		shift = BYPASS_MAIN_OFF_SHIFT;
		break;
	case BYPASS_EVENT_AUX_ON:
		shift = BYPASS_AUX_ON_SHIFT;
		break;
	case BYPASS_EVENT_AUX_OFF:
		shift = BYPASS_AUX_OFF_SHIFT;
		break;
	default:
		return EINVAL;
	}

	*state = (by_ctl >> shift) & 0x3;

	return ret_val;
}

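/**
 * ixgbe_bypass_event_store - Set the action taken on a bypass event.
 *
 * @dev: pointer to rte_eth_dev structure
 * @event: event to configure (watchdog timeout, main/aux power on/off)
 * @state: state to store for that event
 *
 * Maps the event to its mask and shift within BYPASS_PAGE_CTL0 and
 * writes the new value through the bypass_set op.
 **/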
s32
ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event,
			 u32 state)
{
	struct ixgbe_hw *hw;
	u32 status;
	u32 off;
	s32 ret_val;
	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);

	hw = &adapter->hw;
	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);

	switch (event) {
	case BYPASS_EVENT_WDT_TO:
		off = BYPASS_WDTIMEOUT_M;
		status = state << BYPASS_WDTIMEOUT_SHIFT;
		break;
	case BYPASS_EVENT_MAIN_ON:
		off = BYPASS_MAIN_ON_M;
		status = state << BYPASS_MAIN_ON_SHIFT;
		break;
	case BYPASS_EVENT_MAIN_OFF:
		off = BYPASS_MAIN_OFF_M;
		status = state << BYPASS_MAIN_OFF_SHIFT;
		break;
	case BYPASS_EVENT_AUX_ON:
		off = BYPASS_AUX_ON_M;
		status = state << BYPASS_AUX_ON_SHIFT;
		break;
	case BYPASS_EVENT_AUX_OFF:
		off = BYPASS_AUX_OFF_M;
		status = state << BYPASS_AUX_OFF_SHIFT;
		break;
	default:
		return EINVAL;
	}

	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
					      off, status);

	return ret_val;
}

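/**
 * ixgbe_bypass_wd_timeout_store - Configure the bypass watchdog timer.
 *
 * @dev: pointer to rte_eth_dev structure
 * @timeout: timeout value, or RTE_BYPASS_TMT_OFF to disable the watchdog
 *
 * Clears the watchdog enable bit when the timeout is RTE_BYPASS_TMT_OFF;
 * otherwise programs the timeout value and sets the enable bit.
 **/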
s32
ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
{
	struct ixgbe_hw *hw;
	u32 status;
	u32 mask;
	s32 ret_val;
	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);

	hw = &adapter->hw;
	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);

	/* disable the timer with a timeout of zero */
	if (timeout == RTE_BYPASS_TMT_OFF) {
		status = 0x0;	/* WDG enable off */
		mask = BYPASS_WDT_ENABLE_M;
	} else {
		/* set time out value */
		mask = BYPASS_WDT_VALUE_M;

		/* enable the timer */
		status = timeout << BYPASS_WDT_TIME_SHIFT;
		status |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
		mask |= BYPASS_WDT_ENABLE_M;
	}

	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
					      mask, status);

	return ret_val;
}

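/**
 * ixgbe_bypass_ver_show - Read the bypass FW version from the EEPROM.
 *
 * @dev: pointer to rte_eth_dev structure
 * @ver: receives the one-byte version value
 *
 * Writes BYPASS_PAGE_CTL2 with the EEPROM version address selected,
 * waits for the write to settle, then reads the result back.
 **/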
s32
ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver)
{
	struct ixgbe_hw *hw;
	u32 cmd;
	u32 status;
	s32 ret_val;
	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);

	hw = &adapter->hw;
	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);

	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
	cmd |= (BYPASS_EEPROM_VER_ADD << BYPASS_CTL2_OFFSET_SHIFT) &
	       BYPASS_CTL2_OFFSET_M;
	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
	if (ret_val)
		goto exit;

	/* wait for the write to stick */
	msleep(100);

	/* Now read the results */
	cmd &= ~BYPASS_WE;
	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
	if (ret_val)
		goto exit;

	*ver = status & BYPASS_CTL2_DATA_M;	/* only one byte of data */

exit:
	return ret_val;
}

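/**
 * ixgbe_bypass_wd_timeout_show - Read the current watchdog timeout.
 *
 * @dev: pointer to rte_eth_dev structure
 * @wd_timeout: receives the timeout value, or RTE_BYPASS_TMT_OFF when
 *              the watchdog is disabled
 *
 * Reads BYPASS_PAGE_CTL0 and decodes the watchdog enable and time
 * fields.
 **/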
s32
ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout)
{
	struct ixgbe_hw *hw;
	u32 by_ctl = 0;
	u32 cmd;
	u32 wdg;
	s32 ret_val;
	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);

	hw = &adapter->hw;
	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);

	cmd = BYPASS_PAGE_CTL0;
	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);

	wdg = by_ctl & BYPASS_WDT_ENABLE_M;
	if (!wdg)
		*wd_timeout = RTE_BYPASS_TMT_OFF;
	else
		*wd_timeout = (by_ctl >> BYPASS_WDT_TIME_SHIFT) &
			      BYPASS_WDT_MASK;

	return ret_val;
}

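/**
 * ixgbe_bypass_wd_reset - Pet the bypass watchdog and resync the FW time.
 *
 * @dev: pointer to rte_eth_dev structure
 *
 * Writes BYPASS_PAGE_CTL1 with the WDT pet, time and offset-reset bits
 * set, then polls until the FW reflects the write or the retry count
 * is exhausted.
 **/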
s32
ixgbe_bypass_wd_reset(struct rte_eth_dev *dev)
{
	u32 cmd;
	u32 status;
	u32 sec;
	u32 count = 0;
	s32 ret_val;
	struct ixgbe_hw *hw;
	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);

	hw = &adapter->hw;

	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_valid_rd, -ENOTSUP);

	/* Use the lower level bit-bang functions since we don't need
	 * to read the register first to get its current state as we
	 * are setting everything in this write.
	 */
	/* Set up WD pet */
	cmd = BYPASS_PAGE_CTL1 | BYPASS_WE | BYPASS_CTL1_WDT_PET;

	/* Resync the FW time while writing to CTL1 anyway */
	adapter->bps.reset_tm = time(NULL);
	sec = 0;

	cmd |= (sec & BYPASS_CTL1_TIME_M) | BYPASS_CTL1_VALID;

	/* reset FW timer offset since we are resetting the clock */
	cmd |= BYPASS_CTL1_OFFTRST;

	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);

	/* Read until it matches what we wrote, or we time out */
	do {
		if (count++ > 10) {
			ret_val = IXGBE_BYPASS_FW_WRITE_FAILURE;
			break;
		}

		if (adapter->bps.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &status)) {
			ret_val = IXGBE_ERR_INVALID_ARGUMENT;
			break;
		}
	} while (!adapter->bps.ops.bypass_valid_rd(cmd, status));

	return ret_val;
}