drivers/net/ixgbe/ixgbe_x540.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_X540_MAX_TX_QUEUES 128
#define IXGBE_X540_MAX_RX_QUEUES 128
#define IXGBE_X540_RAR_ENTRIES   128
#define IXGBE_X540_MC_TBL_SIZE   128
#define IXGBE_X540_VFT_TBL_SIZE  128

static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);

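/**
 * ixgbe_get_media_type_X540 - Get media type
 * @hw: pointer to hardware structure
 *
 * The X540 is a copper (BASE-T) device, so always report copper media.
 **/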
static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
        return ixgbe_media_type_copper;
}

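/**
 * ixgbe_get_invariants_X540 - Set device invariants
 * @hw: pointer to hardware structure
 *
 * Identifies the PHY and fills in the multicast and VLAN filter table sizes,
 * receive address entries, queue counts and the number of MSI-X vectors
 * supported by this device.
 **/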
static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
        struct ixgbe_mac_info *mac = &hw->mac;

        /* Call PHY identify routine to get the phy type */
        ixgbe_identify_phy_generic(hw);

        mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
        mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
        mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
        mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
        mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
        mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

        return 0;
}

/**
 * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 **/
static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
                                     ixgbe_link_speed speed, bool autoneg,
                                     bool autoneg_wait_to_complete)
{
        return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
                                            autoneg_wait_to_complete);
}

/**
 * ixgbe_reset_hw_X540 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
 * reset.
 **/
static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
        ixgbe_link_speed link_speed;
        s32 status = 0;
        u32 ctrl;
        u32 ctrl_ext;
        u32 reset_bit;
        u32 i;
        u32 autoc;
        u32 autoc2;
        bool link_up = false;

        /* Call adapter stop to disable tx/rx and clear interrupts */
        hw->mac.ops.stop_adapter(hw);

        /*
         * Prevent the PCI-E bus from hanging by disabling PCI-E master
         * access and verify no pending requests before reset
         */
        ixgbe_disable_pcie_master(hw);

mac_reset_top:
        /*
         * Issue global reset to the MAC. Needs to be SW reset if link is up.
         * If link reset is used when link is up, it might reset the PHY when
         * mng is using it. If link is down or the flag to force full link
         * reset is set, then perform link reset.
         */
        if (hw->force_full_reset) {
                reset_bit = IXGBE_CTRL_LNK_RST;
        } else {
                hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
                if (!link_up)
                        reset_bit = IXGBE_CTRL_LNK_RST;
                else
                        reset_bit = IXGBE_CTRL_RST;
        }

        ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
        IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
        IXGBE_WRITE_FLUSH(hw);

        /* Poll for reset bit to self-clear indicating reset is complete */
        for (i = 0; i < 10; i++) {
                udelay(1);
                ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
                if (!(ctrl & reset_bit))
                        break;
        }
        if (ctrl & reset_bit) {
                status = IXGBE_ERR_RESET_FAILED;
                hw_dbg(hw, "Reset polling failed to complete.\n");
        }

        /*
         * Double resets are required for recovery from certain error
         * conditions. Between resets, it is necessary to stall to allow time
         * for any pending HW events to complete. We use 1usec since that is
         * what is needed for ixgbe_disable_pcie_master(). The second reset
         * then clears out any effects of those events.
         */
        if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
                hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
                udelay(1);
                goto mac_reset_top;
        }

        /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

        msleep(50);

        /* Set the Rx packet buffer size. */
        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

        /* Store the permanent mac address */
        hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

        /*
         * Store the original AUTOC/AUTOC2 values if they have not been
         * stored off yet. Otherwise restore the stored original
         * values since the reset operation sets back to defaults.
         */
        autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
        if (hw->mac.orig_link_settings_stored == false) {
                hw->mac.orig_autoc = autoc;
                hw->mac.orig_autoc2 = autoc2;
                hw->mac.orig_link_settings_stored = true;
        } else {
                if (autoc != hw->mac.orig_autoc)
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
                                        IXGBE_AUTOC_AN_RESTART));

                if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
                    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
                        autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
                        autoc2 |= (hw->mac.orig_autoc2 &
                                   IXGBE_AUTOC2_UPPER_MASK);
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
                }
        }

        /*
         * Store MAC address from RAR0, clear receive address registers, and
         * clear the multicast table. Also reset num_rar_entries to 128,
         * since we modify this value when programming the SAN MAC address.
         */
        hw->mac.num_rar_entries = IXGBE_X540_RAR_ENTRIES;
        hw->mac.ops.init_rx_addrs(hw);

        /* Store the permanent mac address */
        hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

        /* Store the permanent SAN mac address */
        hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

        /* Add the SAN MAC address to the RAR only if it's a valid address */
        if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
                hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
                                    hw->mac.san_addr, 0, IXGBE_RAH_AV);

                /* Reserve the last RAR for the SAN MAC address */
                hw->mac.num_rar_entries--;
        }

        /* Store the alternative WWNN/WWPN prefix */
        hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
                                   &hw->mac.wwpn_prefix);

        return status;
}

/**
 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
{
        u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
        u16 ext_ability = 0;

        hw->phy.ops.identify(hw);

        hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
                             &ext_ability);
        if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
                physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
        if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
                physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
        if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
                physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;

        return physical_layer;
}

/**
 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.
 **/
static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
        u32 eec;
        u16 eeprom_size;

        if (eeprom->type == ixgbe_eeprom_uninitialized) {
                eeprom->semaphore_delay = 10;
                eeprom->type = ixgbe_flash;

                eec = IXGBE_READ_REG(hw, IXGBE_EEC);
                eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
                                    IXGBE_EEC_SIZE_SHIFT);
                eeprom->word_size = 1 << (eeprom_size +
                                          IXGBE_EEPROM_WORD_SIZE_SHIFT);

                hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
                       eeprom->type, eeprom->word_size);
        }

        return 0;
}

/**
 * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 **/
static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
        s32 status;

        if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
                return IXGBE_ERR_SWFW_SYNC;

        status = ixgbe_read_eerd_generic(hw, offset, data);

        ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
        return status;
}

/**
 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word to write to the EEPROM
 *
 * Write a 16 bit word to the EEPROM using the EEWR register.
 **/
static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
        u32 eewr;
        s32 status;

        hw->eeprom.ops.init_params(hw);

        if (offset >= hw->eeprom.word_size)
                return IXGBE_ERR_EEPROM;

        eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
               (data << IXGBE_EEPROM_RW_REG_DATA) |
               IXGBE_EEPROM_RW_REG_START;

        if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
                return IXGBE_ERR_SWFW_SYNC;

        status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
        if (status != 0) {
                hw_dbg(hw, "Eeprom write EEWR timed out\n");
                goto out;
        }

        IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

        status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
        if (status != 0)
                hw_dbg(hw, "Eeprom write EEWR timed out\n");

out:
        ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
        return status;
}

/**
 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 **/
static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
        u16 i;
        u16 j;
        u16 checksum = 0;
        u16 length = 0;
        u16 pointer = 0;
        u16 word = 0;

        /* Include 0x0-0x3F in the checksum */
        for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
                if (hw->eeprom.ops.read(hw, i, &word) != 0) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        break;
                }
                checksum += word;
        }

        /*
         * Include all data from pointers 0x3, 0x6-0xE. This excludes the
         * FW, PHY module, and PCIe Expansion/Option ROM pointers.
         */
        for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
                if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
                        continue;

                if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        break;
                }

                /* Skip pointer section if the pointer is invalid. */
                if (pointer == 0xFFFF || pointer == 0 ||
                    pointer >= hw->eeprom.word_size)
                        continue;

                if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        break;
                }

                /* Skip pointer section if length is invalid. */
                if (length == 0xFFFF || length == 0 ||
                    (pointer + length) >= hw->eeprom.word_size)
                        continue;

                for (j = pointer + 1; j <= pointer + length; j++) {
                        if (hw->eeprom.ops.read(hw, j, &word) != 0) {
                                hw_dbg(hw, "EEPROM read failed\n");
                                break;
                        }
                        checksum += word;
                }
        }

        checksum = (u16)IXGBE_EEPROM_SUM - checksum;

        return checksum;
}

/**
 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
 * @hw: pointer to hardware structure
 *
 * After writing the EEPROM to shadow RAM using the EEWR register, software
 * calculates the checksum, updates it in the EEPROM, and instructs the
 * hardware to update the flash.
 **/
static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
        s32 status;

        status = ixgbe_update_eeprom_checksum_generic(hw);

        if (status == 0)
                status = ixgbe_update_flash_X540(hw);

        return status;
}

/**
 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
 * @hw: pointer to hardware structure
 *
 * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
 * EEPROM from shadow RAM to the flash device.
 **/
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
        u32 flup;
        s32 status = IXGBE_ERR_EEPROM;

        status = ixgbe_poll_flash_update_done_X540(hw);
        if (status == IXGBE_ERR_EEPROM) {
                hw_dbg(hw, "Flash update time out\n");
                goto out;
        }

        flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
        IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);

        status = ixgbe_poll_flash_update_done_X540(hw);
        if (status == 0)
                hw_dbg(hw, "Flash update complete\n");
        else
                hw_dbg(hw, "Flash update time out\n");

        if (hw->revision_id == 0) {
                flup = IXGBE_READ_REG(hw, IXGBE_EEC);

                if (flup & IXGBE_EEC_SEC1VAL) {
                        flup |= IXGBE_EEC_FLUP;
                        IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
                }

                status = ixgbe_poll_flash_update_done_X540(hw);
                if (status == 0)
                        hw_dbg(hw, "Flash update complete\n");
                else
                        hw_dbg(hw, "Flash update time out\n");
        }
out:
        return status;
}

/**
 * ixgbe_poll_flash_update_done_X540 - Poll flash update status
 * @hw: pointer to hardware structure
 *
 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
 * flash update is done.
 **/
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
        u32 i;
        u32 reg;
        s32 status = IXGBE_ERR_EEPROM;

        for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
                reg = IXGBE_READ_REG(hw, IXGBE_EEC);
                if (reg & IXGBE_EEC_FLUDONE) {
                        status = 0;
                        break;
                }
                udelay(5);
        }
        return status;
}

/**
 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
        u32 swfw_sync;
        u32 swmask = mask;
        u32 fwmask = mask << 5;
        u32 hwmask = 0;
        u32 timeout = 200;
        u32 i;

        if (swmask == IXGBE_GSSR_EEP_SM)
                hwmask = IXGBE_GSSR_FLASH_SM;

        for (i = 0; i < timeout; i++) {
                /*
                 * SW NVM semaphore bit is used for access to all
                 * SW_FW_SYNC bits (not just NVM)
                 */
                if (ixgbe_get_swfw_sync_semaphore(hw))
                        return IXGBE_ERR_SWFW_SYNC;

                swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
                if (!(swfw_sync & (fwmask | swmask | hwmask))) {
                        swfw_sync |= swmask;
                        IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
                        ixgbe_release_swfw_sync_semaphore(hw);
                        break;
                } else {
                        /*
                         * Firmware currently using resource (fwmask),
                         * hardware currently using resource (hwmask),
                         * or other software thread currently using
                         * resource (swmask)
                         */
                        ixgbe_release_swfw_sync_semaphore(hw);
                        msleep(5);
                }
        }

        /*
         * If the resource is not released by the FW/HW the SW can assume that
         * the FW/HW is malfunctioning. In that case the SW should set the
         * SW bit(s) of the requested resource(s) while ignoring the
         * corresponding FW/HW bits in the SW_FW_SYNC register.
         */
        if (i >= timeout) {
                swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
                if (swfw_sync & (fwmask | hwmask)) {
                        if (ixgbe_get_swfw_sync_semaphore(hw))
                                return IXGBE_ERR_SWFW_SYNC;

                        swfw_sync |= swmask;
                        IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
                        ixgbe_release_swfw_sync_semaphore(hw);
                }
        }

        msleep(5);
        return 0;
}

/**
 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the SW_FW_SYNC register
 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
        u32 swfw_sync;
        u32 swmask = mask;

        ixgbe_get_swfw_sync_semaphore(hw);

        swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
        swfw_sync &= ~swmask;
        IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);

        ixgbe_release_swfw_sync_semaphore(hw);
        msleep(5);
}

/**
 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so SW/FW can gain control of shared resources
 **/
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
        s32 status = IXGBE_ERR_EEPROM;
        u32 timeout = 2000;
        u32 i;
        u32 swsm;

        /* Get SMBI software semaphore between device drivers first */
        for (i = 0; i < timeout; i++) {
                /*
                 * If the SMBI bit is 0 when we read it, then the bit will be
                 * set and we have the semaphore
                 */
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
                if (!(swsm & IXGBE_SWSM_SMBI)) {
                        status = 0;
                        break;
                }
                udelay(50);
        }

        /* Now get the semaphore between SW/FW through the REGSMP bit */
        if (!status) {
                for (i = 0; i < timeout; i++) {
                        swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
                        if (!(swsm & IXGBE_SWFW_REGSMP))
                                break;

                        udelay(50);
                }
        } else {
                hw_dbg(hw, "Software semaphore SMBI between device drivers "
                       "not granted.\n");
        }

        return status;
}

/**
 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
        u32 swsm;

        /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */

        swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
        swsm &= ~IXGBE_SWSM_SMBI;
        IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

        swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
        swsm &= ~IXGBE_SWFW_REGSMP;
        IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);

        IXGBE_WRITE_FLUSH(hw);
}

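/*
 * MAC operations for the X540. Entries that are not X540-specific reuse
 * the generic ixgbe implementations.
 */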
static struct ixgbe_mac_operations mac_ops_X540 = {
        .init_hw = &ixgbe_init_hw_generic,
        .reset_hw = &ixgbe_reset_hw_X540,
        .start_hw = &ixgbe_start_hw_generic,
        .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
        .get_media_type = &ixgbe_get_media_type_X540,
        .get_supported_physical_layer =
                &ixgbe_get_supported_physical_layer_X540,
        .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
        .get_mac_addr = &ixgbe_get_mac_addr_generic,
        .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
        .get_device_caps = NULL,
        .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
        .stop_adapter = &ixgbe_stop_adapter_generic,
        .get_bus_info = &ixgbe_get_bus_info_generic,
        .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
        .read_analog_reg8 = NULL,
        .write_analog_reg8 = NULL,
        .setup_link = &ixgbe_setup_mac_link_X540,
        .check_link = &ixgbe_check_mac_link_generic,
        .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
        .led_on = &ixgbe_led_on_generic,
        .led_off = &ixgbe_led_off_generic,
        .blink_led_start = &ixgbe_blink_led_start_generic,
        .blink_led_stop = &ixgbe_blink_led_stop_generic,
        .set_rar = &ixgbe_set_rar_generic,
        .clear_rar = &ixgbe_clear_rar_generic,
        .set_vmdq = &ixgbe_set_vmdq_generic,
        .clear_vmdq = &ixgbe_clear_vmdq_generic,
        .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
        .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
        .enable_mc = &ixgbe_enable_mc_generic,
        .disable_mc = &ixgbe_disable_mc_generic,
        .clear_vfta = &ixgbe_clear_vfta_generic,
        .set_vfta = &ixgbe_set_vfta_generic,
        .fc_enable = &ixgbe_fc_enable_generic,
        .init_uta_tables = &ixgbe_init_uta_tables_generic,
        .setup_sfp = NULL,
        .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
        .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
        .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
        .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
};

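/*
 * EEPROM operations for the X540, which accesses its NVM words through the
 * flash-backed EERD/EEWR interface.
 */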
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
        .init_params = &ixgbe_init_eeprom_params_X540,
        .read = &ixgbe_read_eerd_X540,
        .write = &ixgbe_write_eewr_X540,
        .calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
        .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
        .update_checksum = &ixgbe_update_eeprom_checksum_X540,
};

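/* PHY operations for the X540's copper PHY. */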
static struct ixgbe_phy_operations phy_ops_X540 = {
        .identify = &ixgbe_identify_phy_generic,
        .identify_sfp = &ixgbe_identify_sfp_module_generic,
        .init = NULL,
        .reset = NULL,
        .read_reg = &ixgbe_read_phy_reg_generic,
        .write_reg = &ixgbe_write_phy_reg_generic,
        .setup_link = &ixgbe_setup_phy_link_generic,
        .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
        .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
        .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
        .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
        .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
        .check_overtemp = &ixgbe_tn_check_overtemp,
};

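/*
 * Top-level description of the X540 device: its MAC type, the MAC, EEPROM
 * and PHY operation tables defined above, and the generic mailbox operations.
 */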
struct ixgbe_info ixgbe_X540_info = {
        .mac = ixgbe_mac_X540,
        .get_invariants = &ixgbe_get_invariants_X540,
        .mac_ops = &mac_ops_X540,
        .eeprom_ops = &eeprom_ops_X540,
        .phy_ops = &phy_ops_X540,
        .mbx_ops = &mbx_ops_generic,
};