]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/neterion/s2io.c
Merge tag 'efi-urgent-for-v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git...
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / neterion / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
926bd900 3 * Copyright(c) 2002-2010 Exar Corp.
d44570e4 4 *
1da177e4
LT
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722
K
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code part that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4 27 * The module loadable parameters that are supported by the driver and a brief
a2a20aef 28 * explanation of all the variables.
9dc737a7 29 *
20346722
K
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
6d517a27 35 * values are 1, 2.
1da177e4 36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
8abc4d5b 40 * 2(MSI_X). Default value is '2(MSI_X)'
9dc737a7
AR
 41 * lro_max_pkts: This parameter defines the maximum number of packets that
 42 * can be aggregated into a single large packet
926930b2
SS
43 * napi: This parameter used to enable/disable NAPI (polling Rx)
44 * Possible values '1' for enable and '0' for disable. Default is '1'
926930b2
SS
45 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
46 * Possible values '1' for enable , '0' for disable.
47 * Default is '2' - which means disable in promisc mode
48 * and enable in non-promiscuous mode.
3a3d5756
SH
49 * multiq: This parameter used to enable/disable MULTIQUEUE support.
50 * Possible values '1' for enable and '0' for disable. Default is '0'
1da177e4
LT
51 ************************************************************************/
52
6cef2b8e
JP
53#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54
1da177e4
LT
55#include <linux/module.h>
56#include <linux/types.h>
57#include <linux/errno.h>
58#include <linux/ioport.h>
59#include <linux/pci.h>
1e7f0bd8 60#include <linux/dma-mapping.h>
1da177e4
LT
61#include <linux/kernel.h>
62#include <linux/netdevice.h>
63#include <linux/etherdevice.h>
40239396 64#include <linux/mdio.h>
1da177e4
LT
65#include <linux/skbuff.h>
66#include <linux/init.h>
67#include <linux/delay.h>
68#include <linux/stddef.h>
69#include <linux/ioctl.h>
70#include <linux/timex.h>
1da177e4 71#include <linux/ethtool.h>
1da177e4 72#include <linux/workqueue.h>
be3a6b02 73#include <linux/if_vlan.h>
7d3d0439
RA
74#include <linux/ip.h>
75#include <linux/tcp.h>
d44570e4
JP
76#include <linux/uaccess.h>
77#include <linux/io.h>
2208e9a7 78#include <linux/io-64-nonatomic-lo-hi.h>
5a0e3ad6 79#include <linux/slab.h>
70c71606 80#include <linux/prefetch.h>
7d3d0439 81#include <net/tcp.h>
9a18dd15 82#include <net/checksum.h>
1da177e4 83
fe931395 84#include <asm/div64.h>
330ce0de 85#include <asm/irq.h>
1da177e4
LT
86
87/* local include */
88#include "s2io.h"
89#include "s2io-regs.h"
90
11410b62 91#define DRV_VERSION "2.0.26.28"
6c1792f4 92
1da177e4 93/* S2io Driver name & version. */
c0dbf37e
JM
94static const char s2io_driver_name[] = "Neterion";
95static const char s2io_driver_version[] = DRV_VERSION;
1da177e4 96
c0dbf37e
JM
97static const int rxd_size[2] = {32, 48};
98static const int rxd_count[2] = {127, 85};
da6971d8 99
1ee6dd77 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd
K
101{
102 int ret;
103
104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
d44570e4 105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
5e25b9dd
K
106
107 return ret;
108}
109
20346722 110/*
1da177e4
LT
111 * Cards with following subsystem_id have a link state indication
112 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
113 * macro below identifies these cards given the subsystem_id.
114 */
d44570e4
JP
115#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
116 (dev_type == XFRAME_I_DEVICE) ? \
117 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
118 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
1da177e4
LT
119
120#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
1da177e4 122
d44570e4 123static inline int is_s2io_card_up(const struct s2io_nic *sp)
92b84437
SS
124{
125 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126}
127
1da177e4 128/* Ethtool related variables and Macros. */
6fce365d 129static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
1da177e4
LT
130 "Register test\t(offline)",
131 "Eeprom test\t(offline)",
132 "Link test\t(online)",
133 "RLDRAM test\t(offline)",
134 "BIST Test\t(offline)"
135};
136
6fce365d 137static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
1da177e4
LT
138 {"tmac_frms"},
139 {"tmac_data_octets"},
140 {"tmac_drop_frms"},
141 {"tmac_mcst_frms"},
142 {"tmac_bcst_frms"},
143 {"tmac_pause_ctrl_frms"},
bd1034f0
AR
144 {"tmac_ttl_octets"},
145 {"tmac_ucst_frms"},
146 {"tmac_nucst_frms"},
1da177e4 147 {"tmac_any_err_frms"},
bd1034f0 148 {"tmac_ttl_less_fb_octets"},
1da177e4
LT
149 {"tmac_vld_ip_octets"},
150 {"tmac_vld_ip"},
151 {"tmac_drop_ip"},
152 {"tmac_icmp"},
153 {"tmac_rst_tcp"},
154 {"tmac_tcp"},
155 {"tmac_udp"},
156 {"rmac_vld_frms"},
157 {"rmac_data_octets"},
158 {"rmac_fcs_err_frms"},
159 {"rmac_drop_frms"},
160 {"rmac_vld_mcst_frms"},
161 {"rmac_vld_bcst_frms"},
162 {"rmac_in_rng_len_err_frms"},
bd1034f0 163 {"rmac_out_rng_len_err_frms"},
1da177e4
LT
164 {"rmac_long_frms"},
165 {"rmac_pause_ctrl_frms"},
bd1034f0
AR
166 {"rmac_unsup_ctrl_frms"},
167 {"rmac_ttl_octets"},
168 {"rmac_accepted_ucst_frms"},
169 {"rmac_accepted_nucst_frms"},
1da177e4 170 {"rmac_discarded_frms"},
bd1034f0
AR
171 {"rmac_drop_events"},
172 {"rmac_ttl_less_fb_octets"},
173 {"rmac_ttl_frms"},
1da177e4
LT
174 {"rmac_usized_frms"},
175 {"rmac_osized_frms"},
176 {"rmac_frag_frms"},
177 {"rmac_jabber_frms"},
bd1034f0
AR
178 {"rmac_ttl_64_frms"},
179 {"rmac_ttl_65_127_frms"},
180 {"rmac_ttl_128_255_frms"},
181 {"rmac_ttl_256_511_frms"},
182 {"rmac_ttl_512_1023_frms"},
183 {"rmac_ttl_1024_1518_frms"},
1da177e4
LT
184 {"rmac_ip"},
185 {"rmac_ip_octets"},
186 {"rmac_hdr_err_ip"},
187 {"rmac_drop_ip"},
188 {"rmac_icmp"},
189 {"rmac_tcp"},
190 {"rmac_udp"},
191 {"rmac_err_drp_udp"},
bd1034f0
AR
192 {"rmac_xgmii_err_sym"},
193 {"rmac_frms_q0"},
194 {"rmac_frms_q1"},
195 {"rmac_frms_q2"},
196 {"rmac_frms_q3"},
197 {"rmac_frms_q4"},
198 {"rmac_frms_q5"},
199 {"rmac_frms_q6"},
200 {"rmac_frms_q7"},
201 {"rmac_full_q0"},
202 {"rmac_full_q1"},
203 {"rmac_full_q2"},
204 {"rmac_full_q3"},
205 {"rmac_full_q4"},
206 {"rmac_full_q5"},
207 {"rmac_full_q6"},
208 {"rmac_full_q7"},
1da177e4 209 {"rmac_pause_cnt"},
bd1034f0
AR
210 {"rmac_xgmii_data_err_cnt"},
211 {"rmac_xgmii_ctrl_err_cnt"},
1da177e4
LT
212 {"rmac_accepted_ip"},
213 {"rmac_err_tcp"},
bd1034f0
AR
214 {"rd_req_cnt"},
215 {"new_rd_req_cnt"},
216 {"new_rd_req_rtry_cnt"},
217 {"rd_rtry_cnt"},
218 {"wr_rtry_rd_ack_cnt"},
219 {"wr_req_cnt"},
220 {"new_wr_req_cnt"},
221 {"new_wr_req_rtry_cnt"},
222 {"wr_rtry_cnt"},
223 {"wr_disc_cnt"},
224 {"rd_rtry_wr_ack_cnt"},
225 {"txp_wr_cnt"},
226 {"txd_rd_cnt"},
227 {"txd_wr_cnt"},
228 {"rxd_rd_cnt"},
229 {"rxd_wr_cnt"},
230 {"txf_rd_cnt"},
fa1f0cb3
SS
231 {"rxf_wr_cnt"}
232};
233
6fce365d 234static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
bd1034f0
AR
235 {"rmac_ttl_1519_4095_frms"},
236 {"rmac_ttl_4096_8191_frms"},
237 {"rmac_ttl_8192_max_frms"},
238 {"rmac_ttl_gt_max_frms"},
239 {"rmac_osized_alt_frms"},
240 {"rmac_jabber_alt_frms"},
241 {"rmac_gt_max_alt_frms"},
242 {"rmac_vlan_frms"},
243 {"rmac_len_discard"},
244 {"rmac_fcs_discard"},
245 {"rmac_pf_discard"},
246 {"rmac_da_discard"},
247 {"rmac_red_discard"},
248 {"rmac_rts_discard"},
249 {"rmac_ingm_full_discard"},
fa1f0cb3
SS
250 {"link_fault_cnt"}
251};
252
6fce365d 253static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
7ba013ac
K
254 {"\n DRIVER STATISTICS"},
255 {"single_bit_ecc_errs"},
256 {"double_bit_ecc_errs"},
bd1034f0
AR
257 {"parity_err_cnt"},
258 {"serious_err_cnt"},
259 {"soft_reset_cnt"},
260 {"fifo_full_cnt"},
8116f3cf
SS
261 {"ring_0_full_cnt"},
262 {"ring_1_full_cnt"},
263 {"ring_2_full_cnt"},
264 {"ring_3_full_cnt"},
265 {"ring_4_full_cnt"},
266 {"ring_5_full_cnt"},
267 {"ring_6_full_cnt"},
268 {"ring_7_full_cnt"},
43b7c451
SH
269 {"alarm_transceiver_temp_high"},
270 {"alarm_transceiver_temp_low"},
271 {"alarm_laser_bias_current_high"},
272 {"alarm_laser_bias_current_low"},
273 {"alarm_laser_output_power_high"},
274 {"alarm_laser_output_power_low"},
275 {"warn_transceiver_temp_high"},
276 {"warn_transceiver_temp_low"},
277 {"warn_laser_bias_current_high"},
278 {"warn_laser_bias_current_low"},
279 {"warn_laser_output_power_high"},
280 {"warn_laser_output_power_low"},
281 {"lro_aggregated_pkts"},
282 {"lro_flush_both_count"},
283 {"lro_out_of_sequence_pkts"},
284 {"lro_flush_due_to_max_pkts"},
285 {"lro_avg_aggr_pkts"},
286 {"mem_alloc_fail_cnt"},
287 {"pci_map_fail_cnt"},
288 {"watchdog_timer_cnt"},
289 {"mem_allocated"},
290 {"mem_freed"},
291 {"link_up_cnt"},
292 {"link_down_cnt"},
293 {"link_up_time"},
294 {"link_down_time"},
295 {"tx_tcode_buf_abort_cnt"},
296 {"tx_tcode_desc_abort_cnt"},
297 {"tx_tcode_parity_err_cnt"},
298 {"tx_tcode_link_loss_cnt"},
299 {"tx_tcode_list_proc_err_cnt"},
300 {"rx_tcode_parity_err_cnt"},
301 {"rx_tcode_abort_cnt"},
302 {"rx_tcode_parity_abort_cnt"},
303 {"rx_tcode_rda_fail_cnt"},
304 {"rx_tcode_unkn_prot_cnt"},
305 {"rx_tcode_fcs_err_cnt"},
306 {"rx_tcode_buf_size_err_cnt"},
307 {"rx_tcode_rxd_corrupt_cnt"},
308 {"rx_tcode_unkn_err_cnt"},
8116f3cf
SS
309 {"tda_err_cnt"},
310 {"pfc_err_cnt"},
311 {"pcc_err_cnt"},
312 {"tti_err_cnt"},
313 {"tpa_err_cnt"},
314 {"sm_err_cnt"},
315 {"lso_err_cnt"},
316 {"mac_tmac_err_cnt"},
317 {"mac_rmac_err_cnt"},
318 {"xgxs_txgxs_err_cnt"},
319 {"xgxs_rxgxs_err_cnt"},
320 {"rc_err_cnt"},
321 {"prc_pcix_err_cnt"},
322 {"rpa_err_cnt"},
323 {"rda_err_cnt"},
324 {"rti_err_cnt"},
325 {"mc_err_cnt"}
1da177e4
LT
326};
327
4c3616cd
AMR
328#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
329#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
330#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
fa1f0cb3 331
d44570e4
JP
332#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
fa1f0cb3 334
d44570e4
JP
335#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
1da177e4 337
4c3616cd 338#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
d44570e4 339#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
1da177e4 340
2fd37688
SS
341/* copy mac addr to def_mac_addr array */
342static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
343{
344 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
345 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
346 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
347 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
348 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
349 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
350}
04025095 351
20346722 352/*
1da177e4
LT
353 * Constants to be programmed into the Xena's registers, to configure
354 * the XAUI.
355 */
356
1da177e4 357#define END_SIGN 0x0
f71e1309 358static const u64 herc_act_dtx_cfg[] = {
541ae68f 359 /* Set address */
e960fc5c 360 0x8000051536750000ULL, 0x80000515367500E0ULL,
541ae68f 361 /* Write data */
e960fc5c 362 0x8000051536750004ULL, 0x80000515367500E4ULL,
541ae68f
K
363 /* Set address */
364 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
365 /* Write data */
366 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
367 /* Set address */
e960fc5c 368 0x801205150D440000ULL, 0x801205150D4400E0ULL,
369 /* Write data */
370 0x801205150D440004ULL, 0x801205150D4400E4ULL,
371 /* Set address */
541ae68f
K
372 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
373 /* Write data */
374 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
375 /* Done */
376 END_SIGN
377};
378
f71e1309 379static const u64 xena_dtx_cfg[] = {
c92ca04b 380 /* Set address */
1da177e4 381 0x8000051500000000ULL, 0x80000515000000E0ULL,
c92ca04b
AR
382 /* Write data */
383 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
384 /* Set address */
385 0x8001051500000000ULL, 0x80010515000000E0ULL,
386 /* Write data */
387 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
388 /* Set address */
1da177e4 389 0x8002051500000000ULL, 0x80020515000000E0ULL,
c92ca04b
AR
390 /* Write data */
391 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1da177e4
LT
392 END_SIGN
393};
394
20346722 395/*
1da177e4
LT
396 * Constants for Fixing the MacAddress problem seen mostly on
397 * Alpha machines.
398 */
f71e1309 399static const u64 fix_mac[] = {
1da177e4
LT
400 0x0060000000000000ULL, 0x0060600000000000ULL,
401 0x0040600000000000ULL, 0x0000600000000000ULL,
402 0x0020600000000000ULL, 0x0060600000000000ULL,
403 0x0020600000000000ULL, 0x0060600000000000ULL,
404 0x0020600000000000ULL, 0x0060600000000000ULL,
405 0x0020600000000000ULL, 0x0060600000000000ULL,
406 0x0020600000000000ULL, 0x0060600000000000ULL,
407 0x0020600000000000ULL, 0x0060600000000000ULL,
408 0x0020600000000000ULL, 0x0060600000000000ULL,
409 0x0020600000000000ULL, 0x0060600000000000ULL,
410 0x0020600000000000ULL, 0x0060600000000000ULL,
411 0x0020600000000000ULL, 0x0060600000000000ULL,
412 0x0020600000000000ULL, 0x0000600000000000ULL,
413 0x0040600000000000ULL, 0x0060600000000000ULL,
414 END_SIGN
415};
416
b41477f3
AR
417MODULE_LICENSE("GPL");
418MODULE_VERSION(DRV_VERSION);
419
420
1da177e4 421/* Module Loadable parameters. */
6cfc482b 422S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
b41477f3 423S2IO_PARM_INT(rx_ring_num, 1);
3a3d5756 424S2IO_PARM_INT(multiq, 0);
b41477f3
AR
425S2IO_PARM_INT(rx_ring_mode, 1);
426S2IO_PARM_INT(use_continuous_tx_intrs, 1);
427S2IO_PARM_INT(rmac_pause_time, 0x100);
428S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
429S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
430S2IO_PARM_INT(shared_splits, 0);
431S2IO_PARM_INT(tmac_util_period, 5);
432S2IO_PARM_INT(rmac_util_period, 5);
b41477f3 433S2IO_PARM_INT(l3l4hdr_size, 128);
6cfc482b
SH
434/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
435S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
303bcb4b 436/* Frequency of Rx desc syncs expressed as power of 2 */
b41477f3 437S2IO_PARM_INT(rxsync_frequency, 3);
eccb8628 438/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
8abc4d5b 439S2IO_PARM_INT(intr_type, 2);
7d3d0439 440/* Large receive offload feature */
43b7c451 441
7d3d0439
RA
442/* Max pkts to be aggregated by LRO at one time. If not specified,
443 * aggregation happens until we hit max IP pkt size(64K)
444 */
b41477f3 445S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
b41477f3 446S2IO_PARM_INT(indicate_max_pkts, 0);
db874e65
SS
447
448S2IO_PARM_INT(napi, 1);
926930b2 449S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
b41477f3
AR
450
451static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
d44570e4 452{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
b41477f3 453static unsigned int rx_ring_sz[MAX_RX_RINGS] =
d44570e4 454{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
b41477f3 455static unsigned int rts_frm_len[MAX_RX_RINGS] =
d44570e4 456{[0 ...(MAX_RX_RINGS - 1)] = 0 };
b41477f3
AR
457
458module_param_array(tx_fifo_len, uint, NULL, 0);
459module_param_array(rx_ring_sz, uint, NULL, 0);
460module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 461
20346722 462/*
1da177e4 463 * S2IO device table.
20346722 464 * This table lists all the devices that this driver supports.
1da177e4 465 */
9baa3c34 466static const struct pci_device_id s2io_tbl[] = {
1da177e4
LT
467 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
468 PCI_ANY_ID, PCI_ANY_ID},
469 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
470 PCI_ANY_ID, PCI_ANY_ID},
471 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
d44570e4
JP
472 PCI_ANY_ID, PCI_ANY_ID},
473 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
474 PCI_ANY_ID, PCI_ANY_ID},
1da177e4
LT
475 {0,}
476};
477
478MODULE_DEVICE_TABLE(pci, s2io_tbl);
479
3646f0e5 480static const struct pci_error_handlers s2io_err_handler = {
d796fdb7
LV
481 .error_detected = s2io_io_error_detected,
482 .slot_reset = s2io_io_slot_reset,
483 .resume = s2io_io_resume,
484};
485
1da177e4 486static struct pci_driver s2io_driver = {
d44570e4
JP
487 .name = "S2IO",
488 .id_table = s2io_tbl,
489 .probe = s2io_init_nic,
3a036ce5 490 .remove = s2io_rem_nic,
d44570e4 491 .err_handler = &s2io_err_handler,
1da177e4
LT
492};
493
494/* A simplifier macro used both by init and free shared_mem Fns(). */
f8a1988f 495#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
1da177e4 496
3a3d5756
SH
497/* netqueue manipulation helper functions */
498static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
499{
fd2ea0a7
DM
500 if (!sp->config.multiq) {
501 int i;
502
3a3d5756
SH
503 for (i = 0; i < sp->config.tx_fifo_num; i++)
504 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
3a3d5756 505 }
fd2ea0a7 506 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
507}
508
509static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
510{
fd2ea0a7 511 if (!sp->config.multiq)
3a3d5756
SH
512 sp->mac_control.fifos[fifo_no].queue_state =
513 FIFO_QUEUE_STOP;
fd2ea0a7
DM
514
515 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
516}
517
518static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
519{
fd2ea0a7
DM
520 if (!sp->config.multiq) {
521 int i;
522
3a3d5756
SH
523 for (i = 0; i < sp->config.tx_fifo_num; i++)
524 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 525 }
fd2ea0a7 526 netif_tx_start_all_queues(sp->dev);
3a3d5756
SH
527}
528
3a3d5756
SH
529static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
530{
fd2ea0a7
DM
531 if (!sp->config.multiq) {
532 int i;
533
3a3d5756
SH
534 for (i = 0; i < sp->config.tx_fifo_num; i++)
535 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 536 }
fd2ea0a7 537 netif_tx_wake_all_queues(sp->dev);
3a3d5756
SH
538}
539
540static inline void s2io_wake_tx_queue(
541 struct fifo_info *fifo, int cnt, u8 multiq)
542{
543
3a3d5756
SH
544 if (multiq) {
545 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
546 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
b19fa1fa 547 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
548 if (netif_queue_stopped(fifo->dev)) {
549 fifo->queue_state = FIFO_QUEUE_START;
550 netif_wake_queue(fifo->dev);
551 }
552 }
553}
554
1da177e4
LT
555/**
556 * init_shared_mem - Allocation and Initialization of Memory
557 * @nic: Device private variable.
20346722
K
558 * Description: The function allocates all the memory areas shared
559 * between the NIC and the driver. This includes Tx descriptors,
1da177e4
LT
560 * Rx descriptors and the statistics block.
561 */
562
563static int init_shared_mem(struct s2io_nic *nic)
564{
565 u32 size;
566 void *tmp_v_addr, *tmp_v_addr_next;
567 dma_addr_t tmp_p_addr, tmp_p_addr_next;
1ee6dd77 568 struct RxD_block *pre_rxd_blk = NULL;
372cc597 569 int i, j, blk_cnt;
1da177e4
LT
570 int lst_size, lst_per_page;
571 struct net_device *dev = nic->dev;
8ae418cf 572 unsigned long tmp;
1ee6dd77 573 struct buffAdd *ba;
ffb5df6c
JP
574 struct config_param *config = &nic->config;
575 struct mac_info *mac_control = &nic->mac_control;
491976b2 576 unsigned long long mem_allocated = 0;
1da177e4 577
13d866a9 578 /* Allocation and initialization of TXDLs in FIFOs */
1da177e4
LT
579 size = 0;
580 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
581 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
582
583 size += tx_cfg->fifo_len;
1da177e4
LT
584 }
585 if (size > MAX_AVAILABLE_TXDS) {
9e39f7c5
JP
586 DBG_PRINT(ERR_DBG,
587 "Too many TxDs requested: %d, max supported: %d\n",
588 size, MAX_AVAILABLE_TXDS);
b41477f3 589 return -EINVAL;
1da177e4
LT
590 }
591
2fda096d
SR
592 size = 0;
593 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
594 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
595
596 size = tx_cfg->fifo_len;
2fda096d
SR
597 /*
598 * Legal values are from 2 to 8192
599 */
600 if (size < 2) {
9e39f7c5
JP
601 DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
602 "Valid lengths are 2 through 8192\n",
603 i, size);
2fda096d
SR
604 return -EINVAL;
605 }
606 }
607
1ee6dd77 608 lst_size = (sizeof(struct TxD) * config->max_txds);
1da177e4
LT
609 lst_per_page = PAGE_SIZE / lst_size;
610
611 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
612 struct fifo_info *fifo = &mac_control->fifos[i];
613 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
614 int fifo_len = tx_cfg->fifo_len;
1ee6dd77 615 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
13d866a9
JP
616
617 fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
618 if (!fifo->list_info) {
d44570e4 619 DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
1da177e4
LT
620 return -ENOMEM;
621 }
491976b2 622 mem_allocated += list_holder_size;
1da177e4
LT
623 }
624 for (i = 0; i < config->tx_fifo_num; i++) {
625 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
626 lst_per_page);
13d866a9
JP
627 struct fifo_info *fifo = &mac_control->fifos[i];
628 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
629
630 fifo->tx_curr_put_info.offset = 0;
631 fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
632 fifo->tx_curr_get_info.offset = 0;
633 fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
634 fifo->fifo_no = i;
635 fifo->nic = nic;
636 fifo->max_txds = MAX_SKB_FRAGS + 2;
637 fifo->dev = dev;
20346722 638
1da177e4
LT
639 for (j = 0; j < page_num; j++) {
640 int k = 0;
641 dma_addr_t tmp_p;
642 void *tmp_v;
fb059b26
CJ
643 tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
644 &tmp_p, GFP_KERNEL);
1da177e4 645 if (!tmp_v) {
9e39f7c5 646 DBG_PRINT(INFO_DBG,
fb059b26 647 "dma_alloc_coherent failed for TxDL\n");
1da177e4
LT
648 return -ENOMEM;
649 }
776bd20f 650 /* If we got a zero DMA address(can happen on
651 * certain platforms like PPC), reallocate.
652 * Store virtual address of page we don't want,
653 * to be freed later.
654 */
655 if (!tmp_p) {
656 mac_control->zerodma_virt_addr = tmp_v;
6aa20a22 657 DBG_PRINT(INIT_DBG,
9e39f7c5
JP
658 "%s: Zero DMA address for TxDL. "
659 "Virtual address %p\n",
660 dev->name, tmp_v);
fb059b26
CJ
661 tmp_v = dma_alloc_coherent(&nic->pdev->dev,
662 PAGE_SIZE, &tmp_p,
663 GFP_KERNEL);
776bd20f 664 if (!tmp_v) {
0c61ed5f 665 DBG_PRINT(INFO_DBG,
fb059b26 666 "dma_alloc_coherent failed for TxDL\n");
776bd20f 667 return -ENOMEM;
668 }
491976b2 669 mem_allocated += PAGE_SIZE;
776bd20f 670 }
1da177e4
LT
671 while (k < lst_per_page) {
672 int l = (j * lst_per_page) + k;
13d866a9 673 if (l == tx_cfg->fifo_len)
20346722 674 break;
13d866a9 675 fifo->list_info[l].list_virt_addr =
d44570e4 676 tmp_v + (k * lst_size);
13d866a9 677 fifo->list_info[l].list_phy_addr =
d44570e4 678 tmp_p + (k * lst_size);
1da177e4
LT
679 k++;
680 }
681 }
682 }
1da177e4 683
2fda096d 684 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
685 struct fifo_info *fifo = &mac_control->fifos[i];
686 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
687
688 size = tx_cfg->fifo_len;
689 fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
690 if (!fifo->ufo_in_band_v)
2fda096d
SR
691 return -ENOMEM;
692 mem_allocated += (size * sizeof(u64));
693 }
fed5eccd 694
1da177e4
LT
695 /* Allocation and initialization of RXDs in Rings */
696 size = 0;
697 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
698 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
699 struct ring_info *ring = &mac_control->rings[i];
700
701 if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
9e39f7c5
JP
702 DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
703 "multiple of RxDs per Block\n",
704 dev->name, i);
1da177e4
LT
705 return FAILURE;
706 }
13d866a9
JP
707 size += rx_cfg->num_rxd;
708 ring->block_count = rx_cfg->num_rxd /
d44570e4 709 (rxd_count[nic->rxd_mode] + 1);
13d866a9 710 ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
1da177e4 711 }
da6971d8 712 if (nic->rxd_mode == RXD_MODE_1)
1ee6dd77 713 size = (size * (sizeof(struct RxD1)));
da6971d8 714 else
1ee6dd77 715 size = (size * (sizeof(struct RxD3)));
1da177e4
LT
716
717 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
718 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
719 struct ring_info *ring = &mac_control->rings[i];
720
721 ring->rx_curr_get_info.block_index = 0;
722 ring->rx_curr_get_info.offset = 0;
723 ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
724 ring->rx_curr_put_info.block_index = 0;
725 ring->rx_curr_put_info.offset = 0;
726 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
727 ring->nic = nic;
728 ring->ring_no = i;
13d866a9
JP
729
730 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
1da177e4
LT
731 /* Allocating all the Rx blocks */
732 for (j = 0; j < blk_cnt; j++) {
1ee6dd77 733 struct rx_block_info *rx_blocks;
da6971d8
AR
734 int l;
735
13d866a9 736 rx_blocks = &ring->rx_blocks[j];
d44570e4 737 size = SIZE_OF_BLOCK; /* size is always page size */
fb059b26
CJ
738 tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
739 &tmp_p_addr, GFP_KERNEL);
1da177e4
LT
740 if (tmp_v_addr == NULL) {
741 /*
20346722
K
742 * In case of failure, free_shared_mem()
743 * is called, which should free any
744 * memory that was alloced till the
1da177e4
LT
745 * failure happened.
746 */
da6971d8 747 rx_blocks->block_virt_addr = tmp_v_addr;
1da177e4
LT
748 return -ENOMEM;
749 }
491976b2 750 mem_allocated += size;
4f870320
JP
751
752 size = sizeof(struct rxd_info) *
753 rxd_count[nic->rxd_mode];
da6971d8
AR
754 rx_blocks->block_virt_addr = tmp_v_addr;
755 rx_blocks->block_dma_addr = tmp_p_addr;
4f870320 756 rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
372cc597
SS
757 if (!rx_blocks->rxds)
758 return -ENOMEM;
4f870320 759 mem_allocated += size;
d44570e4 760 for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
da6971d8
AR
761 rx_blocks->rxds[l].virt_addr =
762 rx_blocks->block_virt_addr +
763 (rxd_size[nic->rxd_mode] * l);
764 rx_blocks->rxds[l].dma_addr =
765 rx_blocks->block_dma_addr +
766 (rxd_size[nic->rxd_mode] * l);
767 }
1da177e4
LT
768 }
769 /* Interlinking all Rx Blocks */
770 for (j = 0; j < blk_cnt; j++) {
13d866a9
JP
771 int next = (j + 1) % blk_cnt;
772 tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
773 tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
774 tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
775 tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
1da177e4 776
43d620c8 777 pre_rxd_blk = tmp_v_addr;
1da177e4 778 pre_rxd_blk->reserved_2_pNext_RxD_block =
d44570e4 779 (unsigned long)tmp_v_addr_next;
1da177e4 780 pre_rxd_blk->pNext_RxD_Blk_physical =
d44570e4 781 (u64)tmp_p_addr_next;
1da177e4
LT
782 }
783 }
6d517a27 784 if (nic->rxd_mode == RXD_MODE_3B) {
da6971d8
AR
785 /*
786 * Allocation of Storages for buffer addresses in 2BUFF mode
787 * and the buffers as well.
788 */
789 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
790 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
791 struct ring_info *ring = &mac_control->rings[i];
792
793 blk_cnt = rx_cfg->num_rxd /
d44570e4 794 (rxd_count[nic->rxd_mode] + 1);
4f870320
JP
795 size = sizeof(struct buffAdd *) * blk_cnt;
796 ring->ba = kmalloc(size, GFP_KERNEL);
13d866a9 797 if (!ring->ba)
1da177e4 798 return -ENOMEM;
4f870320 799 mem_allocated += size;
da6971d8
AR
800 for (j = 0; j < blk_cnt; j++) {
801 int k = 0;
4f870320
JP
802
803 size = sizeof(struct buffAdd) *
804 (rxd_count[nic->rxd_mode] + 1);
805 ring->ba[j] = kmalloc(size, GFP_KERNEL);
13d866a9 806 if (!ring->ba[j])
1da177e4 807 return -ENOMEM;
4f870320 808 mem_allocated += size;
da6971d8 809 while (k != rxd_count[nic->rxd_mode]) {
13d866a9 810 ba = &ring->ba[j][k];
4f870320
JP
811 size = BUF0_LEN + ALIGN_SIZE;
812 ba->ba_0_org = kmalloc(size, GFP_KERNEL);
da6971d8
AR
813 if (!ba->ba_0_org)
814 return -ENOMEM;
4f870320 815 mem_allocated += size;
da6971d8
AR
816 tmp = (unsigned long)ba->ba_0_org;
817 tmp += ALIGN_SIZE;
d44570e4
JP
818 tmp &= ~((unsigned long)ALIGN_SIZE);
819 ba->ba_0 = (void *)tmp;
da6971d8 820
4f870320
JP
821 size = BUF1_LEN + ALIGN_SIZE;
822 ba->ba_1_org = kmalloc(size, GFP_KERNEL);
da6971d8
AR
823 if (!ba->ba_1_org)
824 return -ENOMEM;
4f870320 825 mem_allocated += size;
d44570e4 826 tmp = (unsigned long)ba->ba_1_org;
da6971d8 827 tmp += ALIGN_SIZE;
d44570e4
JP
828 tmp &= ~((unsigned long)ALIGN_SIZE);
829 ba->ba_1 = (void *)tmp;
da6971d8
AR
830 k++;
831 }
1da177e4
LT
832 }
833 }
834 }
1da177e4
LT
835
836 /* Allocation and initialization of Statistics block */
1ee6dd77 837 size = sizeof(struct stat_block);
d44570e4 838 mac_control->stats_mem =
fb059b26
CJ
839 dma_alloc_coherent(&nic->pdev->dev, size,
840 &mac_control->stats_mem_phy, GFP_KERNEL);
1da177e4
LT
841
842 if (!mac_control->stats_mem) {
20346722
K
843 /*
844 * In case of failure, free_shared_mem() is called, which
845 * should free any memory that was alloced till the
1da177e4
LT
846 * failure happened.
847 */
848 return -ENOMEM;
849 }
491976b2 850 mem_allocated += size;
1da177e4
LT
851 mac_control->stats_mem_sz = size;
852
853 tmp_v_addr = mac_control->stats_mem;
43d620c8 854 mac_control->stats_info = tmp_v_addr;
1da177e4 855 memset(tmp_v_addr, 0, size);
3a22813a
BL
856 DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
857 dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
491976b2 858 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
1da177e4
LT
859 return SUCCESS;
860}
861
20346722
K
862/**
863 * free_shared_mem - Free the allocated Memory
1da177e4
LT
864 * @nic: Device private variable.
865 * Description: This function is to free all memory locations allocated by
866 * the init_shared_mem() function and return it to the kernel.
867 */
868
869static void free_shared_mem(struct s2io_nic *nic)
870{
871 int i, j, blk_cnt, size;
872 void *tmp_v_addr;
873 dma_addr_t tmp_p_addr;
1da177e4 874 int lst_size, lst_per_page;
8910b49f 875 struct net_device *dev;
491976b2 876 int page_num = 0;
ffb5df6c
JP
877 struct config_param *config;
878 struct mac_info *mac_control;
879 struct stat_block *stats;
880 struct swStat *swstats;
1da177e4
LT
881
882 if (!nic)
883 return;
884
8910b49f
MG
885 dev = nic->dev;
886
1da177e4 887 config = &nic->config;
ffb5df6c
JP
888 mac_control = &nic->mac_control;
889 stats = mac_control->stats_info;
890 swstats = &stats->sw_stat;
1da177e4 891
d44570e4 892 lst_size = sizeof(struct TxD) * config->max_txds;
1da177e4
LT
893 lst_per_page = PAGE_SIZE / lst_size;
894
895 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
896 struct fifo_info *fifo = &mac_control->fifos[i];
897 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
898
899 page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
1da177e4
LT
900 for (j = 0; j < page_num; j++) {
901 int mem_blks = (j * lst_per_page);
13d866a9
JP
902 struct list_info_hold *fli;
903
904 if (!fifo->list_info)
6aa20a22 905 return;
13d866a9
JP
906
907 fli = &fifo->list_info[mem_blks];
908 if (!fli->list_virt_addr)
1da177e4 909 break;
fb059b26
CJ
910 dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
911 fli->list_virt_addr,
912 fli->list_phy_addr);
ffb5df6c 913 swstats->mem_freed += PAGE_SIZE;
1da177e4 914 }
776bd20f 915 /* If we got a zero DMA address during allocation,
916 * free the page now
917 */
918 if (mac_control->zerodma_virt_addr) {
fb059b26
CJ
919 dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
920 mac_control->zerodma_virt_addr,
921 (dma_addr_t)0);
6aa20a22 922 DBG_PRINT(INIT_DBG,
9e39f7c5
JP
923 "%s: Freeing TxDL with zero DMA address. "
924 "Virtual address %p\n",
925 dev->name, mac_control->zerodma_virt_addr);
ffb5df6c 926 swstats->mem_freed += PAGE_SIZE;
776bd20f 927 }
13d866a9 928 kfree(fifo->list_info);
82c2d023 929 swstats->mem_freed += tx_cfg->fifo_len *
d44570e4 930 sizeof(struct list_info_hold);
1da177e4
LT
931 }
932
1da177e4 933 size = SIZE_OF_BLOCK;
1da177e4 934 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
935 struct ring_info *ring = &mac_control->rings[i];
936
937 blk_cnt = ring->block_count;
1da177e4 938 for (j = 0; j < blk_cnt; j++) {
13d866a9
JP
939 tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
940 tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
1da177e4
LT
941 if (tmp_v_addr == NULL)
942 break;
fb059b26
CJ
943 dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
944 tmp_p_addr);
ffb5df6c 945 swstats->mem_freed += size;
13d866a9 946 kfree(ring->rx_blocks[j].rxds);
ffb5df6c
JP
947 swstats->mem_freed += sizeof(struct rxd_info) *
948 rxd_count[nic->rxd_mode];
1da177e4
LT
949 }
950 }
951
6d517a27 952 if (nic->rxd_mode == RXD_MODE_3B) {
da6971d8
AR
953 /* Freeing buffer storage addresses in 2BUFF mode. */
954 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
955 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
956 struct ring_info *ring = &mac_control->rings[i];
957
958 blk_cnt = rx_cfg->num_rxd /
959 (rxd_count[nic->rxd_mode] + 1);
da6971d8
AR
960 for (j = 0; j < blk_cnt; j++) {
961 int k = 0;
13d866a9 962 if (!ring->ba[j])
da6971d8
AR
963 continue;
964 while (k != rxd_count[nic->rxd_mode]) {
13d866a9 965 struct buffAdd *ba = &ring->ba[j][k];
da6971d8 966 kfree(ba->ba_0_org);
ffb5df6c
JP
967 swstats->mem_freed +=
968 BUF0_LEN + ALIGN_SIZE;
da6971d8 969 kfree(ba->ba_1_org);
ffb5df6c
JP
970 swstats->mem_freed +=
971 BUF1_LEN + ALIGN_SIZE;
da6971d8
AR
972 k++;
973 }
13d866a9 974 kfree(ring->ba[j]);
ffb5df6c
JP
975 swstats->mem_freed += sizeof(struct buffAdd) *
976 (rxd_count[nic->rxd_mode] + 1);
1da177e4 977 }
13d866a9 978 kfree(ring->ba);
ffb5df6c
JP
979 swstats->mem_freed += sizeof(struct buffAdd *) *
980 blk_cnt;
1da177e4 981 }
1da177e4 982 }
1da177e4 983
2fda096d 984 for (i = 0; i < nic->config.tx_fifo_num; i++) {
13d866a9
JP
985 struct fifo_info *fifo = &mac_control->fifos[i];
986 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
987
988 if (fifo->ufo_in_band_v) {
ffb5df6c
JP
989 swstats->mem_freed += tx_cfg->fifo_len *
990 sizeof(u64);
13d866a9 991 kfree(fifo->ufo_in_band_v);
2fda096d
SR
992 }
993 }
994
1da177e4 995 if (mac_control->stats_mem) {
ffb5df6c 996 swstats->mem_freed += mac_control->stats_mem_sz;
fb059b26
CJ
997 dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
998 mac_control->stats_mem,
999 mac_control->stats_mem_phy);
491976b2 1000 }
1da177e4
LT
1001}
1002
d0ea5cbd 1003/*
541ae68f
K
1004 * s2io_verify_pci_mode -
1005 */
1006
1ee6dd77 1007static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 1008{
1ee6dd77 1009 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1010 register u64 val64 = 0;
1011 int mode;
1012
1013 val64 = readq(&bar0->pci_mode);
1014 mode = (u8)GET_PCI_MODE(val64);
1015
d44570e4 1016 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f
K
1017 return -1; /* Unknown PCI mode */
1018 return mode;
1019}
1020
c92ca04b
AR
1021#define NEC_VENID 0x1033
1022#define NEC_DEVID 0x0125
1023static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1024{
1025 struct pci_dev *tdev = NULL;
008d845c 1026 for_each_pci_dev(tdev) {
26d36b64 1027 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
7ad62dbc 1028 if (tdev->bus == s2io_pdev->bus->parent) {
26d36b64 1029 pci_dev_put(tdev);
c92ca04b 1030 return 1;
7ad62dbc 1031 }
c92ca04b
AR
1032 }
1033 }
1034 return 0;
1035}
541ae68f 1036
7b32a312 1037static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
d0ea5cbd 1038/*
541ae68f
K
1039 * s2io_print_pci_mode -
1040 */
1ee6dd77 1041static int s2io_print_pci_mode(struct s2io_nic *nic)
541ae68f 1042{
1ee6dd77 1043 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1044 register u64 val64 = 0;
1045 int mode;
1046 struct config_param *config = &nic->config;
9e39f7c5 1047 const char *pcimode;
541ae68f
K
1048
1049 val64 = readq(&bar0->pci_mode);
1050 mode = (u8)GET_PCI_MODE(val64);
1051
d44570e4 1052 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f
K
1053 return -1; /* Unknown PCI mode */
1054
c92ca04b
AR
1055 config->bus_speed = bus_speed[mode];
1056
1057 if (s2io_on_nec_bridge(nic->pdev)) {
1058 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
d44570e4 1059 nic->dev->name);
c92ca04b
AR
1060 return mode;
1061 }
1062
d44570e4
JP
1063 switch (mode) {
1064 case PCI_MODE_PCI_33:
9e39f7c5 1065 pcimode = "33MHz PCI bus";
d44570e4
JP
1066 break;
1067 case PCI_MODE_PCI_66:
9e39f7c5 1068 pcimode = "66MHz PCI bus";
d44570e4
JP
1069 break;
1070 case PCI_MODE_PCIX_M1_66:
9e39f7c5 1071 pcimode = "66MHz PCIX(M1) bus";
d44570e4
JP
1072 break;
1073 case PCI_MODE_PCIX_M1_100:
9e39f7c5 1074 pcimode = "100MHz PCIX(M1) bus";
d44570e4
JP
1075 break;
1076 case PCI_MODE_PCIX_M1_133:
9e39f7c5 1077 pcimode = "133MHz PCIX(M1) bus";
d44570e4
JP
1078 break;
1079 case PCI_MODE_PCIX_M2_66:
9e39f7c5 1080 pcimode = "133MHz PCIX(M2) bus";
d44570e4
JP
1081 break;
1082 case PCI_MODE_PCIX_M2_100:
9e39f7c5 1083 pcimode = "200MHz PCIX(M2) bus";
d44570e4
JP
1084 break;
1085 case PCI_MODE_PCIX_M2_133:
9e39f7c5 1086 pcimode = "266MHz PCIX(M2) bus";
d44570e4
JP
1087 break;
1088 default:
9e39f7c5
JP
1089 pcimode = "unsupported bus!";
1090 mode = -1;
541ae68f
K
1091 }
1092
9e39f7c5
JP
1093 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1094 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1095
541ae68f
K
1096 return mode;
1097}
1098
b7c5678f
RV
1099/**
1100 * init_tti - Initialization transmit traffic interrupt scheme
1101 * @nic: device private variable
1102 * @link: link status (UP/DOWN) used to enable/disable continuous
1103 * transmit interrupts
0bf4d9af
YY
1104 * @may_sleep: parameter indicates if sleeping when waiting for
1105 * command complete
b7c5678f
RV
1106 * Description: The function configures transmit traffic interrupts
1107 * Return Value: SUCCESS on success and
1108 * '-1' on failure
1109 */
1110
5ce7f3f4 1111static int init_tti(struct s2io_nic *nic, int link, bool may_sleep)
b7c5678f
RV
1112{
1113 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1114 register u64 val64 = 0;
1115 int i;
ffb5df6c 1116 struct config_param *config = &nic->config;
b7c5678f
RV
1117
1118 for (i = 0; i < config->tx_fifo_num; i++) {
1119 /*
1120 * TTI Initialization. Default Tx timer gets us about
1121 * 250 interrupts per sec. Continuous interrupts are enabled
1122 * by default.
1123 */
1124 if (nic->device_type == XFRAME_II_DEVICE) {
1125 int count = (nic->config.bus_speed * 125)/2;
1126 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1127 } else
1128 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1129
1130 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
d44570e4
JP
1131 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1132 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1133 TTI_DATA1_MEM_TX_TIMER_AC_EN;
ac731ab6
SH
1134 if (i == 0)
1135 if (use_continuous_tx_intrs && (link == LINK_UP))
1136 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
b7c5678f
RV
1137 writeq(val64, &bar0->tti_data1_mem);
1138
ac731ab6
SH
1139 if (nic->config.intr_type == MSI_X) {
1140 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1141 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1142 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1143 TTI_DATA2_MEM_TX_UFC_D(0x300);
1144 } else {
1145 if ((nic->config.tx_steering_type ==
d44570e4
JP
1146 TX_DEFAULT_STEERING) &&
1147 (config->tx_fifo_num > 1) &&
1148 (i >= nic->udp_fifo_idx) &&
1149 (i < (nic->udp_fifo_idx +
1150 nic->total_udp_fifos)))
ac731ab6
SH
1151 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1152 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1153 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1154 TTI_DATA2_MEM_TX_UFC_D(0x120);
1155 else
1156 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1157 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1158 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1159 TTI_DATA2_MEM_TX_UFC_D(0x80);
1160 }
b7c5678f
RV
1161
1162 writeq(val64, &bar0->tti_data2_mem);
1163
d44570e4
JP
1164 val64 = TTI_CMD_MEM_WE |
1165 TTI_CMD_MEM_STROBE_NEW_CMD |
1166 TTI_CMD_MEM_OFFSET(i);
b7c5678f
RV
1167 writeq(val64, &bar0->tti_command_mem);
1168
1169 if (wait_for_cmd_complete(&bar0->tti_command_mem,
d44570e4 1170 TTI_CMD_MEM_STROBE_NEW_CMD,
5ce7f3f4 1171 S2IO_BIT_RESET, may_sleep) != SUCCESS)
b7c5678f
RV
1172 return FAILURE;
1173 }
1174
1175 return SUCCESS;
1176}
1177
20346722
K
1178/**
1179 * init_nic - Initialization of hardware
b7c5678f 1180 * @nic: device private variable
20346722
K
1181 * Description: The function sequentially configures every block
1182 * of the H/W from their reset values.
1183 * Return Value: SUCCESS on success and
1da177e4
LT
1184 * '-1' on failure (endian settings incorrect).
1185 */
1186
1187static int init_nic(struct s2io_nic *nic)
1188{
1ee6dd77 1189 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
1190 struct net_device *dev = nic->dev;
1191 register u64 val64 = 0;
1192 void __iomem *add;
1193 u32 time;
1194 int i, j;
c92ca04b 1195 int dtx_cnt = 0;
1da177e4 1196 unsigned long long mem_share;
20346722 1197 int mem_size;
ffb5df6c
JP
1198 struct config_param *config = &nic->config;
1199 struct mac_info *mac_control = &nic->mac_control;
1da177e4 1200
5e25b9dd 1201 /* to set the swapper controle on the card */
d44570e4
JP
1202 if (s2io_set_swapper(nic)) {
1203 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
9f74ffde 1204 return -EIO;
1da177e4
LT
1205 }
1206
541ae68f
K
1207 /*
1208 * Herc requires EOI to be removed from reset before XGXS, so..
1209 */
1210 if (nic->device_type & XFRAME_II_DEVICE) {
1211 val64 = 0xA500000000ULL;
1212 writeq(val64, &bar0->sw_reset);
1213 msleep(500);
1214 val64 = readq(&bar0->sw_reset);
1215 }
1216
1da177e4
LT
1217 /* Remove XGXS from reset state */
1218 val64 = 0;
1219 writeq(val64, &bar0->sw_reset);
1da177e4 1220 msleep(500);
20346722 1221 val64 = readq(&bar0->sw_reset);
1da177e4 1222
7962024e
SH
1223 /* Ensure that it's safe to access registers by checking
1224 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1225 */
1226 if (nic->device_type == XFRAME_II_DEVICE) {
1227 for (i = 0; i < 50; i++) {
1228 val64 = readq(&bar0->adapter_status);
1229 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1230 break;
1231 msleep(10);
1232 }
1233 if (i == 50)
1234 return -ENODEV;
1235 }
1236
1da177e4
LT
1237 /* Enable Receiving broadcasts */
1238 add = &bar0->mac_cfg;
1239 val64 = readq(&bar0->mac_cfg);
1240 val64 |= MAC_RMAC_BCAST_ENABLE;
1241 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
d44570e4 1242 writel((u32)val64, add);
1da177e4
LT
1243 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1244 writel((u32) (val64 >> 32), (add + 4));
1245
1246 /* Read registers in all blocks */
1247 val64 = readq(&bar0->mac_int_mask);
1248 val64 = readq(&bar0->mc_int_mask);
1249 val64 = readq(&bar0->xgxs_int_mask);
1250
1251 /* Set MTU */
1252 val64 = dev->mtu;
1253 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1254
541ae68f
K
1255 if (nic->device_type & XFRAME_II_DEVICE) {
1256 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
303bcb4b 1257 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 1258 &bar0->dtx_control, UF);
541ae68f
K
1259 if (dtx_cnt & 0x1)
1260 msleep(1); /* Necessary!! */
1da177e4
LT
1261 dtx_cnt++;
1262 }
541ae68f 1263 } else {
c92ca04b
AR
1264 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1265 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1266 &bar0->dtx_control, UF);
1267 val64 = readq(&bar0->dtx_control);
1268 dtx_cnt++;
1da177e4
LT
1269 }
1270 }
1271
1272 /* Tx DMA Initialization */
1273 val64 = 0;
1274 writeq(val64, &bar0->tx_fifo_partition_0);
1275 writeq(val64, &bar0->tx_fifo_partition_1);
1276 writeq(val64, &bar0->tx_fifo_partition_2);
1277 writeq(val64, &bar0->tx_fifo_partition_3);
1278
1da177e4 1279 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
1280 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1281
1282 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1283 vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1da177e4
LT
1284
1285 if (i == (config->tx_fifo_num - 1)) {
1286 if (i % 2 == 0)
1287 i++;
1288 }
1289
1290 switch (i) {
1291 case 1:
1292 writeq(val64, &bar0->tx_fifo_partition_0);
1293 val64 = 0;
b7c5678f 1294 j = 0;
1da177e4
LT
1295 break;
1296 case 3:
1297 writeq(val64, &bar0->tx_fifo_partition_1);
1298 val64 = 0;
b7c5678f 1299 j = 0;
1da177e4
LT
1300 break;
1301 case 5:
1302 writeq(val64, &bar0->tx_fifo_partition_2);
1303 val64 = 0;
b7c5678f 1304 j = 0;
1da177e4
LT
1305 break;
1306 case 7:
1307 writeq(val64, &bar0->tx_fifo_partition_3);
b7c5678f
RV
1308 val64 = 0;
1309 j = 0;
1310 break;
1311 default:
1312 j++;
1da177e4
LT
1313 break;
1314 }
1315 }
1316
5e25b9dd
K
1317 /*
1318 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1319 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1320 */
d44570e4 1321 if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
5e25b9dd
K
1322 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1323
1da177e4
LT
1324 val64 = readq(&bar0->tx_fifo_partition_0);
1325 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
d44570e4 1326 &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1da177e4 1327
20346722
K
1328 /*
1329 * Initialization of Tx_PA_CONFIG register to ignore packet
1da177e4
LT
1330 * integrity checking.
1331 */
1332 val64 = readq(&bar0->tx_pa_cfg);
d44570e4
JP
1333 val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1334 TX_PA_CFG_IGNORE_SNAP_OUI |
1335 TX_PA_CFG_IGNORE_LLC_CTRL |
1336 TX_PA_CFG_IGNORE_L2_ERR;
1da177e4
LT
1337 writeq(val64, &bar0->tx_pa_cfg);
1338
dbedd44e 1339 /* Rx DMA initialization. */
1da177e4
LT
1340 val64 = 0;
1341 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
1342 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1343
1344 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1da177e4
LT
1345 }
1346 writeq(val64, &bar0->rx_queue_priority);
1347
20346722
K
1348 /*
1349 * Allocating equal share of memory to all the
1da177e4
LT
1350 * configured Rings.
1351 */
1352 val64 = 0;
541ae68f
K
1353 if (nic->device_type & XFRAME_II_DEVICE)
1354 mem_size = 32;
1355 else
1356 mem_size = 64;
1357
1da177e4
LT
1358 for (i = 0; i < config->rx_ring_num; i++) {
1359 switch (i) {
1360 case 0:
20346722
K
1361 mem_share = (mem_size / config->rx_ring_num +
1362 mem_size % config->rx_ring_num);
1da177e4
LT
1363 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1364 continue;
1365 case 1:
20346722 1366 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1367 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1368 continue;
1369 case 2:
20346722 1370 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1371 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1372 continue;
1373 case 3:
20346722 1374 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1375 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1376 continue;
1377 case 4:
20346722 1378 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1379 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1380 continue;
1381 case 5:
20346722 1382 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1383 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1384 continue;
1385 case 6:
20346722 1386 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1387 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1388 continue;
1389 case 7:
20346722 1390 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1391 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1392 continue;
1393 }
1394 }
1395 writeq(val64, &bar0->rx_queue_cfg);
1396
20346722 1397 /*
5e25b9dd 1398 * Filling Tx round robin registers
b7c5678f 1399 * as per the number of FIFOs for equal scheduling priority
1da177e4 1400 */
5e25b9dd
K
1401 switch (config->tx_fifo_num) {
1402 case 1:
b7c5678f 1403 val64 = 0x0;
5e25b9dd
K
1404 writeq(val64, &bar0->tx_w_round_robin_0);
1405 writeq(val64, &bar0->tx_w_round_robin_1);
1406 writeq(val64, &bar0->tx_w_round_robin_2);
1407 writeq(val64, &bar0->tx_w_round_robin_3);
1408 writeq(val64, &bar0->tx_w_round_robin_4);
1409 break;
1410 case 2:
b7c5678f 1411 val64 = 0x0001000100010001ULL;
5e25b9dd 1412 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1413 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1414 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1415 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1416 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1417 writeq(val64, &bar0->tx_w_round_robin_4);
1418 break;
1419 case 3:
b7c5678f 1420 val64 = 0x0001020001020001ULL;
5e25b9dd 1421 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1422 val64 = 0x0200010200010200ULL;
5e25b9dd 1423 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1424 val64 = 0x0102000102000102ULL;
5e25b9dd 1425 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1426 val64 = 0x0001020001020001ULL;
5e25b9dd 1427 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1428 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1429 writeq(val64, &bar0->tx_w_round_robin_4);
1430 break;
1431 case 4:
b7c5678f 1432 val64 = 0x0001020300010203ULL;
5e25b9dd 1433 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1434 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1435 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1436 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1437 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1438 writeq(val64, &bar0->tx_w_round_robin_4);
1439 break;
1440 case 5:
b7c5678f 1441 val64 = 0x0001020304000102ULL;
5e25b9dd 1442 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1443 val64 = 0x0304000102030400ULL;
5e25b9dd 1444 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1445 val64 = 0x0102030400010203ULL;
5e25b9dd 1446 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1447 val64 = 0x0400010203040001ULL;
5e25b9dd 1448 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1449 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1450 writeq(val64, &bar0->tx_w_round_robin_4);
1451 break;
1452 case 6:
b7c5678f 1453 val64 = 0x0001020304050001ULL;
5e25b9dd 1454 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1455 val64 = 0x0203040500010203ULL;
5e25b9dd 1456 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1457 val64 = 0x0405000102030405ULL;
5e25b9dd 1458 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1459 val64 = 0x0001020304050001ULL;
5e25b9dd 1460 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1461 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1462 writeq(val64, &bar0->tx_w_round_robin_4);
1463 break;
1464 case 7:
b7c5678f 1465 val64 = 0x0001020304050600ULL;
5e25b9dd 1466 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1467 val64 = 0x0102030405060001ULL;
5e25b9dd 1468 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1469 val64 = 0x0203040506000102ULL;
5e25b9dd 1470 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1471 val64 = 0x0304050600010203ULL;
5e25b9dd 1472 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1473 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1474 writeq(val64, &bar0->tx_w_round_robin_4);
1475 break;
1476 case 8:
b7c5678f 1477 val64 = 0x0001020304050607ULL;
5e25b9dd 1478 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1479 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1480 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1481 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1482 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1483 writeq(val64, &bar0->tx_w_round_robin_4);
1484 break;
1485 }
1486
b41477f3 1487 /* Enable all configured Tx FIFO partitions */
5d3213cc
AR
1488 val64 = readq(&bar0->tx_fifo_partition_0);
1489 val64 |= (TX_FIFO_PARTITION_EN);
1490 writeq(val64, &bar0->tx_fifo_partition_0);
1491
5e25b9dd 1492 /* Filling the Rx round robin registers as per the
0425b46a
SH
1493 * number of Rings and steering based on QoS with
1494 * equal priority.
1495 */
5e25b9dd
K
1496 switch (config->rx_ring_num) {
1497 case 1:
0425b46a
SH
1498 val64 = 0x0;
1499 writeq(val64, &bar0->rx_w_round_robin_0);
1500 writeq(val64, &bar0->rx_w_round_robin_1);
1501 writeq(val64, &bar0->rx_w_round_robin_2);
1502 writeq(val64, &bar0->rx_w_round_robin_3);
1503 writeq(val64, &bar0->rx_w_round_robin_4);
1504
5e25b9dd
K
1505 val64 = 0x8080808080808080ULL;
1506 writeq(val64, &bar0->rts_qos_steering);
1507 break;
1508 case 2:
0425b46a 1509 val64 = 0x0001000100010001ULL;
5e25b9dd 1510 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1511 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1512 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1513 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1514 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1515 writeq(val64, &bar0->rx_w_round_robin_4);
1516
1517 val64 = 0x8080808040404040ULL;
1518 writeq(val64, &bar0->rts_qos_steering);
1519 break;
1520 case 3:
0425b46a 1521 val64 = 0x0001020001020001ULL;
5e25b9dd 1522 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1523 val64 = 0x0200010200010200ULL;
5e25b9dd 1524 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1525 val64 = 0x0102000102000102ULL;
5e25b9dd 1526 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1527 val64 = 0x0001020001020001ULL;
5e25b9dd 1528 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1529 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1530 writeq(val64, &bar0->rx_w_round_robin_4);
1531
1532 val64 = 0x8080804040402020ULL;
1533 writeq(val64, &bar0->rts_qos_steering);
1534 break;
1535 case 4:
0425b46a 1536 val64 = 0x0001020300010203ULL;
5e25b9dd 1537 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1538 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1539 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1540 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1541 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1542 writeq(val64, &bar0->rx_w_round_robin_4);
1543
1544 val64 = 0x8080404020201010ULL;
1545 writeq(val64, &bar0->rts_qos_steering);
1546 break;
1547 case 5:
0425b46a 1548 val64 = 0x0001020304000102ULL;
5e25b9dd 1549 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1550 val64 = 0x0304000102030400ULL;
5e25b9dd 1551 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1552 val64 = 0x0102030400010203ULL;
5e25b9dd 1553 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1554 val64 = 0x0400010203040001ULL;
5e25b9dd 1555 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1556 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1557 writeq(val64, &bar0->rx_w_round_robin_4);
1558
1559 val64 = 0x8080404020201008ULL;
1560 writeq(val64, &bar0->rts_qos_steering);
1561 break;
1562 case 6:
0425b46a 1563 val64 = 0x0001020304050001ULL;
5e25b9dd 1564 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1565 val64 = 0x0203040500010203ULL;
5e25b9dd 1566 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1567 val64 = 0x0405000102030405ULL;
5e25b9dd 1568 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1569 val64 = 0x0001020304050001ULL;
5e25b9dd 1570 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1571 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1572 writeq(val64, &bar0->rx_w_round_robin_4);
1573
1574 val64 = 0x8080404020100804ULL;
1575 writeq(val64, &bar0->rts_qos_steering);
1576 break;
1577 case 7:
0425b46a 1578 val64 = 0x0001020304050600ULL;
5e25b9dd 1579 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1580 val64 = 0x0102030405060001ULL;
5e25b9dd 1581 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1582 val64 = 0x0203040506000102ULL;
5e25b9dd 1583 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1584 val64 = 0x0304050600010203ULL;
5e25b9dd 1585 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1586 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1587 writeq(val64, &bar0->rx_w_round_robin_4);
1588
1589 val64 = 0x8080402010080402ULL;
1590 writeq(val64, &bar0->rts_qos_steering);
1591 break;
1592 case 8:
0425b46a 1593 val64 = 0x0001020304050607ULL;
5e25b9dd 1594 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1595 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1596 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1597 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1598 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1599 writeq(val64, &bar0->rx_w_round_robin_4);
1600
1601 val64 = 0x8040201008040201ULL;
1602 writeq(val64, &bar0->rts_qos_steering);
1603 break;
1604 }
1da177e4
LT
1605
1606 /* UDP Fix */
1607 val64 = 0;
20346722 1608 for (i = 0; i < 8; i++)
1da177e4
LT
1609 writeq(val64, &bar0->rts_frm_len_n[i]);
1610
5e25b9dd
K
1611 /* Set the default rts frame length for the rings configured */
1612 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1613 for (i = 0 ; i < config->rx_ring_num ; i++)
1614 writeq(val64, &bar0->rts_frm_len_n[i]);
1615
1616 /* Set the frame length for the configured rings
1617 * desired by the user
1618 */
1619 for (i = 0; i < config->rx_ring_num; i++) {
1620 /* If rts_frm_len[i] == 0 then it is assumed that user not
1621 * specified frame length steering.
1622 * If the user provides the frame length then program
1623 * the rts_frm_len register for those values or else
1624 * leave it as it is.
1625 */
1626 if (rts_frm_len[i] != 0) {
1627 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
d44570e4 1628 &bar0->rts_frm_len_n[i]);
5e25b9dd
K
1629 }
1630 }
8a4bdbaa 1631
9fc93a41
SS
1632 /* Disable differentiated services steering logic */
1633 for (i = 0; i < 64; i++) {
1634 if (rts_ds_steer(nic, i, 0) == FAILURE) {
9e39f7c5
JP
1635 DBG_PRINT(ERR_DBG,
1636 "%s: rts_ds_steer failed on codepoint %d\n",
1637 dev->name, i);
9f74ffde 1638 return -ENODEV;
9fc93a41
SS
1639 }
1640 }
1641
20346722 1642 /* Program statistics memory */
1da177e4 1643 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1644
541ae68f
K
1645 if (nic->device_type == XFRAME_II_DEVICE) {
1646 val64 = STAT_BC(0x320);
1647 writeq(val64, &bar0->stat_byte_cnt);
1648 }
1649
20346722 1650 /*
1da177e4
LT
1651 * Initializing the sampling rate for the device to calculate the
1652 * bandwidth utilization.
1653 */
1654 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
d44570e4 1655 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1da177e4
LT
1656 writeq(val64, &bar0->mac_link_util);
1657
20346722
K
1658 /*
1659 * Initializing the Transmit and Receive Traffic Interrupt
1da177e4
LT
1660 * Scheme.
1661 */
1da177e4 1662
b7c5678f 1663 /* Initialize TTI */
5ce7f3f4 1664 if (SUCCESS != init_tti(nic, nic->last_link_state, true))
b7c5678f 1665 return -ENODEV;
1da177e4 1666
8a4bdbaa
SS
1667 /* RTI Initialization */
1668 if (nic->device_type == XFRAME_II_DEVICE) {
541ae68f 1669 /*
8a4bdbaa
SS
1670 * Programmed to generate Apprx 500 Intrs per
1671 * second
1672 */
1673 int count = (nic->config.bus_speed * 125)/4;
1674 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1675 } else
1676 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1677 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
d44570e4
JP
1678 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1679 RTI_DATA1_MEM_RX_URNG_C(0x30) |
1680 RTI_DATA1_MEM_RX_TIMER_AC_EN;
8a4bdbaa
SS
1681
1682 writeq(val64, &bar0->rti_data1_mem);
1683
1684 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1685 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1686 if (nic->config.intr_type == MSI_X)
d44570e4
JP
1687 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1688 RTI_DATA2_MEM_RX_UFC_D(0x40));
8a4bdbaa 1689 else
d44570e4
JP
1690 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1691 RTI_DATA2_MEM_RX_UFC_D(0x80));
8a4bdbaa 1692 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1693
8a4bdbaa 1694 for (i = 0; i < config->rx_ring_num; i++) {
d44570e4
JP
1695 val64 = RTI_CMD_MEM_WE |
1696 RTI_CMD_MEM_STROBE_NEW_CMD |
1697 RTI_CMD_MEM_OFFSET(i);
8a4bdbaa 1698 writeq(val64, &bar0->rti_command_mem);
1da177e4 1699
8a4bdbaa
SS
1700 /*
1701 * Once the operation completes, the Strobe bit of the
1702 * command register will be reset. We poll for this
1703 * particular condition. We wait for a maximum of 500ms
1704 * for the operation to complete, if it's not complete
1705 * by then we return error.
1706 */
1707 time = 0;
f957bcf0 1708 while (true) {
8a4bdbaa
SS
1709 val64 = readq(&bar0->rti_command_mem);
1710 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1711 break;
b6e3f982 1712
8a4bdbaa 1713 if (time > 10) {
9e39f7c5 1714 DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
8a4bdbaa 1715 dev->name);
9f74ffde 1716 return -ENODEV;
b6e3f982 1717 }
8a4bdbaa
SS
1718 time++;
1719 msleep(50);
1da177e4 1720 }
1da177e4
LT
1721 }
1722
20346722
K
1723 /*
1724 * Initializing proper values as Pause threshold into all
1da177e4
LT
1725 * the 8 Queues on Rx side.
1726 */
1727 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1728 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1729
1730 /* Disable RMAC PAD STRIPPING */
509a2671 1731 add = &bar0->mac_cfg;
1da177e4
LT
1732 val64 = readq(&bar0->mac_cfg);
1733 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1734 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1735 writel((u32) (val64), add);
1736 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1737 writel((u32) (val64 >> 32), (add + 4));
1738 val64 = readq(&bar0->mac_cfg);
1739
7d3d0439
RA
1740 /* Enable FCS stripping by adapter */
1741 add = &bar0->mac_cfg;
1742 val64 = readq(&bar0->mac_cfg);
1743 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1744 if (nic->device_type == XFRAME_II_DEVICE)
1745 writeq(val64, &bar0->mac_cfg);
1746 else {
1747 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1748 writel((u32) (val64), add);
1749 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1750 writel((u32) (val64 >> 32), (add + 4));
1751 }
1752
20346722
K
1753 /*
1754 * Set the time value to be inserted in the pause frame
1da177e4
LT
1755 * generated by xena.
1756 */
1757 val64 = readq(&bar0->rmac_pause_cfg);
1758 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1759 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1760 writeq(val64, &bar0->rmac_pause_cfg);
1761
20346722 1762 /*
1da177e4
LT
1763 * Set the Threshold Limit for Generating the pause frame
1764 * If the amount of data in any Queue exceeds ratio of
1765 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1766 * pause frame is generated
1767 */
1768 val64 = 0;
1769 for (i = 0; i < 4; i++) {
d44570e4
JP
1770 val64 |= (((u64)0xFF00 |
1771 nic->mac_control.mc_pause_threshold_q0q3)
1772 << (i * 2 * 8));
1da177e4
LT
1773 }
1774 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1775
1776 val64 = 0;
1777 for (i = 0; i < 4; i++) {
d44570e4
JP
1778 val64 |= (((u64)0xFF00 |
1779 nic->mac_control.mc_pause_threshold_q4q7)
1780 << (i * 2 * 8));
1da177e4
LT
1781 }
1782 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1783
20346722
K
1784 /*
1785 * TxDMA will stop Read request if the number of read split has
1da177e4
LT
1786 * exceeded the limit pointed by shared_splits
1787 */
1788 val64 = readq(&bar0->pic_control);
1789 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1790 writeq(val64, &bar0->pic_control);
1791
863c11a9
AR
1792 if (nic->config.bus_speed == 266) {
1793 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1794 writeq(0x0, &bar0->read_retry_delay);
1795 writeq(0x0, &bar0->write_retry_delay);
1796 }
1797
541ae68f
K
1798 /*
1799 * Programming the Herc to split every write transaction
1800 * that does not start on an ADB to reduce disconnects.
1801 */
1802 if (nic->device_type == XFRAME_II_DEVICE) {
19a60522
SS
1803 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1804 MISC_LINK_STABILITY_PRD(3);
863c11a9
AR
1805 writeq(val64, &bar0->misc_control);
1806 val64 = readq(&bar0->pic_control2);
b7b5a128 1807 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
863c11a9 1808 writeq(val64, &bar0->pic_control2);
541ae68f 1809 }
c92ca04b
AR
1810 if (strstr(nic->product_name, "CX4")) {
1811 val64 = TMAC_AVG_IPG(0x17);
1812 writeq(val64, &bar0->tmac_avg_ipg);
a371a07d
K
1813 }
1814
1da177e4
LT
1815 return SUCCESS;
1816}
a371a07d
K
1817#define LINK_UP_DOWN_INTERRUPT 1
1818#define MAC_RMAC_ERR_TIMER 2
1819
1ee6dd77 1820static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d
K
1821{
1822 if (nic->device_type == XFRAME_II_DEVICE)
1823 return LINK_UP_DOWN_INTERRUPT;
1824 else
1825 return MAC_RMAC_ERR_TIMER;
1826}
8116f3cf 1827
9caab458
SS
1828/**
1829 * do_s2io_write_bits - update alarm bits in alarm register
1830 * @value: alarm bits
1831 * @flag: interrupt status
1832 * @addr: address value
1833 * Description: update alarm bits in alarm register
1834 * Return Value:
1835 * NONE.
1836 */
1837static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1838{
1839 u64 temp64;
1840
1841 temp64 = readq(addr);
1842
d44570e4
JP
1843 if (flag == ENABLE_INTRS)
1844 temp64 &= ~((u64)value);
9caab458 1845 else
d44570e4 1846 temp64 |= ((u64)value);
9caab458
SS
1847 writeq(temp64, addr);
1848}
1da177e4 1849
/**
 * en_dis_err_alarms - enable or disable error/alarm interrupt sources
 * @nic: device private structure
 * @mask: OR of *_INTR bits selecting which hardware blocks to touch
 * @flag: ENABLE_INTRS or DISABLE_INTRS
 *
 * For every block selected in @mask, programs that block's top-level
 * interrupt mask plus all of its per-unit error mask registers via
 * do_s2io_write_bits(), and accumulates the block's bit into the
 * general interrupt mask cached in nic->general_int_mask.
 * All interrupts are masked globally for the duration of the update.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything while the individual alarm masks are rewritten */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/* Link state alarm only when the RMAC error timer scheme
		 * is in use (non-Herc); Herc uses the GPIO interrupt.
		 */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	/* NOTE(review): this deliberately discards the mask built above,
	 * leaving alarm interrupts disabled at the general level. */
	nic->general_int_mask = 0;
}
d44570e4 1976
20346722
K
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm mask computed by en_dis_err_alarms() */
	intr_mask = nic->general_int_mask;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/* Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* Finally apply the accumulated mask to the general register:
	 * enabling clears the selected bits, disabling masks everything.
	 */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the value the hardware actually holds */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2066
19a60522
SS
2067/**
2068 * verify_pcc_quiescent- Checks for PCC quiescent state
d0ea5cbd
JB
2069 * @sp : private member of the device structure, which is a pointer to the
2070 * s2io_nic structure.
2071 * @flag: boolean controlling function path
19a60522
SS
2072 * Return: 1 If PCC is quiescence
2073 * 0 If PCC is not quiescence
2074 */
1ee6dd77 2075static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
20346722 2076{
19a60522 2077 int ret = 0, herc;
1ee6dd77 2078 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522 2079 u64 val64 = readq(&bar0->adapter_status);
8a4bdbaa 2080
19a60522 2081 herc = (sp->device_type == XFRAME_II_DEVICE);
20346722 2082
f957bcf0 2083 if (flag == false) {
44c10138 2084 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
19a60522 2085 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2086 ret = 1;
19a60522
SS
2087 } else {
2088 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2089 ret = 1;
20346722
K
2090 }
2091 } else {
44c10138 2092 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
5e25b9dd 2093 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
19a60522 2094 ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2095 ret = 1;
5e25b9dd
K
2096 } else {
2097 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
19a60522 2098 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2099 ret = 1;
20346722
K
2100 }
2101 }
2102
2103 return ret;
2104}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether adapter enable bit was written or not the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 If xena is quiescence
 * 0 If Xena is not quiescence
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	/* Single status snapshot; every readiness bit is tested against it */
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE &&
	    mode != PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
		return 0;
	}
	/* RC_PRC_QUIESCENT is multi-bit: every PRC must be idle */
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
		return 0;
	}
	return 1;
}
2175
2176/**
2177 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2178 * @sp: Pointer to device specifc structure
20346722 2179 * Description :
1da177e4
LT
2180 * New procedure to clear mac address reading problems on Alpha platforms
2181 *
2182 */
2183
d44570e4 2184static void fix_mac_address(struct s2io_nic *sp)
1da177e4 2185{
1ee6dd77 2186 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
2187 int i = 0;
2188
2189 while (fix_mac[i] != END_SIGN) {
2190 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 2191 udelay(10);
d83d282b 2192 (void) readq(&bar0->gpio_control);
1da177e4
LT
2193 }
2194}
2195
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called,all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the PRC at the first Rx block of this ring */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the module parameter disabling VLAN tag stripping */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the code CLEARS ADAPTER_ECC_EN here; presumably the
	 * bit is active-low or names an override — confirm against the
	 * Xframe register spec before changing. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
fed5eccd
AR
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo data pointer
 * @txdlp: descriptor
 * @get_off: unused
 *
 * Walks the TxD list starting at @txdlp, DMA-unmaps the head buffer and
 * every fragment, zeroes the descriptors and returns the attached skb
 * (or NULL if none). The caller owns the returned skb.
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* Skip the optional leading UFO in-band descriptor */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		dma_unmap_single(&nic->pdev->dev,
				 (dma_addr_t)txds->Buffer_Pointer,
				 sizeof(u64), DMA_TO_DEVICE);
		txds++;
	}

	/* Host_Control of the first real descriptor holds the skb pointer */
	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear part, then each fragment's page mapping */
	dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
			 skb_headlen(skb), DMA_TO_DEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			dma_unmap_page(&nic->pdev->dev,
				       (dma_addr_t)txds->Buffer_Pointer,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}
	}
	/* Clear the whole descriptor list for reuse */
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
1da177e4 2357
20346722
K
/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct TxD *txdp;
	int i, j;
	int cnt = 0;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		struct fifo_info *fifo = &mac_control->fifos[i];
		unsigned long flags;

		/* Hold the fifo lock while reclaiming its descriptors */
		spin_lock_irqsave(&fifo->tx_lock, flags);
		for (j = 0; j < tx_cfg->fifo_len; j++) {
			txdp = fifo->list_info[j].list_virt_addr;
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			if (skb) {
				swstats->mem_freed += skb->truesize;
				dev_kfree_skb(skb);
				cnt++;
			}
		}
		/* NOTE(review): cnt is never reset, so this logs the
		 * cumulative count across FIFOs, not the per-FIFO count. */
		DBG_PRINT(INTR_DBG,
			  "%s: forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_put_info.offset = 0;
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
	}
}
2401
20346722
K
2402/**
2403 * stop_nic - To stop the nic
d0ea5cbd 2404 * @nic : device private variable.
20346722
K
2405 * Description:
2406 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2407 * function does. This function is called to stop the device.
2408 * Return Value:
2409 * void.
2410 */
2411
2412static void stop_nic(struct s2io_nic *nic)
2413{
1ee6dd77 2414 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4 2415 register u64 val64 = 0;
5d3213cc 2416 u16 interruptible;
1da177e4
LT
2417
2418 /* Disable all interrupts */
9caab458 2419 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
e960fc5c 2420 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 2421 interruptible |= TX_PIC_INTR;
1da177e4
LT
2422 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2423
5d3213cc
AR
2424 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2425 val64 = readq(&bar0->adapter_control);
2426 val64 &= ~(ADAPTER_CNTL_EN);
2427 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2428}
2429
20346722
K
/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic : device private variable.
 * @ring: per ring structure
 * @from_card_up: If this is true, we will map the buffer to get
 * the dma address for buf0 and buf1 to give it to the card.
 * Else we will sync the already mapped buffer to give it to the card.
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * address of these buffers into the RxD buffer pointers, so that the NIC
 * can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. Five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 Header,
 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 * is split into 3 fragments. As of now only single buffer mode is
 * supported.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of descriptors we still need to replenish */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* Stop if the put pointer has caught up with the get
		 * pointer and the descriptor still holds an skb. */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* End of block: wrap to the next block in the ring */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the adapter — nothing to do */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = netdev_alloc_skb(nic->dev, size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Hand any batched descriptors to the adapter
			 * before bailing out. */
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				dma_map_single(&ring->pdev->dev, skb->data,
					       size - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Align skb->data to the next ALIGN_SIZE boundary */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					dma_map_single(&ring->pdev->dev,
						       ba->ba_0, BUF0_LEN,
						       DMA_FROM_DEVICE);
				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				dma_sync_single_for_device(&ring->pdev->dev,
							   (dma_addr_t)rxdp3->Buffer0_ptr,
							   BUF0_LEN,
							   DMA_FROM_DEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
								    skb->data,
								    ring->mtu + 4,
								    DMA_FROM_DEVICE);

				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						dma_map_single(&ring->pdev->dev,
							       ba->ba_1,
							       BUF1_LEN,
							       DMA_FROM_DEVICE);

					if (dma_mapping_error(&nic->pdev->dev,
							      rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping
						 * before the error exit. */
						dma_unmap_single(&ring->pdev->dev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 DMA_FROM_DEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Ownership of non-batch-leader descriptors can be given
		 * immediately; batch leaders are deferred (see below). */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		dma_wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2659
da6971d8
AR
/*
 * free_rxd_blk - release every skb/DMA mapping in one Rx descriptor block
 * @sp: device private structure
 * @ring_no: index of the Rx ring the block belongs to
 * @blk: index of the descriptor block within that ring
 *
 * Walks all RxDs of the block, unmaps the DMA buffers according to the
 * descriptor mode in use (1-buffer vs 3-buffer), frees the attached skb
 * and clears the descriptor. Adjusts the ring's rx_bufs_left count and
 * the driver's mem_freed software statistic for each skb released.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
			rx_blocks[blk].rxds[j].virt_addr;
		/* Host_Control carries the skb pointer set at fill time;
		 * a NULL entry means this descriptor holds no buffer. */
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (!skb)
			continue;
		if (sp->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1 *)rxdp;
			/* Single-buffer mode: one mapping covering the whole
			 * frame (MTU + Ethernet/LLC/SNAP headers). */
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
					 DMA_FROM_DEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3 *)rxdp;
			/* 3-buffer mode: unmap the three separate buffers. */
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer0_ptr,
					 BUF0_LEN, DMA_FROM_DEVICE);
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer1_ptr,
					 BUF1_LEN, DMA_FROM_DEVICE);
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 dev->mtu + 4, DMA_FROM_DEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
2705
1da177e4 2706/**
20346722 2707 * free_rx_buffers - Frees all Rx buffers
1da177e4 2708 * @sp: device private variable.
20346722 2709 * Description:
1da177e4
LT
2710 * This function will free all Rx buffers allocated by host.
2711 * Return Value:
2712 * NONE.
2713 */
2714
2715static void free_rx_buffers(struct s2io_nic *sp)
2716{
2717 struct net_device *dev = sp->dev;
da6971d8 2718 int i, blk = 0, buf_cnt = 0;
ffb5df6c
JP
2719 struct config_param *config = &sp->config;
2720 struct mac_info *mac_control = &sp->mac_control;
1da177e4
LT
2721
2722 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
2723 struct ring_info *ring = &mac_control->rings[i];
2724
da6971d8 2725 for (blk = 0; blk < rx_ring_sz[i]; blk++)
d44570e4 2726 free_rxd_blk(sp, i, blk);
1da177e4 2727
13d866a9
JP
2728 ring->rx_curr_put_info.block_index = 0;
2729 ring->rx_curr_get_info.block_index = 0;
2730 ring->rx_curr_put_info.offset = 0;
2731 ring->rx_curr_get_info.offset = 0;
2732 ring->rx_bufs_left = 0;
9e39f7c5 2733 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
1da177e4
LT
2734 dev->name, buf_cnt, i);
2735 }
2736}
2737
8d8bb39b 2738static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
f61e0a35 2739{
8d8bb39b 2740 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
9e39f7c5
JP
2741 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2742 ring->dev->name);
f61e0a35
SH
2743 }
2744 return 0;
2745}
2746
/**
 * s2io_poll_msix - Rx NAPI poll routine for per-ring MSI-X vectors
 * @napi : pointer to the napi structure (embedded in struct ring_info).
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in a interrupt context
 * also It will process only a given number of packets.
 * Return value:
 * Number of packets processed (may be less than @budget, in which case
 * the NAPI context is completed and the ring's MSI-X vector re-enabled).
 */

static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	/* Nothing to do if the adapter has been taken down. */
	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	/* Replenish the descriptors consumed above. */
	s2io_chk_rx_buffers(nic, ring);

	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);
		/* Re Enable MSI-Rx Vector: write the per-ring mask byte,
		 * then read it back to flush the posted write. */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		val8 = readb(addr);
	}
	return pkts_processed;
}
d44570e4 2788
f61e0a35
SH
/*
 * s2io_poll_inta - Rx NAPI poll routine for the shared (INTA) interrupt
 * @napi: napi structure embedded in struct s2io_nic
 * @budget: maximum number of packets to process in this pass
 *
 * Services every configured Rx ring in turn, spreading the budget across
 * them, and refills each ring's buffers as it goes. When the whole budget
 * was not consumed, completes NAPI and unmasks the Rx traffic interrupt.
 * Returns the number of packets processed.
 */
static int s2io_poll_inta(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	int pkts_processed = 0;
	int ring_pkts_processed, i;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];
		ring_pkts_processed = rx_intr_handler(ring, budget);
		s2io_chk_rx_buffers(nic, ring);
		pkts_processed += ring_pkts_processed;
		/* Remaining budget carries over to the next ring. */
		budget -= ring_pkts_processed;
		if (budget <= 0)
			break;
	}
	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);
		/* Re enable the Rx interrupts for the ring; the readl
		 * flushes the posted write. */
		writeq(0, &bar0->rx_traffic_mask);
		readl(&bar0->rx_traffic_mask);
	}
	return pkts_processed;
}
20346722 2819
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	const int irq = nic->pdev->irq;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Do not touch the hardware if the PCI channel is down. */
	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(irq);

	/* Acknowledge all pending Rx/Tx traffic interrupts. */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		rx_intr_handler(ring, 0);
	}

	/* Refill the Rx rings drained above. */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(irq);
}
#endif
2875
/**
 * rx_intr_handler - Rx interrupt handler
 * @ring_data: per ring structure.
 * @budget: budget for napi processing.
 * Description:
 * If the interrupt is because of a received frame or if the
 * receive ring contains fresh as yet un-processed frames,this function is
 * called. It picks out the RxD at which place the last Rx processing had
 * stopped and sends the skb to the OSM's Rx handler and then increments
 * the offset.
 * Return Value:
 * No. of napi packets processed.
 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	if (budget <= 0)
		return napi_pkts;

	/* Resume from where the previous pass stopped. */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Process descriptors until one is still owned by the adapter. */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
				  ring_data->dev->name);
			return 0;
		}
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1 *)rxdp;
			dma_unmap_single(&ring_data->pdev->dev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 DMA_FROM_DEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3 *)rxdp;
			/* Buffer0 (header) is only synced — it stays mapped
			 * for reuse; Buffer2 (payload) is fully unmapped. */
			dma_sync_single_for_cpu(&ring_data->pdev->dev,
						(dma_addr_t)rxdp3->Buffer0_ptr,
						BUF0_LEN, DMA_FROM_DEVICE);
			dma_unmap_single(&ring_data->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4, DMA_FROM_DEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		/* Wrap to the next block when this one is exhausted. */
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
20346722
K
2983
/**
 * tx_intr_handler - Transmit interrupt handler
 * @fifo_data : fifo data pointer
 * Description:
 * If an interrupt was raised to indicate DMA complete of the
 * Tx packet, this function is called. It identifies the last TxD
 * whose buffer was freed and frees all skbs whose data have already
 * DMA'ed into the NICs internal memory.
 * Return Value:
 * NONE
 */

static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Best-effort: if another context holds the fifo lock, simply
	 * return and let that context (or the next interrupt) reap. */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
	/* Reap completed descriptors: stop at one the NIC still owns,
	 * at the put cursor, or at an empty Host_Control slot. */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				swstats->parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch (err_mask) {
			case 2:
				swstats->tx_buf_abort_cnt++;
				break;

			case 3:
				swstats->tx_desc_abort_cnt++;
				break;

			case 7:
				swstats->tx_parity_err_cnt++;
				break;

			case 10:
				swstats->tx_link_loss_cnt++;
				break;

			case 15:
				swstats->tx_list_proc_err_cnt++;
				break;
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
				  __func__);
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		swstats->mem_freed += skb->truesize;
		dev_consume_skb_irq(skb);

		/* Advance the get cursor, wrapping at fifo_len + 1. */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset = get_info.offset;
	}

	/* Descriptors were reclaimed; wake the queue if it was stopped. */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3074
bd1034f0
AR
/**
 * s2io_mdio_write - Function to write in to MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @value : data value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to write values to the MDIO registers.
 * Performs the three-step MDIO sequence: address transaction, write
 * transaction, then a read-back transaction, each latched by setting
 * MDIO_CTRL_START_TRANS and followed by a 100us delay.
 * Return value:
 * NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
			    struct net_device *dev)
{
	u64 val64;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_MDIO_DATA(value) |
		MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read-back transaction to complete the sequence. */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}
3121
/**
 * s2io_mdio_read - Function to read from MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to read values from the MDIO registers.
 * Issues an address transaction followed by a read transaction and
 * extracts the 16-bit data from bits 31:16 of the control register.
 * Return value:
 * The 16-bit value read (in a u64).
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
			 | MDIO_MMD_DEV_ADDR(mmd_type)
			 | MDIO_MMS_PRT_ADDR(0x0));
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs: data lives in bits 31:16. */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
d44570e4 3163
bd1034f0
AR
3164/**
3165 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
fbfecd37 3166 * @counter : counter value to be updated
d0ea5cbd
JB
3167 * @regs_stat : registers status
3168 * @index : index
bd1034f0
AR
3169 * @flag : flag to indicate the status
3170 * @type : counter type
3171 * Description:
3172 * This function is to check the status of the xpak counters value
3173 * NONE
3174 */
3175
d44570e4
JP
3176static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3177 u16 flag, u16 type)
bd1034f0
AR
3178{
3179 u64 mask = 0x3;
3180 u64 val64;
3181 int i;
d44570e4 3182 for (i = 0; i < index; i++)
bd1034f0
AR
3183 mask = mask << 0x2;
3184
d44570e4 3185 if (flag > 0) {
bd1034f0
AR
3186 *counter = *counter + 1;
3187 val64 = *regs_stat & mask;
3188 val64 = val64 >> (index * 0x2);
3189 val64 = val64 + 1;
d44570e4
JP
3190 if (val64 == 3) {
3191 switch (type) {
bd1034f0 3192 case 1:
9e39f7c5
JP
3193 DBG_PRINT(ERR_DBG,
3194 "Take Xframe NIC out of service.\n");
3195 DBG_PRINT(ERR_DBG,
3196"Excessive temperatures may result in premature transceiver failure.\n");
d44570e4 3197 break;
bd1034f0 3198 case 2:
9e39f7c5
JP
3199 DBG_PRINT(ERR_DBG,
3200 "Take Xframe NIC out of service.\n");
3201 DBG_PRINT(ERR_DBG,
3202"Excessive bias currents may indicate imminent laser diode failure.\n");
d44570e4 3203 break;
bd1034f0 3204 case 3:
9e39f7c5
JP
3205 DBG_PRINT(ERR_DBG,
3206 "Take Xframe NIC out of service.\n");
3207 DBG_PRINT(ERR_DBG,
3208"Excessive laser output power may saturate far-end receiver.\n");
d44570e4 3209 break;
bd1034f0 3210 default:
d44570e4
JP
3211 DBG_PRINT(ERR_DBG,
3212 "Incorrect XPAK Alarm type\n");
bd1034f0
AR
3213 }
3214 val64 = 0x0;
3215 }
3216 val64 = val64 << (index * 0x2);
3217 *regs_stat = (*regs_stat & (~mask)) | (val64);
3218
3219 } else {
3220 *regs_stat = *regs_stat & (~mask);
3221 }
3222}
3223
/**
 * s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function is to update the status of the xpak counters value
 * by reading the XPAK alarm and warning flag registers over MDIO and
 * folding them into the driver's xpakStat block.
 * Return value:
 * NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Check the communication with the MDIO slave */
	addr = MDIO_CTRL1;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
	/* All-ones or all-zeros indicates no responding slave. */
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG,
			  "ERR: MDIO slave access failed - Returned %llx\n",
			  (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of control reg 1 */
	if (val64 != MDIO_CTRL1_SPEED10G) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
			  "Returned: %llx- Expected: 0x%x\n",
			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Alarm high bits feed the 2-bit consecutive-hit tracker;
	 * alarm low bits are simple counters. */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
			      &xstats->xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		xstats->alarm_transceiver_temp_low++;

	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
			      &xstats->xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		xstats->alarm_laser_bias_current_low++;

	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
			      &xstats->xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		xstats->alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	if (CHECKBIT(val64, 0x7))
		xstats->warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		xstats->warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		xstats->warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		xstats->warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		xstats->warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		xstats->warn_laser_output_power_low++;
}
3322
20346722 3323/**
1da177e4 3324 * wait_for_cmd_complete - waits for a command to complete.
d0ea5cbd
JB
3325 * @addr: address
3326 * @busy_bit: bit to check for busy
3327 * @bit_state: state to check
0bf4d9af
YY
3328 * @may_sleep: parameter indicates if sleeping when waiting for
3329 * command complete
20346722
K
3330 * Description: Function that waits for a command to Write into RMAC
3331 * ADDR DATA registers to be completed and returns either success or
3332 * error depending on whether the command was complete or not.
1da177e4
LT
3333 * Return value:
3334 * SUCCESS on success and FAILURE on failure.
3335 */
3336
9fc93a41 3337static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
5ce7f3f4 3338 int bit_state, bool may_sleep)
1da177e4 3339{
9fc93a41 3340 int ret = FAILURE, cnt = 0, delay = 1;
1da177e4
LT
3341 u64 val64;
3342
9fc93a41
SS
3343 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3344 return FAILURE;
3345
3346 do {
c92ca04b 3347 val64 = readq(addr);
9fc93a41
SS
3348 if (bit_state == S2IO_BIT_RESET) {
3349 if (!(val64 & busy_bit)) {
3350 ret = SUCCESS;
3351 break;
3352 }
3353 } else {
2d146eb1 3354 if (val64 & busy_bit) {
9fc93a41
SS
3355 ret = SUCCESS;
3356 break;
3357 }
1da177e4 3358 }
c92ca04b 3359
5ce7f3f4 3360 if (!may_sleep)
9fc93a41 3361 mdelay(delay);
c92ca04b 3362 else
9fc93a41 3363 msleep(delay);
c92ca04b 3364
9fc93a41
SS
3365 if (++cnt >= 10)
3366 delay = 50;
3367 } while (cnt < 20);
1da177e4
LT
3368 return ret;
3369}
49ce9c2c 3370/**
19a60522
SS
3371 * check_pci_device_id - Checks if the device id is supported
3372 * @id : device id
3373 * Description: Function to check if the pci device id is supported by driver.
3374 * Return value: Actual device id if supported else PCI_ANY_ID
3375 */
3376static u16 check_pci_device_id(u16 id)
3377{
3378 switch (id) {
3379 case PCI_DEVICE_ID_HERC_WIN:
3380 case PCI_DEVICE_ID_HERC_UNI:
3381 return XFRAME_II_DEVICE;
3382 case PCI_DEVICE_ID_S2IO_UNI:
3383 case PCI_DEVICE_ID_S2IO_WIN:
3384 return XFRAME_I_DEVICE;
3385 default:
3386 return PCI_ANY_ID;
3387 }
3388}
1da177e4 3389
20346722
K
/**
 * s2io_reset - Resets the card.
 * @sp : private member of the device structure.
 * Description: Function to Reset the card. This function then also
 * restores the previously saved PCI configuration space registers as
 * the card reset also resets the configuration space.
 * Return value:
 * void.
 */

static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 boards need extra settle time after reset. */
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		/* Re-read the device id; a recognized id means config
		 * space has come back after the reset. */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3512
/**
 * s2io_set_swapper - to set the swapper control on the card
 * @sp : private member of the device structure,
 * pointer to the s2io_nic structure.
 * Description: Function to set the swapper control on the card
 * correctly depending on the 'endianness' of the system.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		/* Candidate read-path swapper settings, tried in order. */
		static const u64 value[] = {
			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
			0x8100008181000081ULL,	/* FE=1, SE=0 */
			0x4200004242000042ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		/* Try each setting until the feedback pattern reads back. */
		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
				  "feedback read %llx\n",
				  dev->name, (unsigned long long)val64);
			return FAILURE;
		}
		/* Remember the read-path setting that worked. */
		valr = value[i];
	} else {
		/* Reads already come back correctly; keep current setting. */
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now verify the write path via the xmsi_address scratch register. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		int i = 0;
		/* Candidate write-path swapper settings, tried in order. */
		static const u64 value[] = {
			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
			0x0081810000818100ULL,	/* FE=1, SE=0 */
			0x0042420000424200ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		/* Combine each candidate with the read-path bits (valr). */
		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG,
				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the high 16 bits; rebuild the enable flags below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG,
			  "%s: Endian settings are wrong, feedback read %llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	return SUCCESS;
}
3654
1ee6dd77 3655static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3656{
1ee6dd77 3657 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3658 u64 val64;
3659 int ret = 0, cnt = 0;
3660
3661 do {
3662 val64 = readq(&bar0->xmsi_access);
b7b5a128 3663 if (!(val64 & s2BIT(15)))
cc6e7c44
RA
3664 break;
3665 mdelay(1);
3666 cnt++;
d44570e4 3667 } while (cnt < 5);
cc6e7c44
RA
3668 if (cnt == 5) {
3669 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3670 ret = 1;
3671 }
3672
3673 return ret;
3674}
3675
/*
 * Write the saved MSI-X address/data pairs back into the NIC after a
 * reset. Counterpart of store_xmsi_data(). No-op on Xframe I hardware.
 */
static void restore_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;
	int i, msix_index;

	/* Xframe I has no MSI-X state to restore. */
	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Vector 0 maps to index 0; vector i maps to (i-1)*8 + 1. */
		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		/* s2BIT(7): write direction; s2BIT(15): start transaction. */
		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index))
			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
				  __func__, msix_index);
	}
}
3696
/*
 * Read the MSI-X address/data pairs out of the NIC and cache them in
 * nic->msix_info[] so restore_xmsi_data() can reprogram them after a
 * reset. No-op on Xframe I hardware.
 */
static void store_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i, msix_index;

	/* Xframe I has no MSI-X state to save. */
	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	/* Store and display */
	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Vector 0 maps to index 0; vector i maps to (i-1)*8 + 1. */
		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
		/* Read transaction: s2BIT(15) starts it (no write bit). */
		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
				  __func__, msix_index);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		/* Only cache entries the hardware actually populated. */
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
3724
/*
 * Allocate and populate the MSI-X vector tables, program the RX
 * interrupt steering register and enable MSI-X on the PCI device.
 * Entry 0 is the alarm/TX vector (MSIX_ALARM_TYPE); entries 1.. map
 * one per RX ring (MSIX_RING_TYPE).
 * Returns 0 on success, -ENOMEM on allocation or enable failure.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		/* Roll back the first allocation and its accounting. */
		kfree(nic->entries);
		swstats->mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Entry 0: alarm/TX vector, argument is the fifo array. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining entries use the sparse (i-1)*8+1 vector numbering. */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each RX ring's interrupt to its own MSI-X vector. */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat);	/* read back to flush the posted write */

	/* Require exactly num_entries vectors (min == max). */
	ret = pci_enable_msix_range(nic->pdev, nic->entries,
				    nic->num_entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret < 0) {
		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
		kfree(nic->entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct msix_entry);
		kfree(nic->s2io_entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct s2io_msix_entry);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3808
8abc4d5b 3809/* Handle software interrupt used during MSI(X) test */
33390a70 3810static irqreturn_t s2io_test_intr(int irq, void *dev_id)
8abc4d5b
SS
3811{
3812 struct s2io_nic *sp = dev_id;
3813
3814 sp->msi_detected = 1;
3815 wake_up(&sp->msi_wait);
3816
3817 return IRQ_HANDLED;
3818}
3819
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Vector 1 is the first data vector (entry 0 is the alarm). */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/* Program a one-shot scheduled interrupt routed to MSI 1. */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to 100 ms to arrive. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n",
			  sp->dev->name, pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the interrupt control register we clobbered above. */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
18b2b7bd
SH
3862
3863static void remove_msix_isr(struct s2io_nic *sp)
3864{
3865 int i;
3866 u16 msi_control;
3867
f61e0a35 3868 for (i = 0; i < sp->num_entries; i++) {
d44570e4 3869 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
18b2b7bd
SH
3870 int vector = sp->entries[i].vector;
3871 void *arg = sp->s2io_entries[i].arg;
3872 free_irq(vector, arg);
3873 }
3874 }
3875
3876 kfree(sp->entries);
3877 kfree(sp->s2io_entries);
3878 sp->entries = NULL;
3879 sp->s2io_entries = NULL;
3880
3881 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3882 msi_control &= 0xFFFE; /* Disable MSI */
3883 pci_write_config_word(sp->pdev, 0x42, msi_control);
3884
3885 pci_disable_msix(sp->pdev);
3886}
3887
/* Release the legacy (INTA) interrupt line registered on pdev->irq. */
static void remove_inta_isr(struct s2io_nic *sp)
{
	free_irq(sp->pdev->irq, sp->dev);
}
3892
1da177e4
LT
3893/* ********************************************************* *
3894 * Functions defined below concern the OS part of the driver *
3895 * ********************************************************* */
3896
/**
 * s2io_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_open(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		goto hw_init_failed;
	}

	/* Program the station MAC address; undo card_up on failure. */
	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		s2io_card_down(sp);
		err = -ENODEV;
		goto hw_init_failed;
	}
	s2io_start_all_tx_queue(sp);
	return 0;

hw_init_failed:
	/* Free the MSI-X tables allocated earlier in the MSI_X path. */
	if (sp->config.intr_type == MSI_X) {
		if (sp->entries) {
			kfree(sp->entries);
			swstats->mem_freed += sp->num_entries *
				sizeof(struct msix_entry);
		}
		if (sp->s2io_entries) {
			kfree(sp->s2io_entries);
			swstats->mem_freed += sp->num_entries *
				sizeof(struct s2io_msix_entry);
		}
	}
	return err;
}
3954
/**
 * s2io_close -close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point,thus it's usually referred to
 * as the close function.Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_close(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct config_param *config = &sp->config;
	u64 tmp64;
	int offset;

	/* Return if the device is already closed *
	 * Can happen when s2io_card_up failed in change_mtu *
	 */
	if (!is_s2io_card_up(sp))
		return 0;

	s2io_stop_all_tx_queue(sp);
	/* delete all populated mac entries */
	/* Offset 0 is skipped; entries equal to the disable sentinel are
	 * already empty. */
	for (offset = 1; offset < config->max_mc_addr; offset++) {
		tmp64 = do_s2io_read_unicast_mc(sp, offset);
		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
			do_s2io_delete_unicast_mc(sp, tmp64);
	}

	s2io_card_down(sp);

	return 0;
}
3993
/**
 * s2io_xmit - Tx entry point of the driver
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 * Description :
 * This function is the Tx entry point of the driver. S2IO NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when device can't queue the pkt,just the trans_start variable will
 * not be updated.
 * Return value:
 * NETDEV_TX_OK when the packet is consumed (sent or dropped) and
 * NETDEV_TX_BUSY when the chosen queue is stopped.
 */

static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length buffers outright. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Drop packets while the card is resetting/down. */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	queue = 0;
	if (skb_vlan_tag_present(skb))
		vlan_tag = skb_vlan_tag_get(skb);
	/*
	 * FIFO steering: hash TCP/UDP port numbers into the TCP or UDP
	 * fifo group (default steering) or map skb->priority to a fifo
	 * (priority steering).
	 */
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			if (!ip_is_fragment(ip)) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						       ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					/* Large UDP frames get per-list intr. */
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
			[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	spin_lock_irqsave(&fifo->tx_lock, flags);

	/* Back off if the selected queue is currently stopped. */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16)fifo->tx_curr_put_info.offset;
	get_off = (u16)fifo->tx_curr_get_info.offset;
	txdp = fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	/* LSO and checksum-offload flags on the first descriptor. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
				    TXD_TX_CKO_TCP_EN |
				    TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* DMA-map the linear part of the skb into the first buffer. */
	frg_len = skb_headlen(skb);
	txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
					      frg_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
		goto pci_map_failed;

	/* Host_Control carries the skb pointer for completion handling. */
	txdp->Host_Control = (unsigned long)skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!skb_frag_size(frag))
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
							     frag, 0,
							     skb_frag_size(frag),
							     DMA_TO_DEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	/* Ring the doorbell: hand the descriptor list to the NIC. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Advance the put pointer with wraparound. */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		swstats->fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	swstats->mem_allocated += skb->truesize;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	/* Reap completed descriptors inline when running MSI-X. */
	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return NETDEV_TX_OK;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return NETDEV_TX_OK;
}
4193
25fff88e 4194static void
e84a2ac9 4195s2io_alarm_handle(struct timer_list *t)
25fff88e 4196{
e84a2ac9 4197 struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
8116f3cf 4198 struct net_device *dev = sp->dev;
25fff88e 4199
8116f3cf 4200 s2io_handle_errors(dev);
25fff88e
K
4201 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4202}
4203
/* Per-ring MSI-X interrupt handler for the RX path. */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Spurious interrupt while the card is down: just acknowledge. */
	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/*
		 * Mask this ring's vector by writing its byte in the
		 * xmsi mask register, then schedule NAPI polling.
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		val8 = readb(addr);	/* read back to flush the write */
		napi_schedule(&ring->napi);
	} else {
		/* Non-NAPI: process the ring and replenish RX buffers now. */
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(sp, ring);
	}

	return IRQ_HANDLED;
}
4230
/* MSI-X vector 0 handler: services TX fifos and PIC/alarm events. */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask everything while we service TX interrupts. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* Restore the saved mask; readl flushes the write. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
ac731ab6 4267
/* Handle PIC interrupts: GPIO-reported link up/down transitions. */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			/* Unmask both edges so the next event settles it. */
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Final read of the mask register (flushes posted writes). */
	val64 = readq(&bar0->gpio_int_mask);
}
4327
8116f3cf
SS
4328/**
4329 * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4330 * @value: alarm bits
4331 * @addr: address value
4332 * @cnt: counter variable
4333 * Description: Check for alarm and increment the counter
4334 * Return Value:
4335 * 1 - if alarm bit set
4336 * 0 - if alarm bit is not set
4337 */
d44570e4
JP
4338static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4339 unsigned long long *cnt)
8116f3cf
SS
4340{
4341 u64 val64;
4342 val64 = readq(addr);
d44570e4 4343 if (val64 & value) {
8116f3cf
SS
4344 writeq(val64, addr);
4345 (*cnt)++;
4346 return 1;
4347 }
4348 return 0;
4349
4350}
4351
4352/**
4353 * s2io_handle_errors - Xframe error indication handler
d0ea5cbd 4354 * @dev_id: opaque handle to dev
8116f3cf
SS
4355 * Description: Handle alarms such as loss of link, single or
4356 * double ECC errors, critical and serious errors.
4357 * Return Value:
4358 * NONE
4359 */
d44570e4 4360static void s2io_handle_errors(void *dev_id)
8116f3cf 4361{
d44570e4 4362 struct net_device *dev = (struct net_device *)dev_id;
4cf1653a 4363 struct s2io_nic *sp = netdev_priv(dev);
8116f3cf 4364 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d44570e4 4365 u64 temp64 = 0, val64 = 0;
8116f3cf
SS
4366 int i = 0;
4367
4368 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4369 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4370
92b84437 4371 if (!is_s2io_card_up(sp))
8116f3cf
SS
4372 return;
4373
4374 if (pci_channel_offline(sp->pdev))
4375 return;
4376
4377 memset(&sw_stat->ring_full_cnt, 0,
d44570e4 4378 sizeof(sw_stat->ring_full_cnt));
8116f3cf
SS
4379
4380 /* Handling the XPAK counters update */
d44570e4 4381 if (stats->xpak_timer_count < 72000) {
8116f3cf
SS
4382 /* waiting for an hour */
4383 stats->xpak_timer_count++;
4384 } else {
4385 s2io_updt_xpak_counter(dev);
4386 /* reset the count to zero */
4387 stats->xpak_timer_count = 0;
4388 }
4389
4390 /* Handling link status change error Intr */
4391 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4392 val64 = readq(&bar0->mac_rmac_err_reg);
4393 writeq(val64, &bar0->mac_rmac_err_reg);
4394 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4395 schedule_work(&sp->set_link_task);
4396 }
4397
4398 /* In case of a serious error, the device will be Reset. */
4399 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
d44570e4 4400 &sw_stat->serious_err_cnt))
8116f3cf
SS
4401 goto reset;
4402
4403 /* Check for data parity error */
4404 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
d44570e4 4405 &sw_stat->parity_err_cnt))
8116f3cf
SS
4406 goto reset;
4407
4408 /* Check for ring full counter */
4409 if (sp->device_type == XFRAME_II_DEVICE) {
4410 val64 = readq(&bar0->ring_bump_counter1);
d44570e4
JP
4411 for (i = 0; i < 4; i++) {
4412 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
8116f3cf
SS
4413 temp64 >>= 64 - ((i+1)*16);
4414 sw_stat->ring_full_cnt[i] += temp64;
4415 }
4416
4417 val64 = readq(&bar0->ring_bump_counter2);
d44570e4
JP
4418 for (i = 0; i < 4; i++) {
4419 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
8116f3cf 4420 temp64 >>= 64 - ((i+1)*16);
d44570e4 4421 sw_stat->ring_full_cnt[i+4] += temp64;
8116f3cf
SS
4422 }
4423 }
4424
4425 val64 = readq(&bar0->txdma_int_status);
4426 /*check for pfc_err*/
4427 if (val64 & TXDMA_PFC_INT) {
d44570e4
JP
4428 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4429 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4430 PFC_PCIX_ERR,
4431 &bar0->pfc_err_reg,
4432 &sw_stat->pfc_err_cnt))
8116f3cf 4433 goto reset;
d44570e4
JP
4434 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4435 &bar0->pfc_err_reg,
4436 &sw_stat->pfc_err_cnt);
8116f3cf
SS
4437 }
4438
4439 /*check for tda_err*/
4440 if (val64 & TXDMA_TDA_INT) {
d44570e4
JP
4441 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4442 TDA_SM0_ERR_ALARM |
4443 TDA_SM1_ERR_ALARM,
4444 &bar0->tda_err_reg,
4445 &sw_stat->tda_err_cnt))
8116f3cf
SS
4446 goto reset;
4447 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
d44570e4
JP
4448 &bar0->tda_err_reg,
4449 &sw_stat->tda_err_cnt);
8116f3cf
SS
4450 }
4451 /*check for pcc_err*/
4452 if (val64 & TXDMA_PCC_INT) {
d44570e4
JP
4453 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4454 PCC_N_SERR | PCC_6_COF_OV_ERR |
4455 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4456 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4457 PCC_TXB_ECC_DB_ERR,
4458 &bar0->pcc_err_reg,
4459 &sw_stat->pcc_err_cnt))
8116f3cf
SS
4460 goto reset;
4461 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
d44570e4
JP
4462 &bar0->pcc_err_reg,
4463 &sw_stat->pcc_err_cnt);
8116f3cf
SS
4464 }
4465
4466 /*check for tti_err*/
4467 if (val64 & TXDMA_TTI_INT) {
d44570e4
JP
4468 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4469 &bar0->tti_err_reg,
4470 &sw_stat->tti_err_cnt))
8116f3cf
SS
4471 goto reset;
4472 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
d44570e4
JP
4473 &bar0->tti_err_reg,
4474 &sw_stat->tti_err_cnt);
8116f3cf
SS
4475 }
4476
4477 /*check for lso_err*/
4478 if (val64 & TXDMA_LSO_INT) {
d44570e4
JP
4479 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4480 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4481 &bar0->lso_err_reg,
4482 &sw_stat->lso_err_cnt))
8116f3cf
SS
4483 goto reset;
4484 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
d44570e4
JP
4485 &bar0->lso_err_reg,
4486 &sw_stat->lso_err_cnt);
8116f3cf
SS
4487 }
4488
4489 /*check for tpa_err*/
4490 if (val64 & TXDMA_TPA_INT) {
d44570e4
JP
4491 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4492 &bar0->tpa_err_reg,
4493 &sw_stat->tpa_err_cnt))
8116f3cf 4494 goto reset;
d44570e4
JP
4495 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4496 &bar0->tpa_err_reg,
4497 &sw_stat->tpa_err_cnt);
8116f3cf
SS
4498 }
4499
4500 /*check for sm_err*/
4501 if (val64 & TXDMA_SM_INT) {
d44570e4
JP
4502 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4503 &bar0->sm_err_reg,
4504 &sw_stat->sm_err_cnt))
8116f3cf
SS
4505 goto reset;
4506 }
4507
4508 val64 = readq(&bar0->mac_int_status);
4509 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4510 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
d44570e4
JP
4511 &bar0->mac_tmac_err_reg,
4512 &sw_stat->mac_tmac_err_cnt))
8116f3cf 4513 goto reset;
d44570e4
JP
4514 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4515 TMAC_DESC_ECC_SG_ERR |
4516 TMAC_DESC_ECC_DB_ERR,
4517 &bar0->mac_tmac_err_reg,
4518 &sw_stat->mac_tmac_err_cnt);
8116f3cf
SS
4519 }
4520
4521 val64 = readq(&bar0->xgxs_int_status);
4522 if (val64 & XGXS_INT_STATUS_TXGXS) {
4523 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
d44570e4
JP
4524 &bar0->xgxs_txgxs_err_reg,
4525 &sw_stat->xgxs_txgxs_err_cnt))
8116f3cf
SS
4526 goto reset;
4527 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
d44570e4
JP
4528 &bar0->xgxs_txgxs_err_reg,
4529 &sw_stat->xgxs_txgxs_err_cnt);
8116f3cf
SS
4530 }
4531
4532 val64 = readq(&bar0->rxdma_int_status);
4533 if (val64 & RXDMA_INT_RC_INT_M) {
d44570e4
JP
4534 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4535 RC_FTC_ECC_DB_ERR |
4536 RC_PRCn_SM_ERR_ALARM |
4537 RC_FTC_SM_ERR_ALARM,
4538 &bar0->rc_err_reg,
4539 &sw_stat->rc_err_cnt))
8116f3cf 4540 goto reset;
d44570e4
JP
4541 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4542 RC_FTC_ECC_SG_ERR |
4543 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4544 &sw_stat->rc_err_cnt);
4545 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4546 PRC_PCI_AB_WR_Rn |
4547 PRC_PCI_AB_F_WR_Rn,
4548 &bar0->prc_pcix_err_reg,
4549 &sw_stat->prc_pcix_err_cnt))
8116f3cf 4550 goto reset;
d44570e4
JP
4551 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4552 PRC_PCI_DP_WR_Rn |
4553 PRC_PCI_DP_F_WR_Rn,
4554 &bar0->prc_pcix_err_reg,
4555 &sw_stat->prc_pcix_err_cnt);
8116f3cf
SS
4556 }
4557
4558 if (val64 & RXDMA_INT_RPA_INT_M) {
4559 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
d44570e4
JP
4560 &bar0->rpa_err_reg,
4561 &sw_stat->rpa_err_cnt))
8116f3cf
SS
4562 goto reset;
4563 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
d44570e4
JP
4564 &bar0->rpa_err_reg,
4565 &sw_stat->rpa_err_cnt);
8116f3cf
SS
4566 }
4567
4568 if (val64 & RXDMA_INT_RDA_INT_M) {
d44570e4
JP
4569 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4570 RDA_FRM_ECC_DB_N_AERR |
4571 RDA_SM1_ERR_ALARM |
4572 RDA_SM0_ERR_ALARM |
4573 RDA_RXD_ECC_DB_SERR,
4574 &bar0->rda_err_reg,
4575 &sw_stat->rda_err_cnt))
8116f3cf 4576 goto reset;
d44570e4
JP
4577 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4578 RDA_FRM_ECC_SG_ERR |
4579 RDA_MISC_ERR |
4580 RDA_PCIX_ERR,
4581 &bar0->rda_err_reg,
4582 &sw_stat->rda_err_cnt);
8116f3cf
SS
4583 }
4584
4585 if (val64 & RXDMA_INT_RTI_INT_M) {
d44570e4
JP
4586 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4587 &bar0->rti_err_reg,
4588 &sw_stat->rti_err_cnt))
8116f3cf
SS
4589 goto reset;
4590 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
d44570e4
JP
4591 &bar0->rti_err_reg,
4592 &sw_stat->rti_err_cnt);
8116f3cf
SS
4593 }
4594
4595 val64 = readq(&bar0->mac_int_status);
4596 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4597 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
d44570e4
JP
4598 &bar0->mac_rmac_err_reg,
4599 &sw_stat->mac_rmac_err_cnt))
8116f3cf 4600 goto reset;
d44570e4
JP
4601 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4602 RMAC_SINGLE_ECC_ERR |
4603 RMAC_DOUBLE_ECC_ERR,
4604 &bar0->mac_rmac_err_reg,
4605 &sw_stat->mac_rmac_err_cnt);
8116f3cf
SS
4606 }
4607
4608 val64 = readq(&bar0->xgxs_int_status);
4609 if (val64 & XGXS_INT_STATUS_RXGXS) {
4610 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
d44570e4
JP
4611 &bar0->xgxs_rxgxs_err_reg,
4612 &sw_stat->xgxs_rxgxs_err_cnt))
8116f3cf
SS
4613 goto reset;
4614 }
4615
4616 val64 = readq(&bar0->mc_int_status);
d44570e4
JP
4617 if (val64 & MC_INT_STATUS_MC_INT) {
4618 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4619 &bar0->mc_err_reg,
4620 &sw_stat->mc_err_cnt))
8116f3cf
SS
4621 goto reset;
4622
4623 /* Handling Ecc errors */
4624 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4625 writeq(val64, &bar0->mc_err_reg);
4626 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4627 sw_stat->double_ecc_errs++;
4628 if (sp->device_type != XFRAME_II_DEVICE) {
4629 /*
4630 * Reset XframeI only if critical error
4631 */
4632 if (val64 &
d44570e4
JP
4633 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4634 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4635 goto reset;
4636 }
8116f3cf
SS
4637 } else
4638 sw_stat->single_ecc_errs++;
4639 }
4640 }
4641 return;
4642
4643reset:
3a3d5756 4644 s2io_stop_all_tx_queue(sp);
8116f3cf
SS
4645 schedule_work(&sp->rst_timer_task);
4646 sw_stat->soft_reset_cnt++;
8116f3cf
SS
4647}
4648
1da177e4
LT
/**
 *  s2io_isr - ISR handler of the device .
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  Description:  This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *   IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	/* Card is being brought down / is down; nothing to service. */
	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	config = &sp->config;
	mac_control = &sp->mac_control;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	/* An all-1's read — NOTE(review): presumably a dead/removed card */
	if (unlikely(reason == S2IO_MINUS_ONE))
		return IRQ_HANDLED;	/* Nothing much can be done. Get out */

	if (reason &
	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
		/* Mask all interrupt sources while we service this one;
		 * the saved mask is restored just before returning below.
		 */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* Defer Rx processing to NAPI poll; mask and
				 * ack Rx, then read back to flush the writes.
				 */
				napi_schedule(&sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			/* Non-NAPI mode: drain every Rx ring inline. */
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				rx_intr_handler(ring, 0);
			}
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		/* Reap Tx completions on every fifo unconditionally. */
		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				s2io_chk_rx_buffers(sp, ring);
			}
		}
		/* Restore the interrupt mask and flush with a read-back. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	} else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4757
d0ea5cbd 4758/*
7ba013ac
K
4759 * s2io_updt_stats -
4760 */
1ee6dd77 4761static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4762{
1ee6dd77 4763 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac
K
4764 u64 val64;
4765 int cnt = 0;
4766
92b84437 4767 if (is_s2io_card_up(sp)) {
7ba013ac
K
4768 /* Apprx 30us on a 133 MHz bus */
4769 val64 = SET_UPDT_CLICKS(10) |
4770 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4771 writeq(val64, &bar0->stat_cfg);
4772 do {
4773 udelay(100);
4774 val64 = readq(&bar0->stat_cfg);
b7b5a128 4775 if (!(val64 & s2BIT(0)))
7ba013ac
K
4776 break;
4777 cnt++;
4778 if (cnt == 5)
4779 break; /* Updt failed */
d44570e4 4780 } while (1);
8a4bdbaa 4781 }
7ba013ac
K
4782}
4783
/**
 *  s2io_get_stats - Updates the device statistics structure.
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function updates the device statistics structure in the s2io_nic
 *  structure and returns a pointer to the same.
 *  Return value:
 *  pointer to the updated net_device_stats structure.
 */
static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	u64 delta;

	/* Configure Stats for immediate updt */
	s2io_updt_stats(sp);

	/* A device reset will cause the on-adapter statistics to be zero'ed.
	 * This can be done while running by changing the MTU. To prevent the
	 * system from having the stats zero'ed, the driver keeps a copy of the
	 * last update to the system (which is also zero'ed on reset). This
	 * enables the driver to accurately know the delta between the last
	 * update and the current update.
	 */
	/* 64-bit counters are split in hardware into a 32-bit low word and
	 * a 32-bit overflow word; recombine before taking the delta.
	 */
	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
	sp->stats.rx_packets += delta;
	dev->stats.rx_packets += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
	sp->stats.tx_packets += delta;
	dev->stats.tx_packets += delta;

	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
	sp->stats.rx_bytes += delta;
	dev->stats.rx_bytes += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
	sp->stats.tx_bytes += delta;
	dev->stats.tx_bytes += delta;

	/* NOTE(review): both rx_errors and rx_dropped below are derived from
	 * the same hardware counter (rmac_drop_frms) — confirm intended.
	 */
	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
	sp->stats.rx_errors += delta;
	dev->stats.rx_errors += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
	sp->stats.tx_errors += delta;
	dev->stats.tx_errors += delta;

	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
	sp->stats.rx_dropped += delta;
	dev->stats.rx_dropped += delta;

	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
	sp->stats.tx_dropped += delta;
	dev->stats.tx_dropped += delta;

	/* The adapter MAC interprets pause frames as multicast packets, but
	 * does not pass them up. This erroneously increases the multicast
	 * packet count and needs to be deducted when the multicast frame count
	 * is queried.
	 */
	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
	delta -= sp->stats.multicast;
	sp->stats.multicast += delta;
	dev->stats.multicast += delta;

	/* Length errors: undersized plus oversized ("long") frames. */
	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms)) +
		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
	sp->stats.rx_length_errors += delta;
	dev->stats.rx_length_errors += delta;

	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
	sp->stats.rx_crc_errors += delta;
	dev->stats.rx_crc_errors += delta;

	return &dev->stats;
}
4871
/**
 *  s2io_set_multicast - entry point for multicast address enable/disable.
 *  @dev : pointer to the device structure
 *  @may_sleep: parameter indicates if sleeping when waiting for command
 *  complete
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
 *  determine, if multicast address must be enabled or if promiscuous mode
 *  is to be disabled etc.
 *  Return value:
 *  void.
 */
static void s2io_set_multicast(struct net_device *dev, bool may_sleep)
{
	int i, j, prev_cnt;
	struct netdev_hw_addr *ha;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
		0xfeffffffffffULL;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
	void __iomem *add;
	struct config_param *config = &sp->config;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/* Enable all Multicast addresses */
		/* Program the all-multi match entry into the last CAM slot. */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET, may_sleep);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = config->max_mc_addr - 1;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/* Disable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET, may_sleep);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/* Put the NIC into promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		/* NOTE(review): the cfg key is rewritten before each 32-bit
		 * half of mac_cfg — appears to unlock the register per write;
		 * confirm against the Xframe register spec.
		 */
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* Promiscuous mode also disables VLAN tag stripping unless
		 * the module parameter forces it on.
		 */
		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 0;
		}

		/* Read back mac_cfg (value unused) — presumably flushes the
		 * posted writes; confirm.
		 */
		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/* Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
	}

	/* Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
		/* The multicast region of the CAM holds
		 * max_mc_addr - max_mac_addr entries.
		 */
		if (netdev_mc_count(dev) >
		    (config->max_mc_addr - config->max_mac_addr)) {
			DBG_PRINT(ERR_DBG,
				  "%s: No more Rx filters can be added - "
				  "please enable ALL_MULTI instead\n",
				  dev->name);
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = netdev_mc_count(dev);

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(config->mc_start_offset + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET, may_sleep)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Pack the 6 address bytes into the low 48 bits,
			 * first byte most significant.
			 */
			mac_addr = 0;
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= ha->addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(i + config->mc_start_offset);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET, may_sleep)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
			i++;
		}
	}
}
5048
5ce7f3f4
SAS
/* NDO wrapper for s2io_set_multicast */
static void s2io_ndo_set_multicast(struct net_device *dev)
{
	/* may_sleep=false: NOTE(review) — presumably this ndo callback runs
	 * in atomic context; confirm against the callers of ndo_set_rx_mode.
	 */
	s2io_set_multicast(dev, false);
}
5054
faa4f796
SH
5055/* read from CAM unicast & multicast addresses and store it in
5056 * def_mac_addr structure
5057 */
dac499f9 5058static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
faa4f796
SH
5059{
5060 int offset;
5061 u64 mac_addr = 0x0;
5062 struct config_param *config = &sp->config;
5063
5064 /* store unicast & multicast mac addresses */
5065 for (offset = 0; offset < config->max_mc_addr; offset++) {
5066 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5067 /* if read fails disable the entry */
5068 if (mac_addr == FAILURE)
5069 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5070 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5071 }
5072}
5073
5074/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5075static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5076{
5077 int offset;
5078 struct config_param *config = &sp->config;
5079 /* restore unicast mac address */
5080 for (offset = 0; offset < config->max_mac_addr; offset++)
5081 do_s2io_prog_unicast(sp->dev,
d44570e4 5082 sp->def_mac_addr[offset].mac_addr);
faa4f796
SH
5083
5084 /* restore multicast mac address */
5085 for (offset = config->mc_start_offset;
d44570e4 5086 offset < config->max_mc_addr; offset++)
faa4f796
SH
5087 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5088}
5089
5090/* add a multicast MAC address to CAM */
5091static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5092{
5093 int i;
5094 u64 mac_addr = 0;
5095 struct config_param *config = &sp->config;
5096
5097 for (i = 0; i < ETH_ALEN; i++) {
5098 mac_addr <<= 8;
5099 mac_addr |= addr[i];
5100 }
5101 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5102 return SUCCESS;
5103
5104 /* check if the multicast mac already preset in CAM */
5105 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5106 u64 tmp64;
5107 tmp64 = do_s2io_read_unicast_mc(sp, i);
5108 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5109 break;
5110
5111 if (tmp64 == mac_addr)
5112 return SUCCESS;
5113 }
5114 if (i == config->max_mc_addr) {
5115 DBG_PRINT(ERR_DBG,
d44570e4 5116 "CAM full no space left for multicast MAC\n");
faa4f796
SH
5117 return FAILURE;
5118 }
5119 /* Update the internal structure with this new mac address */
5120 do_s2io_copy_mac_addr(sp, i, mac_addr);
5121
d44570e4 5122 return do_s2io_add_mac(sp, mac_addr, i);
faa4f796
SH
5123}
5124
/* add MAC address to CAM
 * @sp:   device private structure
 * @addr: MAC address packed into the low 48 bits of a u64
 * @off:  CAM slot (offset) to program
 *
 * Loads the data register, issues a write-enable command for the slot,
 * then waits for the strobe to clear.  Returns SUCCESS or FAILURE.
 */
static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
{
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
	       &bar0->rmac_addr_data0_mem);

	val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(off);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET, true)) {
		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
		return FAILURE;
	}
	return SUCCESS;
}
faa4f796
SH
5147/* deletes a specified unicast/multicast mac entry from CAM */
5148static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5149{
5150 int offset;
5151 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5152 struct config_param *config = &sp->config;
5153
5154 for (offset = 1;
d44570e4 5155 offset < config->max_mc_addr; offset++) {
faa4f796
SH
5156 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5157 if (tmp64 == addr) {
5158 /* disable the entry by writing 0xffffffffffffULL */
5159 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5160 return FAILURE;
5161 /* store the new mac list from CAM */
5162 do_s2io_store_unicast_mc(sp);
5163 return SUCCESS;
5164 }
5165 }
5166 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
d44570e4 5167 (unsigned long long)addr);
faa4f796
SH
5168 return FAILURE;
5169}
5170
/* read mac entries from CAM
 * @sp:     device private structure
 * @offset: CAM slot to read
 *
 * Issues a read command for the slot and waits for completion.  The
 * 48-bit address occupies the upper bits of the data register, hence
 * the final shift.  Returns FAILURE (a u64 sentinel) on timeout.
 */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	u64 tmp64, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read mac addr */
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET, true)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		return FAILURE;
	}
	tmp64 = readq(&bar0->rmac_addr_data0_mem);

	return tmp64 >> 16;
}
2fd37688 5193
d0ea5cbd 5194/*
49ce9c2c 5195 * s2io_set_mac_addr - driver entry point
2fd37688 5196 */
faa4f796 5197
2fd37688
SS
5198static int s2io_set_mac_addr(struct net_device *dev, void *p)
5199{
5200 struct sockaddr *addr = p;
5201
5202 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 5203 return -EADDRNOTAVAIL;
2fd37688
SS
5204
5205 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5206
5207 /* store the MAC address in CAM */
d44570e4 5208 return do_s2io_prog_unicast(dev, dev->dev_addr);
2fd37688 5209}
/**
 *  do_s2io_prog_unicast - Programs the Xframe mac address
 *  @dev : pointer to the device structure.
 *  @addr: a uchar pointer to the new mac address which is to be set.
 *  Description : This procedure will program the Xframe to receive
 *  frames with new Mac Address
 *  Return value: SUCCESS on success and an appropriate (-)ve integer
 *  as defined in errno.h file on failure.
 */

static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
{
	struct s2io_nic *sp = netdev_priv(dev);
	register u64 mac_addr = 0, perm_addr = 0;
	int i;
	u64 tmp64;
	struct config_param *config = &sp->config;

	/*
	 * Set the new MAC address as the new unicast filter and reflect this
	 * change on the device address registered with the OS. It will be
	 * at offset 0.
	 */
	/* Pack both the requested address and the stored permanent address
	 * (slot 0) into u64s, first byte most significant.
	 */
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
		perm_addr <<= 8;
		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
	}

	/* check if the dev_addr is different than perm_addr */
	if (mac_addr == perm_addr)
		return SUCCESS;

	/* check if the mac already preset in CAM */
	for (i = 1; i < config->max_mac_addr; i++) {
		tmp64 = do_s2io_read_unicast_mc(sp, i);
		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
			break;

		if (tmp64 == mac_addr) {
			DBG_PRINT(INFO_DBG,
				  "MAC addr:0x%llx already present in CAM\n",
				  (unsigned long long)mac_addr);
			return SUCCESS;
		}
	}
	/* No free slot and no duplicate found: the unicast region is full. */
	if (i == config->max_mac_addr) {
		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
		return FAILURE;
	}
	/* Update the internal structure with this new mac address */
	do_s2io_copy_mac_addr(sp, i, mac_addr);

	return do_s2io_add_mac(sp, mac_addr, i);
}
5266
5267/**
51f21442 5268 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
d0ea5cbd 5269 * @dev : pointer to netdev
51f21442 5270 * @cmd: pointer to the structure with parameters given by ethtool to set
1da177e4
LT
5271 * link information.
5272 * Description:
20346722 5273 * The function sets different link parameters provided by the user onto
1da177e4
LT
5274 * the NIC.
5275 * Return value:
5276 * 0 on success.
d44570e4 5277 */
1da177e4 5278
51f21442
PR
5279static int
5280s2io_ethtool_set_link_ksettings(struct net_device *dev,
5281 const struct ethtool_link_ksettings *cmd)
1da177e4 5282{
4cf1653a 5283 struct s2io_nic *sp = netdev_priv(dev);
51f21442
PR
5284 if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5285 (cmd->base.speed != SPEED_10000) ||
5286 (cmd->base.duplex != DUPLEX_FULL))
1da177e4
LT
5287 return -EINVAL;
5288 else {
5289 s2io_close(sp->dev);
5290 s2io_open(sp->dev);
5291 }
5292
5293 return 0;
5294}
5295
5296/**
dc432f5a 5297 * s2io_ethtool_get_link_ksettings - Return link specific information.
d0ea5cbd 5298 * @dev: pointer to netdev
51f21442 5299 * @cmd : pointer to the structure with parameters given by ethtool
1da177e4
LT
5300 * to return link information.
5301 * Description:
5302 * Returns link specific information like speed, duplex etc.. to ethtool.
5303 * Return value :
5304 * return 0 on success.
5305 */
5306
51f21442
PR
5307static int
5308s2io_ethtool_get_link_ksettings(struct net_device *dev,
5309 struct ethtool_link_ksettings *cmd)
1da177e4 5310{
4cf1653a 5311 struct s2io_nic *sp = netdev_priv(dev);
1a7eb72b 5312
51f21442
PR
5313 ethtool_link_ksettings_zero_link_mode(cmd, supported);
5314 ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5315 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5316
5317 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5318 ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5319 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5320
5321 cmd->base.port = PORT_FIBRE;
1da177e4
LT
5322
5323 if (netif_carrier_ok(sp->dev)) {
51f21442
PR
5324 cmd->base.speed = SPEED_10000;
5325 cmd->base.duplex = DUPLEX_FULL;
1da177e4 5326 } else {
51f21442
PR
5327 cmd->base.speed = SPEED_UNKNOWN;
5328 cmd->base.duplex = DUPLEX_UNKNOWN;
1da177e4
LT
5329 }
5330
51f21442 5331 cmd->base.autoneg = AUTONEG_DISABLE;
1da177e4
LT
5332 return 0;
5333}
5334
5335/**
20346722 5336 * s2io_ethtool_gdrvinfo - Returns driver specific information.
d0ea5cbd 5337 * @dev: pointer to netdev
1da177e4
LT
5338 * @info : pointer to the structure with parameters given by ethtool to
5339 * return driver information.
5340 * Description:
 * Returns driver specific information like name, version etc. to ethtool.
5342 * Return value:
5343 * void
5344 */
5345
5346static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5347 struct ethtool_drvinfo *info)
5348{
4cf1653a 5349 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 5350
68aad78c
RJ
5351 strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5352 strlcpy(info->version, s2io_driver_version, sizeof(info->version));
68aad78c 5353 strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
5354}
5355
5356/**
 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
d0ea5cbd 5358 * @dev: pointer to netdev
20346722 5359 * @regs : pointer to the structure with parameters given by ethtool for
d0ea5cbd
JB
5360 * dumping the registers.
5361 * @space: The input argument into which all the registers are dumped.
1da177e4
LT
5362 * Description:
5363 * Dumps the entire register space of xFrame NIC into the user given
5364 * buffer area.
5365 * Return value :
5366 * void .
d44570e4 5367 */
1da177e4
LT
5368
5369static void s2io_ethtool_gregs(struct net_device *dev,
5370 struct ethtool_regs *regs, void *space)
5371{
5372 int i;
5373 u64 reg;
d44570e4 5374 u8 *reg_space = (u8 *)space;
4cf1653a 5375 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5376
5377 regs->len = XENA_REG_SPACE;
5378 regs->version = sp->pdev->subsystem_device;
5379
5380 for (i = 0; i < regs->len; i += 8) {
5381 reg = readq(sp->bar0 + i);
5382 memcpy((reg_space + i), &reg, 8);
5383 }
5384}
5385
034e3450 5386/*
5387 * s2io_set_led - control NIC led
d44570e4 5388 */
034e3450 5389static void s2io_set_led(struct s2io_nic *sp, bool on)
1da177e4 5390{
1ee6dd77 5391 struct XENA_dev_config __iomem *bar0 = sp->bar0;
034e3450 5392 u16 subid = sp->pdev->subsystem_device;
5393 u64 val64;
1da177e4 5394
541ae68f 5395 if ((sp->device_type == XFRAME_II_DEVICE) ||
d44570e4 5396 ((subid & 0xFF) >= 0x07)) {
1da177e4 5397 val64 = readq(&bar0->gpio_control);
034e3450 5398 if (on)
5399 val64 |= GPIO_CTRL_GPIO_0;
5400 else
5401 val64 &= ~GPIO_CTRL_GPIO_0;
5402
1da177e4
LT
5403 writeq(val64, &bar0->gpio_control);
5404 } else {
5405 val64 = readq(&bar0->adapter_control);
034e3450 5406 if (on)
5407 val64 |= ADAPTER_LED_ON;
5408 else
5409 val64 &= ~ADAPTER_LED_ON;
5410
1da177e4
LT
5411 writeq(val64, &bar0->adapter_control);
5412 }
5413
1da177e4
LT
5414}
5415
5416/**
034e3450 5417 * s2io_ethtool_set_led - To physically identify the nic on the system.
5418 * @dev : network device
5419 * @state: led setting
5420 *
1da177e4 5421 * Description: Used to physically identify the NIC on the system.
20346722 5422 * The Link LED will blink for a time specified by the user for
1da177e4 5423 * identification.
20346722 5424 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4 5425 * identification is possible only if it's link is up.
1da177e4
LT
5426 */
5427
static int s2io_ethtool_set_led(struct net_device *dev,
				enum ethtool_phys_id_state state)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid = sp->pdev->subsystem_device;

	/* Old Xframe I boards blink via adapter_control, which only works
	 * while the adapter is enabled, i.e. while the link is up. */
	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
		u64 val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			pr_err("Adapter Link down, cannot blink LED\n");
			return -EAGAIN;
		}
	}

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* NOTE(review): the saved value comes from gpio_control even
		 * though the field is named adapt_ctrl_org; it is restored to
		 * gpio_control below, so this appears intentional — confirm. */
		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
		return 1; /* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		s2io_set_led(sp, true);
		break;

	case ETHTOOL_ID_OFF:
		s2io_set_led(sp, false);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the saved GPIO state on cards whose link LED does
		 * not recover by itself after blinking. */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
	}

	return 0;
}
5463
0cec35eb 5464static void s2io_ethtool_gringparam(struct net_device *dev,
d44570e4 5465 struct ethtool_ringparam *ering)
0cec35eb 5466{
4cf1653a 5467 struct s2io_nic *sp = netdev_priv(dev);
d44570e4 5468 int i, tx_desc_count = 0, rx_desc_count = 0;
0cec35eb 5469
1853e2e1 5470 if (sp->rxd_mode == RXD_MODE_1) {
0cec35eb 5471 ering->rx_max_pending = MAX_RX_DESC_1;
1853e2e1
JM
5472 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5473 } else {
0cec35eb 5474 ering->rx_max_pending = MAX_RX_DESC_2;
1853e2e1
JM
5475 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5476 }
0cec35eb
SH
5477
5478 ering->tx_max_pending = MAX_TX_DESC;
8a4bdbaa 5479
1853e2e1 5480 for (i = 0; i < sp->config.rx_ring_num; i++)
0cec35eb 5481 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
0cec35eb 5482 ering->rx_pending = rx_desc_count;
0cec35eb 5483 ering->rx_jumbo_pending = rx_desc_count;
1853e2e1
JM
5484
5485 for (i = 0; i < sp->config.tx_fifo_num; i++)
5486 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5487 ering->tx_pending = tx_desc_count;
5488 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
0cec35eb
SH
5489}
5490
1da177e4
LT
5491/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
d0ea5cbd 5493 * @dev: pointer to netdev
1da177e4
LT
5494 * @ep : pointer to the structure with pause parameters given by ethtool.
5495 * Description:
5496 * Returns the Pause frame generation and reception capability of the NIC.
5497 * Return value:
5498 * void
5499 */
5500static void s2io_ethtool_getpause_data(struct net_device *dev,
5501 struct ethtool_pauseparam *ep)
5502{
5503 u64 val64;
4cf1653a 5504 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5505 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5506
5507 val64 = readq(&bar0->rmac_pause_cfg);
5508 if (val64 & RMAC_PAUSE_GEN_ENABLE)
f957bcf0 5509 ep->tx_pause = true;
1da177e4 5510 if (val64 & RMAC_PAUSE_RX_ENABLE)
f957bcf0
TK
5511 ep->rx_pause = true;
5512 ep->autoneg = false;
1da177e4
LT
5513}
5514
5515/**
5516 * s2io_ethtool_setpause_data - set/reset pause frame generation.
d0ea5cbd 5517 * @dev: pointer to netdev
1da177e4
LT
5518 * @ep : pointer to the structure with pause parameters given by ethtool.
5519 * Description:
5520 * It can be used to set or reset Pause frame generation or reception
5521 * support of the NIC.
5522 * Return value:
5523 * int, returns 0 on Success
5524 */
5525
5526static int s2io_ethtool_setpause_data(struct net_device *dev,
d44570e4 5527 struct ethtool_pauseparam *ep)
1da177e4
LT
5528{
5529 u64 val64;
4cf1653a 5530 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5531 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5532
5533 val64 = readq(&bar0->rmac_pause_cfg);
5534 if (ep->tx_pause)
5535 val64 |= RMAC_PAUSE_GEN_ENABLE;
5536 else
5537 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5538 if (ep->rx_pause)
5539 val64 |= RMAC_PAUSE_RX_ENABLE;
5540 else
5541 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5542 writeq(val64, &bar0->rmac_pause_cfg);
5543 return 0;
5544}
5545
d0ea5cbd 5546#define S2IO_DEV_ID 5
1da177e4
LT
5547/**
5548 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 5549 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5550 * s2io_nic structure.
5551 * @off : offset at which the data must be written
5552 * @data : Its an output parameter where the data read at the given
20346722 5553 * offset is stored.
1da177e4 5554 * Description:
20346722 5555 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
5556 * read data.
5557 * NOTE: Will allow to read only part of the EEPROM visible through the
5558 * I2C bus.
5559 * Return value:
5560 * -1 on failure and 0 on success.
5561 */
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: the EEPROM sits behind the I2C controller.
		 * Start a 3-byte read, then poll up to 5 x 50 ms for the
		 * controller to flag completion. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: the EEPROM is reached over SPI.  Program the
		 * command, raise the request bit with a second write, then
		 * poll for DONE (success) or NACK (ret = 1). */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff; /* only 3 bytes are valid */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5613
5614/**
5615 * write_eeprom - actually writes the relevant part of the data value.
5616 * @sp : private member of the device structure, which is a pointer to the
5617 * s2io_nic structure.
5618 * @off : offset at which the data must be written
5619 * @data : The data that is to be written
20346722 5620 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
5621 * the Eeprom. (max of 3)
5622 * Description:
5623 * Actually writes the relevant part of the data value into the Eeprom
5624 * through the I2C bus.
5625 * Return value:
5626 * 0 on success, -1 on failure.
5627 */
5628
static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: issue an I2C write of 'cnt' bytes and poll up to
		 * 5 x 50 ms; a NACK from the device leaves ret at -1. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* The SPI byte-count field encodes 8 bytes as 0. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		/* Stage the data (bit length = cnt * 8) before the command. */
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/* s2io_vpd_read - read the VPD area via PCI config space to recover the
 * product name and serial number of the adapter.  On any failure the
 * defaults ("Xframe ..." / "NOT AVAILABLE") set below are left in place.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;	/* config-space offset of the VPD capability */
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* VPD protocol: write the address, clear the flag byte, then poll
	 * (5 x 2 ms) until the device sets the flag (0x80) to signal that
	 * 4 bytes of data are ready. */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev, (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* Scan for the "SN" tag; the following byte is the length
		 * of the serial-number string. */
		for (cnt = 0; cnt < 252; cnt++) {
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* vpd_data[1] holds the product-name length, the string itself
	 * starts at offset 3 (standard VPD large-resource layout). */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5750
1da177e4
LT
5751/**
5752 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
d0ea5cbd 5753 * @dev: pointer to netdev
20346722 5754 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5755 * containing all relevant information.
5756 * @data_buf : user defined value to be written into Eeprom.
5757 * Description: Reads the values stored in the Eeprom at given offset
5758 * for a given length. Stores these values int the input argument data
5759 * buffer 'data_buf' and returns these to the caller (ethtool.)
5760 * Return value:
5761 * int 0 on success
5762 */
5763
5764static int s2io_ethtool_geeprom(struct net_device *dev,
d44570e4 5765 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5766{
ad4ebed0 5767 u32 i, valid;
5768 u64 data;
4cf1653a 5769 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5770
5771 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5772
5773 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5774 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5775
5776 for (i = 0; i < eeprom->len; i += 4) {
5777 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5778 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5779 return -EFAULT;
5780 }
5781 valid = INV(data);
5782 memcpy((data_buf + i), &valid, 4);
5783 }
5784 return 0;
5785}
5786
5787/**
5788 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
d0ea5cbd 5789 * @dev: pointer to netdev
20346722 5790 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4 5791 * containing all relevant information.
d0ea5cbd 5792 * @data_buf : user defined value to be written into Eeprom.
1da177e4
LT
5793 * Description:
5794 * Tries to write the user provided value in the Eeprom, at the offset
5795 * given by the user.
5796 * Return value:
5797 * 0 on success, -EFAULT on failure.
5798 */
5799
5800static int s2io_ethtool_seeprom(struct net_device *dev,
5801 struct ethtool_eeprom *eeprom,
d44570e4 5802 u8 *data_buf)
1da177e4
LT
5803{
5804 int len = eeprom->len, cnt = 0;
ad4ebed0 5805 u64 valid = 0, data;
4cf1653a 5806 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5807
5808 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5809 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
5810 "ETHTOOL_WRITE_EEPROM Err: "
5811 "Magic value is wrong, it is 0x%x should be 0x%x\n",
5812 (sp->pdev->vendor | (sp->pdev->device << 16)),
5813 eeprom->magic);
1da177e4
LT
5814 return -EFAULT;
5815 }
5816
5817 while (len) {
d44570e4
JP
5818 data = (u32)data_buf[cnt] & 0x000000FF;
5819 if (data)
5820 valid = (u32)(data << 24);
5821 else
1da177e4
LT
5822 valid = data;
5823
5824 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5825 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
5826 "ETHTOOL_WRITE_EEPROM Err: "
5827 "Cannot write into the specified offset\n");
1da177e4
LT
5828 return -EFAULT;
5829 }
5830 cnt++;
5831 len--;
5832 }
5833
5834 return 0;
5835}
5836
5837/**
20346722
K
5838 * s2io_register_test - reads and writes into all clock domains.
5839 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5840 * s2io_nic structure.
5841 * @data : variable that returns the result of each of the test conducted b
5842 * by the driver.
5843 * Description:
5844 * Read and write into all clock domains. The NIC has 3 clock domains,
5845 * see that registers in all the three regions are accessible.
5846 * Return value:
5847 * 0 on success.
5848 */
5849
static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read-only checks: each register below has a fixed, documented
	 * power-on value; a mismatch means that clock domain is not
	 * accessible. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
	}

	/* rx_queue_cfg resets differently on Xframe I vs II. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
	}

	/* Write checks: xmsi_data must read back both test patterns. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
	}

	*data = fail;
	return fail;
}
5903
5904/**
20346722 5905 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
5906 * @sp : private member of the device structure, which is a pointer to the
5907 * s2io_nic structure.
5908 * @data:variable that returns the result of each of the test conducted by
5909 * the driver.
5910 * Description:
20346722 5911 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
5912 * register.
5913 * Return value:
5914 * 0 on success.
5915 */
5916
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Negative test: a write to offset 0 must fail.  The SPI interface
	 * (Xframe II) allows write access to all areas of EEPROM, so the
	 * negative tests are run only on Xframe I (I2C). */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 so they can be
	 * restored after the destructive write tests below. */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Positive test: write 0x012345 at 0x4F0 and read it back. */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM word back to all-ones. */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Negative test: a write at offset 0x7c must fail (Xframe I only). */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Positive test: write/read-back at offset 0x7F0. */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM word back to all-ones. */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Remaining negative tests: writes to these protected
		 * offsets must all fail. */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore the values saved at offsets 0x4F0 and 0x7F0. */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}
6004
6005/**
6006 * s2io_bist_test - invokes the MemBist test of the card .
20346722 6007 * @sp : private member of the device structure, which is a pointer to the
1da177e4 6008 * s2io_nic structure.
20346722 6009 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
6010 * the driver.
6011 * Description:
6012 * This invokes the MemBist test of the card. We give around
6013 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
1da177e4
LT
6015 * Return value:
6016 * 0 on success and -1 on failure.
6017 */
6018
d44570e4 6019static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
1da177e4
LT
6020{
6021 u8 bist = 0;
6022 int cnt = 0, ret = -1;
6023
6024 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6025 bist |= PCI_BIST_START;
6026 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6027
6028 while (cnt < 20) {
6029 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6030 if (!(bist & PCI_BIST_START)) {
6031 *data = (bist & PCI_BIST_CODE_MASK);
6032 ret = 0;
6033 break;
6034 }
6035 msleep(100);
6036 cnt++;
6037 }
6038
6039 return ret;
6040}
6041
6042/**
49ce9c2c 6043 * s2io_link_test - verifies the link state of the nic
d0ea5cbd 6044 * @sp: private member of the device structure, which is a pointer to the
1da177e4
LT
6045 * s2io_nic structure.
6046 * @data: variable that returns the result of each of the test conducted by
6047 * the driver.
6048 * Description:
20346722 6049 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
6050 * argument 'data' appropriately.
6051 * Return value:
6052 * 0 on success.
6053 */
6054
d44570e4 6055static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
1da177e4 6056{
1ee6dd77 6057 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
6058 u64 val64;
6059
6060 val64 = readq(&bar0->adapter_status);
d44570e4 6061 if (!(LINK_IS_UP(val64)))
1da177e4 6062 *data = 1;
c92ca04b
AR
6063 else
6064 *data = 0;
1da177e4 6065
b41477f3 6066 return *data;
1da177e4
LT
6067}
6068
6069/**
20346722 6070 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
49ce9c2c 6071 * @sp: private member of the device structure, which is a pointer to the
1da177e4 6072 * s2io_nic structure.
49ce9c2c 6073 * @data: variable that returns the result of each of the test
1da177e4
LT
6074 * conducted by the driver.
6075 * Description:
20346722 6076 * This is one of the offline test that tests the read and write
1da177e4
LT
6077 * access to the RldRam chip on the NIC.
6078 * Return value:
6079 * 0 on success.
6080 */
6081
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so the test patterns are not silently corrected. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the raw patterns, then the same patterns with the
	 * upper 48 bits inverted. */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Launch the write phase, then poll (5 x 200 ms) for DONE. */
		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* NOTE(review): a poll timeout breaks out of the loop
		 * without setting test_fail — confirm this is intended. */
		if (cnt == 5)
			break;

		/* Launch the read/compare phase, poll (5 x 500 ms) for DONE. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6164
6165/**
 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
d0ea5cbd 6167 * @dev: pointer to netdev
1da177e4
LT
6168 * @ethtest : pointer to a ethtool command specific structure that will be
6169 * returned to the user.
20346722 6170 * @data : variable that returns the result of each of the test
1da177e4
LT
6171 * conducted by the driver.
6172 * Description:
6173 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6174 * the health of the card.
6175 * Return value:
6176 * void
6177 */
6178
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int orig_state = netif_running(sp->dev);

	/* data[] slots: 0 = register, 1 = eeprom, 2 = link,
	 * 3 = rldram, 4 = bist. */
	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests require the interface down; it is closed
		 * here and reopened afterwards if it was running.  The
		 * register and RLDRAM tests each disturb the hardware, so
		 * the adapter is reset after each of them. */
		if (orig_state)
			s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* The link test is skipped offline. */
		data[2] = 0;
	} else {
		/* Online tests: only the link test runs, and it requires
		 * the interface to be up. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6232
6233static void s2io_get_ethtool_stats(struct net_device *dev,
6234 struct ethtool_stats *estats,
d44570e4 6235 u64 *tmp_stats)
1da177e4 6236{
8116f3cf 6237 int i = 0, k;
4cf1653a 6238 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c
JP
6239 struct stat_block *stats = sp->mac_control.stats_info;
6240 struct swStat *swstats = &stats->sw_stat;
6241 struct xpakStat *xstats = &stats->xpak_stat;
1da177e4 6242
7ba013ac 6243 s2io_updt_stats(sp);
541ae68f 6244 tmp_stats[i++] =
ffb5df6c
JP
6245 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
6246 le32_to_cpu(stats->tmac_frms);
541ae68f 6247 tmp_stats[i++] =
ffb5df6c
JP
6248 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6249 le32_to_cpu(stats->tmac_data_octets);
6250 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
541ae68f 6251 tmp_stats[i++] =
ffb5df6c
JP
6252 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6253 le32_to_cpu(stats->tmac_mcst_frms);
541ae68f 6254 tmp_stats[i++] =
ffb5df6c
JP
6255 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6256 le32_to_cpu(stats->tmac_bcst_frms);
6257 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
bd1034f0 6258 tmp_stats[i++] =
ffb5df6c
JP
6259 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6260 le32_to_cpu(stats->tmac_ttl_octets);
bd1034f0 6261 tmp_stats[i++] =
ffb5df6c
JP
6262 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6263 le32_to_cpu(stats->tmac_ucst_frms);
d44570e4 6264 tmp_stats[i++] =
ffb5df6c
JP
6265 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6266 le32_to_cpu(stats->tmac_nucst_frms);
541ae68f 6267 tmp_stats[i++] =
ffb5df6c
JP
6268 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6269 le32_to_cpu(stats->tmac_any_err_frms);
6270 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6271 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
541ae68f 6272 tmp_stats[i++] =
ffb5df6c
JP
6273 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6274 le32_to_cpu(stats->tmac_vld_ip);
541ae68f 6275 tmp_stats[i++] =
ffb5df6c
JP
6276 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6277 le32_to_cpu(stats->tmac_drop_ip);
541ae68f 6278 tmp_stats[i++] =
ffb5df6c
JP
6279 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6280 le32_to_cpu(stats->tmac_icmp);
541ae68f 6281 tmp_stats[i++] =
ffb5df6c
JP
6282 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6283 le32_to_cpu(stats->tmac_rst_tcp);
6284 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6285 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6286 le32_to_cpu(stats->tmac_udp);
541ae68f 6287 tmp_stats[i++] =
ffb5df6c
JP
6288 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6289 le32_to_cpu(stats->rmac_vld_frms);
541ae68f 6290 tmp_stats[i++] =
ffb5df6c
JP
6291 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6292 le32_to_cpu(stats->rmac_data_octets);
6293 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6294 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
541ae68f 6295 tmp_stats[i++] =
ffb5df6c
JP
6296 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6297 le32_to_cpu(stats->rmac_vld_mcst_frms);
541ae68f 6298 tmp_stats[i++] =
ffb5df6c
JP
6299 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6300 le32_to_cpu(stats->rmac_vld_bcst_frms);
6301 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6302 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6303 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6304 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6305 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
d44570e4 6306 tmp_stats[i++] =
ffb5df6c
JP
6307 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6308 le32_to_cpu(stats->rmac_ttl_octets);
bd1034f0 6309 tmp_stats[i++] =
ffb5df6c
JP
6310 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6311 | le32_to_cpu(stats->rmac_accepted_ucst_frms);
d44570e4 6312 tmp_stats[i++] =
ffb5df6c
JP
6313 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6314 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
541ae68f 6315 tmp_stats[i++] =
ffb5df6c
JP
6316 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6317 le32_to_cpu(stats->rmac_discarded_frms);
d44570e4 6318 tmp_stats[i++] =
ffb5df6c
JP
6319 (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6320 << 32 | le32_to_cpu(stats->rmac_drop_events);
6321 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6322 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
541ae68f 6323 tmp_stats[i++] =
ffb5df6c
JP
6324 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6325 le32_to_cpu(stats->rmac_usized_frms);
541ae68f 6326 tmp_stats[i++] =
ffb5df6c
JP
6327 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6328 le32_to_cpu(stats->rmac_osized_frms);
541ae68f 6329 tmp_stats[i++] =
ffb5df6c
JP
6330 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6331 le32_to_cpu(stats->rmac_frag_frms);
541ae68f 6332 tmp_stats[i++] =
ffb5df6c
JP
6333 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6334 le32_to_cpu(stats->rmac_jabber_frms);
6335 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6336 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6337 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6338 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6339 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6340 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
bd1034f0 6341 tmp_stats[i++] =
ffb5df6c
JP
6342 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6343 le32_to_cpu(stats->rmac_ip);
6344 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6345 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
bd1034f0 6346 tmp_stats[i++] =
ffb5df6c
JP
6347 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6348 le32_to_cpu(stats->rmac_drop_ip);
bd1034f0 6349 tmp_stats[i++] =
ffb5df6c
JP
6350 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6351 le32_to_cpu(stats->rmac_icmp);
6352 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
bd1034f0 6353 tmp_stats[i++] =
ffb5df6c
JP
6354 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6355 le32_to_cpu(stats->rmac_udp);
541ae68f 6356 tmp_stats[i++] =
ffb5df6c
JP
6357 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6358 le32_to_cpu(stats->rmac_err_drp_udp);
6359 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6360 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6361 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6362 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6363 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6364 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6365 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6366 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6367 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6368 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6369 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6370 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6371 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6372 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6373 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6374 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6375 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
541ae68f 6376 tmp_stats[i++] =
ffb5df6c
JP
6377 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6378 le32_to_cpu(stats->rmac_pause_cnt);
6379 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6380 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
541ae68f 6381 tmp_stats[i++] =
ffb5df6c
JP
6382 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6383 le32_to_cpu(stats->rmac_accepted_ip);
6384 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6385 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6386 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6387 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6388 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6389 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6390 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6391 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6392 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6393 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6394 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6395 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6396 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6397 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6398 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6399 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6400 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6401 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6402 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
fa1f0cb3
SS
6403
6404 /* Enhanced statistics exist only for Hercules */
d44570e4 6405 if (sp->device_type == XFRAME_II_DEVICE) {
fa1f0cb3 6406 tmp_stats[i++] =
ffb5df6c 6407 le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
fa1f0cb3 6408 tmp_stats[i++] =
ffb5df6c 6409 le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
fa1f0cb3 6410 tmp_stats[i++] =
ffb5df6c
JP
6411 le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6412 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6413 tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6414 tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6415 tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6416 tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6417 tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6418 tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6419 tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6420 tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6421 tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6422 tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6423 tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6424 tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
fa1f0cb3
SS
6425 }
6426
7ba013ac 6427 tmp_stats[i++] = 0;
ffb5df6c
JP
6428 tmp_stats[i++] = swstats->single_ecc_errs;
6429 tmp_stats[i++] = swstats->double_ecc_errs;
6430 tmp_stats[i++] = swstats->parity_err_cnt;
6431 tmp_stats[i++] = swstats->serious_err_cnt;
6432 tmp_stats[i++] = swstats->soft_reset_cnt;
6433 tmp_stats[i++] = swstats->fifo_full_cnt;
8116f3cf 6434 for (k = 0; k < MAX_RX_RINGS; k++)
ffb5df6c
JP
6435 tmp_stats[i++] = swstats->ring_full_cnt[k];
6436 tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6437 tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6438 tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6439 tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6440 tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6441 tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6442 tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6443 tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6444 tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6445 tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6446 tmp_stats[i++] = xstats->warn_laser_output_power_high;
6447 tmp_stats[i++] = xstats->warn_laser_output_power_low;
6448 tmp_stats[i++] = swstats->clubbed_frms_cnt;
6449 tmp_stats[i++] = swstats->sending_both;
6450 tmp_stats[i++] = swstats->outof_sequence_pkts;
6451 tmp_stats[i++] = swstats->flush_max_pkts;
6452 if (swstats->num_aggregations) {
6453 u64 tmp = swstats->sum_avg_pkts_aggregated;
bd1034f0 6454 int count = 0;
6aa20a22 6455 /*
bd1034f0
AR
6456 * Since 64-bit divide does not work on all platforms,
6457 * do repeated subtraction.
6458 */
ffb5df6c
JP
6459 while (tmp >= swstats->num_aggregations) {
6460 tmp -= swstats->num_aggregations;
bd1034f0
AR
6461 count++;
6462 }
6463 tmp_stats[i++] = count;
d44570e4 6464 } else
bd1034f0 6465 tmp_stats[i++] = 0;
ffb5df6c
JP
6466 tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6467 tmp_stats[i++] = swstats->pci_map_fail_cnt;
6468 tmp_stats[i++] = swstats->watchdog_timer_cnt;
6469 tmp_stats[i++] = swstats->mem_allocated;
6470 tmp_stats[i++] = swstats->mem_freed;
6471 tmp_stats[i++] = swstats->link_up_cnt;
6472 tmp_stats[i++] = swstats->link_down_cnt;
6473 tmp_stats[i++] = swstats->link_up_time;
6474 tmp_stats[i++] = swstats->link_down_time;
6475
6476 tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6477 tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6478 tmp_stats[i++] = swstats->tx_parity_err_cnt;
6479 tmp_stats[i++] = swstats->tx_link_loss_cnt;
6480 tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6481
6482 tmp_stats[i++] = swstats->rx_parity_err_cnt;
6483 tmp_stats[i++] = swstats->rx_abort_cnt;
6484 tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6485 tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6486 tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6487 tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6488 tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6489 tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6490 tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6491 tmp_stats[i++] = swstats->tda_err_cnt;
6492 tmp_stats[i++] = swstats->pfc_err_cnt;
6493 tmp_stats[i++] = swstats->pcc_err_cnt;
6494 tmp_stats[i++] = swstats->tti_err_cnt;
6495 tmp_stats[i++] = swstats->tpa_err_cnt;
6496 tmp_stats[i++] = swstats->sm_err_cnt;
6497 tmp_stats[i++] = swstats->lso_err_cnt;
6498 tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6499 tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6500 tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6501 tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6502 tmp_stats[i++] = swstats->rc_err_cnt;
6503 tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6504 tmp_stats[i++] = swstats->rpa_err_cnt;
6505 tmp_stats[i++] = swstats->rda_err_cnt;
6506 tmp_stats[i++] = swstats->rti_err_cnt;
6507 tmp_stats[i++] = swstats->mc_err_cnt;
1da177e4
LT
6508}
6509
ac1f60db 6510static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4 6511{
d44570e4 6512 return XENA_REG_SPACE;
1da177e4
LT
6513}
6514
6515
ac1f60db 6516static int s2io_get_eeprom_len(struct net_device *dev)
1da177e4 6517{
d44570e4 6518 return XENA_EEPROM_SPACE;
1da177e4
LT
6519}
6520
b9f2c044 6521static int s2io_get_sset_count(struct net_device *dev, int sset)
1da177e4 6522{
4cf1653a 6523 struct s2io_nic *sp = netdev_priv(dev);
b9f2c044
JG
6524
6525 switch (sset) {
6526 case ETH_SS_TEST:
6527 return S2IO_TEST_LEN;
6528 case ETH_SS_STATS:
d44570e4 6529 switch (sp->device_type) {
b9f2c044
JG
6530 case XFRAME_I_DEVICE:
6531 return XFRAME_I_STAT_LEN;
6532 case XFRAME_II_DEVICE:
6533 return XFRAME_II_STAT_LEN;
6534 default:
6535 return 0;
6536 }
6537 default:
6538 return -EOPNOTSUPP;
6539 }
1da177e4 6540}
ac1f60db
AB
6541
6542static void s2io_ethtool_get_strings(struct net_device *dev,
d44570e4 6543 u32 stringset, u8 *data)
1da177e4 6544{
fa1f0cb3 6545 int stat_size = 0;
4cf1653a 6546 struct s2io_nic *sp = netdev_priv(dev);
fa1f0cb3 6547
1da177e4
LT
6548 switch (stringset) {
6549 case ETH_SS_TEST:
6550 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6551 break;
6552 case ETH_SS_STATS:
fa1f0cb3 6553 stat_size = sizeof(ethtool_xena_stats_keys);
d44570e4
JP
6554 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6555 if (sp->device_type == XFRAME_II_DEVICE) {
fa1f0cb3 6556 memcpy(data + stat_size,
d44570e4
JP
6557 &ethtool_enhanced_stats_keys,
6558 sizeof(ethtool_enhanced_stats_keys));
fa1f0cb3
SS
6559 stat_size += sizeof(ethtool_enhanced_stats_keys);
6560 }
6561
6562 memcpy(data + stat_size, &ethtool_driver_stats_keys,
d44570e4 6563 sizeof(ethtool_driver_stats_keys));
1da177e4
LT
6564 }
6565}
1da177e4 6566
c8f44aff 6567static int s2io_set_features(struct net_device *dev, netdev_features_t features)
958de193
JM
6568{
6569 struct s2io_nic *sp = netdev_priv(dev);
c8f44aff 6570 netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
958de193
JM
6571
6572 if (changed && netif_running(dev)) {
b437a8cc
MM
6573 int rc;
6574
958de193
JM
6575 s2io_stop_all_tx_queue(sp);
6576 s2io_card_down(sp);
b437a8cc 6577 dev->features = features;
958de193
JM
6578 rc = s2io_card_up(sp);
6579 if (rc)
6580 s2io_reset(sp);
6581 else
6582 s2io_start_all_tx_queue(sp);
b437a8cc
MM
6583
6584 return rc ? rc : 1;
958de193
JM
6585 }
6586
b437a8cc 6587 return 0;
958de193
JM
6588}
6589
/* ethtool entry points for the s2io driver; installed on the net_device
 * at probe time. Entries not listed (e.g. coalescing) are unsupported. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.set_phys_id = s2io_ethtool_set_led,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
};
6609
6610/**
20346722 6611 * s2io_ioctl - Entry point for the Ioctl
1da177e4 6612 * @dev : Device pointer.
d0ea5cbd 6613 * @rq : An IOCTL specefic structure, that can contain a pointer to
1da177e4
LT
6614 * a proprietary structure used to pass information to the driver.
6615 * @cmd : This is used to distinguish between the different commands that
6616 * can be passed to the IOCTL functions.
6617 * Description:
20346722
K
6618 * Currently there are no special functionality supported in IOCTL, hence
6619 * function always return EOPNOTSUPPORTED
1da177e4
LT
6620 */
6621
ac1f60db 6622static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
6623{
6624 return -EOPNOTSUPP;
6625}
6626
6627/**
6628 * s2io_change_mtu - entry point to change MTU size for the device.
6629 * @dev : device pointer.
6630 * @new_mtu : the new MTU size for the device.
6631 * Description: A driver entry point to change MTU size for the device.
6632 * Before changing the MTU the device must be stopped.
6633 * Return value:
6634 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6635 * file on failure.
6636 */
6637
ac1f60db 6638static int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4 6639{
4cf1653a 6640 struct s2io_nic *sp = netdev_priv(dev);
9f74ffde 6641 int ret = 0;
1da177e4 6642
1da177e4 6643 dev->mtu = new_mtu;
d8892c6e 6644 if (netif_running(dev)) {
3a3d5756 6645 s2io_stop_all_tx_queue(sp);
e6a8fee2 6646 s2io_card_down(sp);
9f74ffde
SH
6647 ret = s2io_card_up(sp);
6648 if (ret) {
d8892c6e 6649 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
b39d66a8 6650 __func__);
9f74ffde 6651 return ret;
d8892c6e 6652 }
3a3d5756 6653 s2io_wake_all_tx_queue(sp);
d8892c6e 6654 } else { /* Device is down */
1ee6dd77 6655 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d8892c6e
K
6656 u64 val64 = new_mtu;
6657
6658 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6659 }
1da177e4 6660
9f74ffde 6661 return ret;
1da177e4
LT
6662}
6663
1da177e4
LT
/**
 * s2io_set_link - Set the link status
 * @work: work struct containing a pointer to the device private structure
 *
 * Description: Deferred work handler that mirrors the hardware link
 * state into the driver/netdev state. On link-up it enables the
 * adapter (if quiescent) and turns the LED on; on link-down it turns
 * the LED off. Runs under rtnl_lock and is mutually exclusive with
 * card reset via the __S2IO_STATE_LINK_TASK bit.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic,
					    set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	/* Interface went down before the work ran; nothing to do. */
	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter only if it is not already enabled. */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/* Some subsystems drive the link LED via
				 * GPIO 0 instead of the adapter-control bit. */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					    nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* read back to post the write */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = true;
			} else {
				DBG_PRINT(ERR_DBG,
					  "%s: Error: device is not Quiescent\n",
					  dev->name);
				s2io_stop_all_tx_queue(nic);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			/* read back to post the write */
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 & (~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6746
/*
 * set_rxd_buffer_pointer - (re)attach buffers to a receive descriptor.
 *
 * Used by rxd_owner_bit_reset() while the card is being brought down:
 * every RxD must own valid buffers before its ownership bit is handed
 * back to hardware. If *skb is already set, the previously mapped DMA
 * addresses (*temp0..*temp2) are reused; otherwise a new skb is
 * allocated and mapped, and the mappings are cached in the temp
 * pointers for subsequent descriptors whose Host_Control is NULL.
 *
 * Returns 0 on success, -ENOMEM on allocation or DMA-mapping failure
 * (software stats are updated and the skb freed on the error path).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				  struct buffAdd *ba,
				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
				  u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		/* One-buffer mode: the whole frame lands in Buffer0. */
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name, "1 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       size - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the DMA addresses cached by a previous call. */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name,
					  "2 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* Buffer2 carries the payload (MTU + 4 bytes). */
			rxdp3->Buffer2_ptr = *temp2 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       dev->mtu + 4, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			/* Buffer0 holds the header area from the buffAdd pool. */
			rxdp3->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, ba->ba_0,
					       BUF0_LEN, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
				/* Unwind the Buffer2 mapping made above. */
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				dma_map_single(&sp->pdev->dev, ba->ba_1,
					       BUF1_LEN, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
				/* Unwind both earlier mappings. */
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer0_ptr,
						 BUF0_LEN, DMA_FROM_DEVICE);
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;

memalloc_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
491abf25 6847
1ee6dd77
RB
6848static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6849 int size)
5d3213cc
AR
6850{
6851 struct net_device *dev = sp->dev;
6852 if (sp->rxd_mode == RXD_MODE_1) {
d44570e4 6853 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
5d3213cc
AR
6854 } else if (sp->rxd_mode == RXD_MODE_3B) {
6855 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6856 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
d44570e4 6857 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
5d3213cc
AR
6858 }
6859}
6860
/*
 * rxd_owner_bit_reset - hand every Rx descriptor back to the hardware.
 *
 * Called while the card is being brought down: the NIC requires all
 * RxDs to carry valid buffers and hardware ownership to avoid a ring
 * bump, even though no Rx frames will be processed. Walks every ring,
 * block and descriptor, (re)attaching buffers and setting RXD_OWN_XENA.
 *
 * Always returns 0; an allocation failure simply stops the walk early.
 */
static int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		/* Each block holds rxd_count descriptors plus a link RxD. */
		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
				if (sp->rxd_mode == RXD_MODE_3B)
					ba = &ring->ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
							   &temp0_64,
							   &temp1_64,
							   &temp2_64,
							   size) == -ENOMEM) {
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				/* Buffer writes must be visible to the device
				 * before ownership is transferred below. */
				dma_wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;

}
6909
/*
 * s2io_add_isr - enable and register the adapter's interrupt handlers.
 *
 * If MSI-X is configured, enables it (falling back to INTA on failure),
 * snapshots the MSI-X table into the driver structure, then registers
 * one handler per ring vector (s2io_msix_ring_handle) and per alarm
 * vector (s2io_msix_fifo_handle). Any request_irq() failure tears down
 * the already-registered vectors and falls back to INTA. In INTA mode a
 * single shared handler (s2io_isr) is registered.
 *
 * Returns 0 on success, -1 if INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
				    MSIX_RING_TYPE) {
					snprintf(sp->desc[i],
						 sizeof(sp->desc[i]),
						 "%s:MSI-X-%d-RX",
						 dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_ring_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					   MSIX_ALARM_TYPE) {
					snprintf(sp->desc[i],
						 sizeof(sp->desc[i]),
						 "%s:MSI-X-%d-TX",
						 dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_fifo_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
				      sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						  "%s @Addr:0x%llx Data:0x%llx\n",
						  sp->desc[i],
						  (unsigned long long)
						  sp->msix_info[i].addr,
						  (unsigned long long)
						  ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Undo the vectors registered so far,
					 * then fall back to legacy INTA. */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						  "%s:MSI-X-%d registration "
						  "failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						  "%s: Defaulting to INTA\n",
						  dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			/* msix_rx_cnt also counted the alarm vector; the
			 * pre-decrement reports only the RX vectors. */
			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
			DBG_PRINT(INFO_DBG,
				  "MSI-X-TX entries enabled through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
d44570e4
JP
7005
7006static void s2io_rem_isr(struct s2io_nic *sp)
e6a8fee2 7007{
18b2b7bd
SH
7008 if (sp->config.intr_type == MSI_X)
7009 remove_msix_isr(sp);
7010 else
7011 remove_inta_isr(sp);
e6a8fee2
AR
7012}
7013
/*
 * do_s2io_card_down - quiesce and shut down the adapter.
 * @sp: device private structure.
 * @do_io: nonzero when register I/O is still safe (normal path); zero
 *         skips stop_nic/quiescence-poll/reset (e.g. after a PCI error —
 *         NOTE(review): inferred from the do_io guards, confirm callers).
 *
 * Sequence: stop the alarm timer, serialize against the link task,
 * clear CARD_UP, disable NAPI, stop NIC traffic, drop the ISRs,
 * report link down, poll for quiescence (replenishing Rx ownership
 * bits per HW requirement), reset, then free all Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
		msleep(50);
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi: per-ring instances under MSI-X, one otherwise. */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type == MSI_X) {
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we are
		 * just setting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		/* Poll at 50 ms intervals, give up after 10 tries. */
		msleep(50);
		cnt++;
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
				  "adapter status reads 0x%llx\n",
				  (unsigned long long)val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	/* Allow the link task to run again. */
	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7088
/*
 * s2io_card_down - normal shutdown path: bring the card down with
 * register I/O permitted (stop NIC, poll quiescence, reset).
 */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
7093
d44570e4 7094static int s2io_card_up(struct s2io_nic *sp)
1da177e4 7095{
cc6e7c44 7096 int i, ret = 0;
1da177e4 7097 struct config_param *config;
ffb5df6c 7098 struct mac_info *mac_control;
64699336 7099 struct net_device *dev = sp->dev;
e6a8fee2 7100 u16 interruptible;
1da177e4
LT
7101
7102 /* Initialize the H/W I/O registers */
9f74ffde
SH
7103 ret = init_nic(sp);
7104 if (ret != 0) {
1da177e4
LT
7105 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7106 dev->name);
9f74ffde
SH
7107 if (ret != -EIO)
7108 s2io_reset(sp);
7109 return ret;
1da177e4
LT
7110 }
7111
20346722
K
7112 /*
7113 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
7114 * Rx ring and initializing buffers into 30 Rx blocks
7115 */
1da177e4 7116 config = &sp->config;
ffb5df6c 7117 mac_control = &sp->mac_control;
1da177e4
LT
7118
7119 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7120 struct ring_info *ring = &mac_control->rings[i];
7121
7122 ring->mtu = dev->mtu;
f0c54ace 7123 ring->lro = !!(dev->features & NETIF_F_LRO);
13d866a9 7124 ret = fill_rx_buffers(sp, ring, 1);
0425b46a 7125 if (ret) {
1da177e4
LT
7126 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7127 dev->name);
7128 s2io_reset(sp);
7129 free_rx_buffers(sp);
7130 return -ENOMEM;
7131 }
7132 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
13d866a9 7133 ring->rx_bufs_left);
1da177e4 7134 }
5f490c96
SH
7135
7136 /* Initialise napi */
f61e0a35 7137 if (config->napi) {
f61e0a35
SH
7138 if (config->intr_type == MSI_X) {
7139 for (i = 0; i < sp->config.rx_ring_num; i++)
7140 napi_enable(&sp->mac_control.rings[i].napi);
7141 } else {
7142 napi_enable(&sp->napi);
7143 }
7144 }
5f490c96 7145
19a60522
SS
7146 /* Maintain the state prior to the open */
7147 if (sp->promisc_flg)
7148 sp->promisc_flg = 0;
7149 if (sp->m_cast_flg) {
7150 sp->m_cast_flg = 0;
d44570e4 7151 sp->all_multi_pos = 0;
19a60522 7152 }
1da177e4
LT
7153
7154 /* Setting its receive mode */
5ce7f3f4 7155 s2io_set_multicast(dev, true);
1da177e4 7156
f0c54ace 7157 if (dev->features & NETIF_F_LRO) {
b41477f3 7158 /* Initialize max aggregatable pkts per session based on MTU */
7d3d0439 7159 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
d44570e4 7160 /* Check if we can use (if specified) user provided value */
7d3d0439
RA
7161 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7162 sp->lro_max_aggr_per_sess = lro_max_pkts;
7163 }
7164
1da177e4
LT
7165 /* Enable Rx Traffic and interrupts on the NIC */
7166 if (start_nic(sp)) {
7167 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
1da177e4 7168 s2io_reset(sp);
e6a8fee2
AR
7169 free_rx_buffers(sp);
7170 return -ENODEV;
7171 }
7172
7173 /* Add interrupt service routine */
7174 if (s2io_add_isr(sp) != 0) {
eaae7f72 7175 if (sp->config.intr_type == MSI_X)
e6a8fee2
AR
7176 s2io_rem_isr(sp);
7177 s2io_reset(sp);
1da177e4
LT
7178 free_rx_buffers(sp);
7179 return -ENODEV;
7180 }
7181
e84a2ac9
KC
7182 timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
7183 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
25fff88e 7184
01e16faa
SH
7185 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7186
e6a8fee2 7187 /* Enable select interrupts */
9caab458 7188 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
01e16faa
SH
7189 if (sp->config.intr_type != INTA) {
7190 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7191 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7192 } else {
e6a8fee2 7193 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 7194 interruptible |= TX_PIC_INTR;
e6a8fee2
AR
7195 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7196 }
7197
1da177e4
LT
7198 return 0;
7199}
7200
/**
 * s2io_restart_nic - Resets the NIC.
 * @work : work struct containing a pointer to the device private structure
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watch dog routine which is run holding a
 * spin lock.
 */

static void s2io_restart_nic(struct work_struct *work)
{
	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
	struct net_device *dev = sp->dev;

	/* rtnl_lock serializes this reset against concurrent
	 * open/close/configuration of the device. */
	rtnl_lock();

	/* The interface may have been brought down while this work item
	 * was queued; in that case there is nothing to restart. */
	if (!netif_running(dev))
		goto out_unlock;

	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
	}
	s2io_wake_all_tx_queue(sp);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
out_unlock:
	rtnl_unlock();
}
7230
20346722
K
7231/**
7232 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4 7233 * @dev : Pointer to net device structure
d0ea5cbd 7234 * @txqueue: index of the hanging queue
1da177e4
LT
7235 * Description:
7236 * This function is triggered if the Tx Queue is stopped
7237 * for a pre-defined amount of time when the Interface is still up.
7238 * If the Interface is jammed in such a situation, the hardware is
7239 * reset (by s2io_close) and restarted again (by s2io_open) to
7240 * overcome any problem that might have been caused in the hardware.
7241 * Return value:
7242 * void
7243 */
7244
0290bd29 7245static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
1da177e4 7246{
4cf1653a 7247 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c 7248 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7249
7250 if (netif_carrier_ok(dev)) {
ffb5df6c 7251 swstats->watchdog_timer_cnt++;
1da177e4 7252 schedule_work(&sp->rst_timer_task);
ffb5df6c 7253 swstats->soft_reset_cnt++;
1da177e4
LT
7254 }
7255}
7256
/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @ring_data : the ring from which this RxD was extracted.
 * @rxdp: descriptor
 * Description:
 * This function is called by the Rx interrupt serivce routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It mainly checks if the checksum is OK, if so adds it to the
 * SKBs cksum variable, increments the Rx packet count and passes the SKB
 * to the upper layer. If the checksum is wrong, it increments the Rx
 * packet error count, frees the SKB and returns error.
 * Return value:
 * SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = ring_data->dev;
	/* The skb pointer was stashed in the descriptor's Host_Control
	 * field when the buffer was posted. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long)rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1)
			swstats->parity_err_cnt++;

		/* RXD_T_CODE occupies the high bits of Control_1; shift
		 * down to get the transfer code and bump the matching
		 * per-error counter. */
		err_mask = err >> 48;
		switch (err_mask) {
		case 1:
			swstats->rx_parity_err_cnt++;
			break;

		case 2:
			swstats->rx_abort_cnt++;
			break;

		case 3:
			swstats->rx_parity_abort_cnt++;
			break;

		case 4:
			swstats->rx_rda_fail_cnt++;
			break;

		case 5:
			swstats->rx_unkn_prot_cnt++;
			break;

		case 6:
			swstats->rx_fcs_err_cnt++;
			break;

		case 7:
			swstats->rx_buf_size_err_cnt++;
			break;

		case 8:
			swstats->rx_rxd_corrupt_cnt++;
			break;

		case 15:
			swstats->rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				  dev->name, err_mask);
			dev->stats.rx_crc_errors++;
			swstats->mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Descriptor is now consumed; detach the skb from it. */
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* 1-buffer mode: the whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		skb_put(skb, len);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 3-buffer mode: copy the header (buffer 0) in front of
		 * the payload (buffer 2) already in the skb. */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Hardware checksum offload only applies to unfragmented TCP/UDP
	 * frames (LRO rings never see IP fragments as candidates) and
	 * only when RXCSUM is enabled on the netdev. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
	    ((!ring_data->lro) ||
	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
	    (dev->features & NETIF_F_RXCSUM)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len = 0;
				u8 *tcp;
				int ret = 0;

				/* LRO classification: the return code
				 * selects how this segment is merged into
				 * (or flushed from) an aggregation session. */
				ret = s2io_club_tcp_session(ring_data,
							    skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro, skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro, skb, tcp_len);
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					swstats->flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len = lro->frags_len;
					swstats->sending_both++;
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not L2 aggregatable */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						  "%s: Samadhana!!\n",
						  __func__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb_checksum_none_assert(skb);
		}
	} else
		skb_checksum_none_assert(skb);

	swstats->mem_freed += skb->truesize;
send_up:
	/* Hand the (possibly aggregated) frame to the stack. */
	skb_record_rx_queue(skb, ring_no);
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7441
7442/**
7443 * s2io_link - stops/starts the Tx queue.
7444 * @sp : private member of the device structure, which is a pointer to the
7445 * s2io_nic structure.
7446 * @link : inidicates whether link is UP/DOWN.
7447 * Description:
7448 * This function stops/starts the Tx queue depending on whether the link
20346722
K
7449 * status of the NIC is is down or up. This is called by the Alarm
7450 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
7451 * Return value:
7452 * void.
7453 */
7454
d44570e4 7455static void s2io_link(struct s2io_nic *sp, int link)
1da177e4 7456{
64699336 7457 struct net_device *dev = sp->dev;
ffb5df6c 7458 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7459
7460 if (link != sp->last_link_state) {
5ce7f3f4 7461 init_tti(sp, link, false);
1da177e4
LT
7462 if (link == LINK_DOWN) {
7463 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
3a3d5756 7464 s2io_stop_all_tx_queue(sp);
1da177e4 7465 netif_carrier_off(dev);
ffb5df6c
JP
7466 if (swstats->link_up_cnt)
7467 swstats->link_up_time =
7468 jiffies - sp->start_time;
7469 swstats->link_down_cnt++;
1da177e4
LT
7470 } else {
7471 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
ffb5df6c
JP
7472 if (swstats->link_down_cnt)
7473 swstats->link_down_time =
d44570e4 7474 jiffies - sp->start_time;
ffb5df6c 7475 swstats->link_up_cnt++;
1da177e4 7476 netif_carrier_on(dev);
3a3d5756 7477 s2io_wake_all_tx_queue(sp);
1da177e4
LT
7478 }
7479 }
7480 sp->last_link_state = link;
491976b2 7481 sp->start_time = jiffies;
1da177e4
LT
7482}
7483
20346722
K
7484/**
7485 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7486 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
7487 * s2io_nic structure.
7488 * Description:
7489 * This function initializes a few of the PCI and PCI-X configuration registers
7490 * with recommended values.
7491 * Return value:
7492 * void
7493 */
7494
d44570e4 7495static void s2io_init_pci(struct s2io_nic *sp)
1da177e4 7496{
20346722 7497 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
7498
7499 /* Enable Data Parity Error Recovery in PCI-X command register. */
7500 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7501 &(pcix_cmd));
1da177e4 7502 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7503 (pcix_cmd | 1));
1da177e4 7504 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7505 &(pcix_cmd));
1da177e4
LT
7506
7507 /* Set the PErr Response bit in PCI command register. */
7508 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7509 pci_write_config_word(sp->pdev, PCI_COMMAND,
7510 (pci_cmd | PCI_COMMAND_PARITY));
7511 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
7512}
7513
3a3d5756 7514static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
d44570e4 7515 u8 *dev_multiq)
9dc737a7 7516{
1853e2e1
JM
7517 int i;
7518
d44570e4 7519 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
9e39f7c5 7520 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
d44570e4 7521 "(%d) not supported\n", tx_fifo_num);
6cfc482b
SH
7522
7523 if (tx_fifo_num < 1)
7524 tx_fifo_num = 1;
7525 else
7526 tx_fifo_num = MAX_TX_FIFOS;
7527
9e39f7c5 7528 DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
9dc737a7 7529 }
2fda096d 7530
6cfc482b 7531 if (multiq)
3a3d5756 7532 *dev_multiq = multiq;
6cfc482b
SH
7533
7534 if (tx_steering_type && (1 == tx_fifo_num)) {
7535 if (tx_steering_type != TX_DEFAULT_STEERING)
7536 DBG_PRINT(ERR_DBG,
9e39f7c5 7537 "Tx steering is not supported with "
d44570e4 7538 "one fifo. Disabling Tx steering.\n");
6cfc482b
SH
7539 tx_steering_type = NO_STEERING;
7540 }
7541
7542 if ((tx_steering_type < NO_STEERING) ||
d44570e4
JP
7543 (tx_steering_type > TX_DEFAULT_STEERING)) {
7544 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
7545 "Requested transmit steering not supported\n");
7546 DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
6cfc482b 7547 tx_steering_type = NO_STEERING;
3a3d5756
SH
7548 }
7549
0425b46a 7550 if (rx_ring_num > MAX_RX_RINGS) {
d44570e4 7551 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
7552 "Requested number of rx rings not supported\n");
7553 DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
d44570e4 7554 MAX_RX_RINGS);
0425b46a 7555 rx_ring_num = MAX_RX_RINGS;
9dc737a7 7556 }
0425b46a 7557
eccb8628 7558 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
9e39f7c5 7559 DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
9dc737a7
AR
7560 "Defaulting to INTA\n");
7561 *dev_intr_type = INTA;
7562 }
596c5c97 7563
9dc737a7 7564 if ((*dev_intr_type == MSI_X) &&
d44570e4
JP
7565 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7566 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
9e39f7c5 7567 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
d44570e4 7568 "Defaulting to INTA\n");
9dc737a7
AR
7569 *dev_intr_type = INTA;
7570 }
fb6a825b 7571
6d517a27 7572 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
9e39f7c5
JP
7573 DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7574 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
6d517a27 7575 rx_ring_mode = 1;
9dc737a7 7576 }
1853e2e1
JM
7577
7578 for (i = 0; i < MAX_RX_RINGS; i++)
7579 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7580 DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7581 "supported\nDefaulting to %d\n",
7582 MAX_RX_BLOCKS_PER_RING);
7583 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7584 }
7585
9dc737a7
AR
7586 return SUCCESS;
7587}
7588
9fc93a41 7589/**
d0ea5cbd 7590 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively.
b7c5678f 7591 * @nic: device private variable
d0ea5cbd
JB
7592 * @ds_codepoint: data
7593 * @ring: ring index
9fc93a41
SS
7594 * Description: The function configures the receive steering to
7595 * desired receive ring.
7596 * Return Value: SUCCESS on success and
7597 * '-1' on failure (endian settings incorrect).
7598 */
7599static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7600{
7601 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7602 register u64 val64 = 0;
7603
7604 if (ds_codepoint > 63)
7605 return FAILURE;
7606
7607 val64 = RTS_DS_MEM_DATA(ring);
7608 writeq(val64, &bar0->rts_ds_mem_data);
7609
7610 val64 = RTS_DS_MEM_CTRL_WE |
7611 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7612 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7613
7614 writeq(val64, &bar0->rts_ds_mem_ctrl);
7615
7616 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
d44570e4 7617 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
5ce7f3f4 7618 S2IO_BIT_RESET, true);
9fc93a41
SS
7619}
7620
/*
 * Driver entry points handed to the networking core via dev->netdev_ops
 * (installed in s2io_init_nic). Each callback maps a standard
 * net_device operation onto its s2io implementation.
 */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open = s2io_open,
	.ndo_stop = s2io_close,
	.ndo_get_stats = s2io_get_stats,
	.ndo_start_xmit = s2io_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = s2io_ndo_set_multicast,
	.ndo_eth_ioctl = s2io_ioctl,
	.ndo_set_mac_address = s2io_set_mac_addr,
	.ndo_change_mtu = s2io_change_mtu,
	.ndo_set_features = s2io_set_features,
	.ndo_tx_timeout = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = s2io_netpoll,
#endif
};
7637
1da177e4 7638/**
20346722 7639 * s2io_init_nic - Initialization of the adapter .
1da177e4
LT
7640 * @pdev : structure containing the PCI related information of the device.
7641 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7642 * Description:
7643 * The function initializes an adapter identified by the pci_dec structure.
20346722
K
7644 * All OS related initialization including memory and device structure and
7645 * initlaization of the device private variable is done. Also the swapper
7646 * control register is initialized to enable read and write into the I/O
1da177e4
LT
7647 * registers of the device.
7648 * Return value:
7649 * returns 0 on success and negative on failure.
7650 */
7651
3a036ce5 7652static int
1da177e4
LT
7653s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7654{
1ee6dd77 7655 struct s2io_nic *sp;
1da177e4 7656 struct net_device *dev;
1da177e4 7657 int i, j, ret;
f957bcf0 7658 int dma_flag = false;
1da177e4
LT
7659 u32 mac_up, mac_down;
7660 u64 val64 = 0, tmp64 = 0;
1ee6dd77 7661 struct XENA_dev_config __iomem *bar0 = NULL;
1da177e4 7662 u16 subid;
1da177e4 7663 struct config_param *config;
ffb5df6c 7664 struct mac_info *mac_control;
541ae68f 7665 int mode;
cc6e7c44 7666 u8 dev_intr_type = intr_type;
3a3d5756 7667 u8 dev_multiq = 0;
1da177e4 7668
3a3d5756
SH
7669 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7670 if (ret)
9dc737a7 7671 return ret;
1da177e4 7672
d44570e4
JP
7673 ret = pci_enable_device(pdev);
7674 if (ret) {
1da177e4 7675 DBG_PRINT(ERR_DBG,
9e39f7c5 7676 "%s: pci_enable_device failed\n", __func__);
1da177e4
LT
7677 return ret;
7678 }
7679
fb059b26 7680 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
9e39f7c5 7681 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
f957bcf0 7682 dma_flag = true;
fb059b26 7683 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1da177e4 7684 DBG_PRINT(ERR_DBG,
fb059b26 7685 "Unable to obtain 64bit DMA for coherent allocations\n");
1da177e4
LT
7686 pci_disable_device(pdev);
7687 return -ENOMEM;
7688 }
fb059b26 7689 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
9e39f7c5 7690 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
1da177e4
LT
7691 } else {
7692 pci_disable_device(pdev);
7693 return -ENOMEM;
7694 }
d44570e4
JP
7695 ret = pci_request_regions(pdev, s2io_driver_name);
7696 if (ret) {
9e39f7c5 7697 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
d44570e4 7698 __func__, ret);
eccb8628
VP
7699 pci_disable_device(pdev);
7700 return -ENODEV;
1da177e4 7701 }
3a3d5756 7702 if (dev_multiq)
6cfc482b 7703 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
3a3d5756 7704 else
b19fa1fa 7705 dev = alloc_etherdev(sizeof(struct s2io_nic));
1da177e4 7706 if (dev == NULL) {
1da177e4
LT
7707 pci_disable_device(pdev);
7708 pci_release_regions(pdev);
7709 return -ENODEV;
7710 }
7711
7712 pci_set_master(pdev);
7713 pci_set_drvdata(pdev, dev);
1da177e4
LT
7714 SET_NETDEV_DEV(dev, &pdev->dev);
7715
7716 /* Private member variable initialized to s2io NIC structure */
4cf1653a 7717 sp = netdev_priv(dev);
1da177e4
LT
7718 sp->dev = dev;
7719 sp->pdev = pdev;
1da177e4 7720 sp->high_dma_flag = dma_flag;
f957bcf0 7721 sp->device_enabled_once = false;
da6971d8
AR
7722 if (rx_ring_mode == 1)
7723 sp->rxd_mode = RXD_MODE_1;
7724 if (rx_ring_mode == 2)
7725 sp->rxd_mode = RXD_MODE_3B;
da6971d8 7726
eaae7f72 7727 sp->config.intr_type = dev_intr_type;
1da177e4 7728
541ae68f 7729 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
d44570e4 7730 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
541ae68f
K
7731 sp->device_type = XFRAME_II_DEVICE;
7732 else
7733 sp->device_type = XFRAME_I_DEVICE;
7734
6aa20a22 7735
1da177e4
LT
7736 /* Initialize some PCI/PCI-X fields of the NIC. */
7737 s2io_init_pci(sp);
7738
20346722 7739 /*
1da177e4 7740 * Setting the device configuration parameters.
20346722
K
7741 * Most of these parameters can be specified by the user during
7742 * module insertion as they are module loadable parameters. If
7743 * these parameters are not not specified during load time, they
1da177e4
LT
7744 * are initialized with default values.
7745 */
1da177e4 7746 config = &sp->config;
ffb5df6c 7747 mac_control = &sp->mac_control;
1da177e4 7748
596c5c97 7749 config->napi = napi;
6cfc482b 7750 config->tx_steering_type = tx_steering_type;
596c5c97 7751
1da177e4 7752 /* Tx side parameters. */
6cfc482b
SH
7753 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7754 config->tx_fifo_num = MAX_TX_FIFOS;
7755 else
7756 config->tx_fifo_num = tx_fifo_num;
7757
7758 /* Initialize the fifos used for tx steering */
7759 if (config->tx_fifo_num < 5) {
d44570e4
JP
7760 if (config->tx_fifo_num == 1)
7761 sp->total_tcp_fifos = 1;
7762 else
7763 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7764 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7765 sp->total_udp_fifos = 1;
7766 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
6cfc482b
SH
7767 } else {
7768 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
d44570e4 7769 FIFO_OTHER_MAX_NUM);
6cfc482b
SH
7770 sp->udp_fifo_idx = sp->total_tcp_fifos;
7771 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7772 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7773 }
7774
3a3d5756 7775 config->multiq = dev_multiq;
6cfc482b 7776 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7777 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7778
7779 tx_cfg->fifo_len = tx_fifo_len[i];
7780 tx_cfg->fifo_priority = i;
1da177e4
LT
7781 }
7782
20346722
K
7783 /* mapping the QoS priority to the configured fifos */
7784 for (i = 0; i < MAX_TX_FIFOS; i++)
3a3d5756 7785 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
20346722 7786
6cfc482b
SH
7787 /* map the hashing selector table to the configured fifos */
7788 for (i = 0; i < config->tx_fifo_num; i++)
7789 sp->fifo_selector[i] = fifo_selector[i];
7790
7791
1da177e4
LT
7792 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7793 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7794 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7795
7796 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7797 if (tx_cfg->fifo_len < 65) {
1da177e4
LT
7798 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7799 break;
7800 }
7801 }
fed5eccd
AR
7802 /* + 2 because one Txd for skb->data and one Txd for UFO */
7803 config->max_txds = MAX_SKB_FRAGS + 2;
1da177e4
LT
7804
7805 /* Rx side parameters. */
1da177e4 7806 config->rx_ring_num = rx_ring_num;
0425b46a 7807 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7808 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7809 struct ring_info *ring = &mac_control->rings[i];
7810
7811 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7812 rx_cfg->ring_priority = i;
7813 ring->rx_bufs_left = 0;
7814 ring->rxd_mode = sp->rxd_mode;
7815 ring->rxd_count = rxd_count[sp->rxd_mode];
7816 ring->pdev = sp->pdev;
7817 ring->dev = sp->dev;
1da177e4
LT
7818 }
7819
7820 for (i = 0; i < rx_ring_num; i++) {
13d866a9
JP
7821 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7822
7823 rx_cfg->ring_org = RING_ORG_BUFF1;
7824 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
1da177e4
LT
7825 }
7826
7827 /* Setting Mac Control parameters */
7828 mac_control->rmac_pause_time = rmac_pause_time;
7829 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7830 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7831
7832
1da177e4
LT
7833 /* initialize the shared memory used by the NIC and the host */
7834 if (init_shared_mem(sp)) {
d44570e4 7835 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
1da177e4
LT
7836 ret = -ENOMEM;
7837 goto mem_alloc_failed;
7838 }
7839
275f165f 7840 sp->bar0 = pci_ioremap_bar(pdev, 0);
1da177e4 7841 if (!sp->bar0) {
19a60522 7842 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
1da177e4
LT
7843 dev->name);
7844 ret = -ENOMEM;
7845 goto bar0_remap_failed;
7846 }
7847
275f165f 7848 sp->bar1 = pci_ioremap_bar(pdev, 2);
1da177e4 7849 if (!sp->bar1) {
19a60522 7850 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
1da177e4
LT
7851 dev->name);
7852 ret = -ENOMEM;
7853 goto bar1_remap_failed;
7854 }
7855
1da177e4
LT
7856 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7857 for (j = 0; j < MAX_TX_FIFOS; j++) {
43d620c8 7858 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
1da177e4
LT
7859 }
7860
7861 /* Driver entry points */
04025095 7862 dev->netdev_ops = &s2io_netdev_ops;
7ad24ea4 7863 dev->ethtool_ops = &netdev_ethtool_ops;
b437a8cc
MM
7864 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7865 NETIF_F_TSO | NETIF_F_TSO6 |
7866 NETIF_F_RXCSUM | NETIF_F_LRO;
7867 dev->features |= dev->hw_features |
f646968f 7868 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
f957bcf0 7869 if (sp->high_dma_flag == true)
1da177e4 7870 dev->features |= NETIF_F_HIGHDMA;
1da177e4 7871 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
c4028958
DH
7872 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7873 INIT_WORK(&sp->set_link_task, s2io_set_link);
1da177e4 7874
e960fc5c 7875 pci_save_state(sp->pdev);
1da177e4
LT
7876
7877 /* Setting swapper control on the NIC, for proper reset operation */
7878 if (s2io_set_swapper(sp)) {
9e39f7c5 7879 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
1da177e4
LT
7880 dev->name);
7881 ret = -EAGAIN;
7882 goto set_swap_failed;
7883 }
7884
541ae68f
K
7885 /* Verify if the Herc works on the slot its placed into */
7886 if (sp->device_type & XFRAME_II_DEVICE) {
7887 mode = s2io_verify_pci_mode(sp);
7888 if (mode < 0) {
9e39f7c5
JP
7889 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7890 __func__);
541ae68f
K
7891 ret = -EBADSLT;
7892 goto set_swap_failed;
7893 }
7894 }
7895
f61e0a35
SH
7896 if (sp->config.intr_type == MSI_X) {
7897 sp->num_entries = config->rx_ring_num + 1;
7898 ret = s2io_enable_msi_x(sp);
7899
7900 if (!ret) {
7901 ret = s2io_test_msi(sp);
7902 /* rollback MSI-X, will re-enable during add_isr() */
7903 remove_msix_isr(sp);
7904 }
7905 if (ret) {
7906
7907 DBG_PRINT(ERR_DBG,
9e39f7c5 7908 "MSI-X requested but failed to enable\n");
f61e0a35
SH
7909 sp->config.intr_type = INTA;
7910 }
7911 }
7912
7913 if (config->intr_type == MSI_X) {
13d866a9
JP
7914 for (i = 0; i < config->rx_ring_num ; i++) {
7915 struct ring_info *ring = &mac_control->rings[i];
7916
7917 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7918 }
f61e0a35
SH
7919 } else {
7920 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7921 }
7922
541ae68f
K
7923 /* Not needed for Herc */
7924 if (sp->device_type & XFRAME_I_DEVICE) {
7925 /*
7926 * Fix for all "FFs" MAC address problems observed on
7927 * Alpha platforms
7928 */
7929 fix_mac_address(sp);
7930 s2io_reset(sp);
7931 }
1da177e4
LT
7932
7933 /*
1da177e4
LT
7934 * MAC address initialization.
7935 * For now only one mac address will be read and used.
7936 */
7937 bar0 = sp->bar0;
7938 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
d44570e4 7939 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
1da177e4 7940 writeq(val64, &bar0->rmac_addr_cmd_mem);
c92ca04b 7941 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4 7942 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5ce7f3f4 7943 S2IO_BIT_RESET, true);
1da177e4 7944 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4 7945 mac_down = (u32)tmp64;
1da177e4
LT
7946 mac_up = (u32) (tmp64 >> 32);
7947
1da177e4
LT
7948 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7949 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7950 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7951 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7952 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7953 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7954
1da177e4
LT
7955 /* Set the factory defined MAC address initially */
7956 dev->addr_len = ETH_ALEN;
7957 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7958
faa4f796
SH
7959 /* initialize number of multicast & unicast MAC entries variables */
7960 if (sp->device_type == XFRAME_I_DEVICE) {
7961 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7962 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7963 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7964 } else if (sp->device_type == XFRAME_II_DEVICE) {
7965 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7966 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7967 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7968 }
7969
18c310fb
JW
7970 /* MTU range: 46 - 9600 */
7971 dev->min_mtu = MIN_MTU;
7972 dev->max_mtu = S2IO_JUMBO_SIZE;
7973
faa4f796
SH
7974 /* store mac addresses from CAM to s2io_nic structure */
7975 do_s2io_store_unicast_mc(sp);
7976
f61e0a35
SH
7977 /* Configure MSIX vector for number of rings configured plus one */
7978 if ((sp->device_type == XFRAME_II_DEVICE) &&
d44570e4 7979 (config->intr_type == MSI_X))
f61e0a35
SH
7980 sp->num_entries = config->rx_ring_num + 1;
7981
d44570e4 7982 /* Store the values of the MSIX table in the s2io_nic structure */
c77dd43e 7983 store_xmsi_data(sp);
b41477f3
AR
7984 /* reset Nic and bring it to known state */
7985 s2io_reset(sp);
7986
1da177e4 7987 /*
99993af6 7988 * Initialize link state flags
541ae68f 7989 * and the card state parameter
1da177e4 7990 */
92b84437 7991 sp->state = 0;
1da177e4 7992
1da177e4 7993 /* Initialize spinlocks */
13d866a9
JP
7994 for (i = 0; i < sp->config.tx_fifo_num; i++) {
7995 struct fifo_info *fifo = &mac_control->fifos[i];
7996
7997 spin_lock_init(&fifo->tx_lock);
7998 }
db874e65 7999
20346722
K
8000 /*
8001 * SXE-002: Configure link and activity LED to init state
8002 * on driver load.
1da177e4
LT
8003 */
8004 subid = sp->pdev->subsystem_device;
8005 if ((subid & 0xFF) >= 0x07) {
8006 val64 = readq(&bar0->gpio_control);
8007 val64 |= 0x0000800000000000ULL;
8008 writeq(val64, &bar0->gpio_control);
8009 val64 = 0x0411040400000000ULL;
d44570e4 8010 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
8011 val64 = readq(&bar0->gpio_control);
8012 }
8013
8014 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8015
8016 if (register_netdev(dev)) {
8017 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8018 ret = -ENODEV;
8019 goto register_failed;
8020 }
9dc737a7 8021 s2io_vpd_read(sp);
926bd900 8022 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
d44570e4 8023 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
44c10138 8024 sp->product_name, pdev->revision);
b41477f3
AR
8025 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8026 s2io_driver_version);
9e39f7c5
JP
8027 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8028 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
9dc737a7 8029 if (sp->device_type & XFRAME_II_DEVICE) {
0b1f7ebe 8030 mode = s2io_print_pci_mode(sp);
541ae68f 8031 if (mode < 0) {
541ae68f 8032 ret = -EBADSLT;
9dc737a7 8033 unregister_netdev(dev);
541ae68f
K
8034 goto set_swap_failed;
8035 }
541ae68f 8036 }
d44570e4
JP
8037 switch (sp->rxd_mode) {
8038 case RXD_MODE_1:
8039 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8040 dev->name);
8041 break;
8042 case RXD_MODE_3B:
8043 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8044 dev->name);
8045 break;
9dc737a7 8046 }
db874e65 8047
f61e0a35
SH
8048 switch (sp->config.napi) {
8049 case 0:
8050 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8051 break;
8052 case 1:
db874e65 8053 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
f61e0a35
SH
8054 break;
8055 }
3a3d5756
SH
8056
8057 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
d44570e4 8058 sp->config.tx_fifo_num);
3a3d5756 8059
0425b46a
SH
8060 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8061 sp->config.rx_ring_num);
8062
d44570e4
JP
8063 switch (sp->config.intr_type) {
8064 case INTA:
8065 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8066 break;
8067 case MSI_X:
8068 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8069 break;
9dc737a7 8070 }
3a3d5756 8071 if (sp->config.multiq) {
13d866a9
JP
8072 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8073 struct fifo_info *fifo = &mac_control->fifos[i];
8074
8075 fifo->multiq = config->multiq;
8076 }
3a3d5756 8077 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
d44570e4 8078 dev->name);
3a3d5756
SH
8079 } else
8080 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
d44570e4 8081 dev->name);
3a3d5756 8082
6cfc482b
SH
8083 switch (sp->config.tx_steering_type) {
8084 case NO_STEERING:
d44570e4
JP
8085 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8086 dev->name);
8087 break;
6cfc482b 8088 case TX_PRIORITY_STEERING:
d44570e4
JP
8089 DBG_PRINT(ERR_DBG,
8090 "%s: Priority steering enabled for transmit\n",
8091 dev->name);
6cfc482b
SH
8092 break;
8093 case TX_DEFAULT_STEERING:
d44570e4
JP
8094 DBG_PRINT(ERR_DBG,
8095 "%s: Default steering enabled for transmit\n",
8096 dev->name);
6cfc482b
SH
8097 }
8098
f0c54ace
AW
8099 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8100 dev->name);
7ba013ac 8101 /* Initialize device name */
a8c1d28a
DC
8102 snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8103 sp->product_name);
7ba013ac 8104
cd0fce03
BL
8105 if (vlan_tag_strip)
8106 sp->vlan_strip_flag = 1;
8107 else
8108 sp->vlan_strip_flag = 0;
8109
20346722
K
8110 /*
8111 * Make Link state as off at this point, when the Link change
8112 * interrupt comes the state will be automatically changed to
1da177e4
LT
8113 * the right state.
8114 */
8115 netif_carrier_off(dev);
1da177e4
LT
8116
8117 return 0;
8118
d44570e4
JP
8119register_failed:
8120set_swap_failed:
1da177e4 8121 iounmap(sp->bar1);
d44570e4 8122bar1_remap_failed:
1da177e4 8123 iounmap(sp->bar0);
d44570e4
JP
8124bar0_remap_failed:
8125mem_alloc_failed:
1da177e4
LT
8126 free_shared_mem(sp);
8127 pci_disable_device(pdev);
eccb8628 8128 pci_release_regions(pdev);
1da177e4
LT
8129 free_netdev(dev);
8130
8131 return ret;
8132}
8133
8134/**
20346722 8135 * s2io_rem_nic - Free the PCI device
1da177e4 8136 * @pdev: structure containing the PCI related information of the device.
20346722 8137 * Description: This function is called by the Pci subsystem to release a
1da177e4 8138 * PCI device and free up all resource held up by the device. This could
20346722 8139 * be in response to a Hot plug event or when the driver is to be removed
1da177e4
LT
8140 * from memory.
8141 */
8142
3a036ce5 8143static void s2io_rem_nic(struct pci_dev *pdev)
1da177e4 8144{
a31ff388 8145 struct net_device *dev = pci_get_drvdata(pdev);
1ee6dd77 8146 struct s2io_nic *sp;
1da177e4
LT
8147
8148 if (dev == NULL) {
8149 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8150 return;
8151 }
8152
4cf1653a 8153 sp = netdev_priv(dev);
23f333a2
TH
8154
8155 cancel_work_sync(&sp->rst_timer_task);
8156 cancel_work_sync(&sp->set_link_task);
8157
1da177e4
LT
8158 unregister_netdev(dev);
8159
8160 free_shared_mem(sp);
8161 iounmap(sp->bar0);
8162 iounmap(sp->bar1);
eccb8628 8163 pci_release_regions(pdev);
1da177e4 8164 free_netdev(dev);
19a60522 8165 pci_disable_device(pdev);
1da177e4
LT
8166}
8167
910be1ab 8168module_pci_driver(s2io_driver);
7d3d0439 8169
6aa20a22 8170static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
d44570e4
JP
8171 struct tcphdr **tcp, struct RxD_t *rxdp,
8172 struct s2io_nic *sp)
7d3d0439
RA
8173{
8174 int ip_off;
8175 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8176
8177 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
d44570e4
JP
8178 DBG_PRINT(INIT_DBG,
8179 "%s: Non-TCP frames not supported for LRO\n",
b39d66a8 8180 __func__);
7d3d0439
RA
8181 return -1;
8182 }
8183
cdb5bf02 8184 /* Checking for DIX type or DIX type with VLAN */
d44570e4 8185 if ((l2_type == 0) || (l2_type == 4)) {
cdb5bf02
SH
8186 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8187 /*
8188 * If vlan stripping is disabled and the frame is VLAN tagged,
8189 * shift the offset by the VLAN header size bytes.
8190 */
cd0fce03 8191 if ((!sp->vlan_strip_flag) &&
d44570e4 8192 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
cdb5bf02
SH
8193 ip_off += HEADER_VLAN_SIZE;
8194 } else {
7d3d0439 8195 /* LLC, SNAP etc are considered non-mergeable */
cdb5bf02 8196 return -1;
7d3d0439
RA
8197 }
8198
64699336 8199 *ip = (struct iphdr *)(buffer + ip_off);
7d3d0439
RA
8200 ip_len = (u8)((*ip)->ihl);
8201 ip_len <<= 2;
8202 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8203
8204 return 0;
8205}
8206
1ee6dd77 8207static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8208 struct tcphdr *tcp)
8209{
d44570e4
JP
8210 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8211 if ((lro->iph->saddr != ip->saddr) ||
8212 (lro->iph->daddr != ip->daddr) ||
8213 (lro->tcph->source != tcp->source) ||
8214 (lro->tcph->dest != tcp->dest))
7d3d0439
RA
8215 return -1;
8216 return 0;
8217}
8218
8219static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8220{
d44570e4 8221 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
7d3d0439
RA
8222}
8223
1ee6dd77 8224static void initiate_new_session(struct lro *lro, u8 *l2h,
d44570e4
JP
8225 struct iphdr *ip, struct tcphdr *tcp,
8226 u32 tcp_pyld_len, u16 vlan_tag)
7d3d0439 8227{
d44570e4 8228 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8229 lro->l2h = l2h;
8230 lro->iph = ip;
8231 lro->tcph = tcp;
8232 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
c8855953 8233 lro->tcp_ack = tcp->ack_seq;
7d3d0439
RA
8234 lro->sg_num = 1;
8235 lro->total_len = ntohs(ip->tot_len);
8236 lro->frags_len = 0;
cdb5bf02 8237 lro->vlan_tag = vlan_tag;
6aa20a22 8238 /*
d44570e4
JP
8239 * Check if we saw TCP timestamp.
8240 * Other consistency checks have already been done.
8241 */
7d3d0439 8242 if (tcp->doff == 8) {
c8855953
SR
8243 __be32 *ptr;
8244 ptr = (__be32 *)(tcp+1);
7d3d0439 8245 lro->saw_ts = 1;
c8855953 8246 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8247 lro->cur_tsecr = *(ptr+2);
8248 }
8249 lro->in_use = 1;
8250}
8251
1ee6dd77 8252static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7d3d0439
RA
8253{
8254 struct iphdr *ip = lro->iph;
8255 struct tcphdr *tcp = lro->tcph;
ffb5df6c
JP
8256 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8257
d44570e4 8258 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8259
8260 /* Update L3 header */
9a18dd15 8261 csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
7d3d0439 8262 ip->tot_len = htons(lro->total_len);
7d3d0439
RA
8263
8264 /* Update L4 header */
8265 tcp->ack_seq = lro->tcp_ack;
8266 tcp->window = lro->window;
8267
8268 /* Update tsecr field if this session has timestamps enabled */
8269 if (lro->saw_ts) {
c8855953 8270 __be32 *ptr = (__be32 *)(tcp + 1);
7d3d0439
RA
8271 *(ptr+2) = lro->cur_tsecr;
8272 }
8273
8274 /* Update counters required for calculation of
8275 * average no. of packets aggregated.
8276 */
ffb5df6c
JP
8277 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8278 swstats->num_aggregations++;
7d3d0439
RA
8279}
8280
1ee6dd77 8281static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
d44570e4 8282 struct tcphdr *tcp, u32 l4_pyld)
7d3d0439 8283{
d44570e4 8284 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8285 lro->total_len += l4_pyld;
8286 lro->frags_len += l4_pyld;
8287 lro->tcp_next_seq += l4_pyld;
8288 lro->sg_num++;
8289
8290 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8291 lro->tcp_ack = tcp->ack_seq;
8292 lro->window = tcp->window;
6aa20a22 8293
7d3d0439 8294 if (lro->saw_ts) {
c8855953 8295 __be32 *ptr;
7d3d0439 8296 /* Update tsecr and tsval from this packet */
c8855953
SR
8297 ptr = (__be32 *)(tcp+1);
8298 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8299 lro->cur_tsecr = *(ptr + 2);
8300 }
8301}
8302
1ee6dd77 8303static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7d3d0439
RA
8304 struct tcphdr *tcp, u32 tcp_pyld_len)
8305{
7d3d0439
RA
8306 u8 *ptr;
8307
d44570e4 8308 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
79dc1901 8309
7d3d0439
RA
8310 if (!tcp_pyld_len) {
8311 /* Runt frame or a pure ack */
8312 return -1;
8313 }
8314
8315 if (ip->ihl != 5) /* IP has options */
8316 return -1;
8317
75c30b13
AR
8318 /* If we see CE codepoint in IP header, packet is not mergeable */
8319 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8320 return -1;
8321
8322 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
d44570e4
JP
8323 if (tcp->urg || tcp->psh || tcp->rst ||
8324 tcp->syn || tcp->fin ||
8325 tcp->ece || tcp->cwr || !tcp->ack) {
7d3d0439
RA
8326 /*
8327 * Currently recognize only the ack control word and
8328 * any other control field being set would result in
8329 * flushing the LRO session
8330 */
8331 return -1;
8332 }
8333
6aa20a22 8334 /*
7d3d0439
RA
8335 * Allow only one TCP timestamp option. Don't aggregate if
8336 * any other options are detected.
8337 */
8338 if (tcp->doff != 5 && tcp->doff != 8)
8339 return -1;
8340
8341 if (tcp->doff == 8) {
6aa20a22 8342 ptr = (u8 *)(tcp + 1);
7d3d0439
RA
8343 while (*ptr == TCPOPT_NOP)
8344 ptr++;
8345 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8346 return -1;
8347
8348 /* Ensure timestamp value increases monotonically */
8349 if (l_lro)
c8855953 8350 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
7d3d0439
RA
8351 return -1;
8352
8353 /* timestamp echo reply should be non-zero */
c8855953 8354 if (*((__be32 *)(ptr+6)) == 0)
7d3d0439
RA
8355 return -1;
8356 }
8357
8358 return 0;
8359}
8360
/*
 * s2io_club_tcp_session - classify a received TCP segment against the
 * per-ring LRO session table and update the matching session.
 *
 * On success *lro points at the session used; *tcp/*tcp_len are set to
 * the segment's TCP header and payload length.
 *
 * Return value tells the caller what to do with the segment:
 *   -1 : not LRO-capable at L2/L4 (propagated from check_L2_lro_capable)
 *    0 : every LRO session slot is already in use (*lro set to NULL)
 *    1 : segment aggregated into an existing session
 *    2 : flush (out-of-sequence, or not L3/L4 mergeable) - headers updated
 *    3 : new session initiated for this segment
 *    4 : aggregated and the session hit lro_max_aggr_per_sess - flush it
 *    5 : segment not aggregatable; send it up as a normal packet
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Locate the IP/TCP headers; bails out for non-TCP/non-DIX frames. */
	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);

	/* First pass: look for an in-use session with a matching 4-tuple. */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* A sequence gap forces a flush of the session. */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: claim a free session slot for a new flow. */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	/* Apply the action decided above to the selected session. */
	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8458
1ee6dd77 8459static void clear_lro_session(struct lro *lro)
7d3d0439 8460{
1ee6dd77 8461 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
8462
8463 memset(lro, 0, lro_struct_size);
8464}
8465
cdb5bf02 8466static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
7d3d0439
RA
8467{
8468 struct net_device *dev = skb->dev;
4cf1653a 8469 struct s2io_nic *sp = netdev_priv(dev);
7d3d0439
RA
8470
8471 skb->protocol = eth_type_trans(skb, dev);
b85da2c0 8472 if (vlan_tag && sp->vlan_strip_flag)
86a9bad3 8473 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
b85da2c0
JP
8474 if (sp->config.napi)
8475 netif_receive_skb(skb);
8476 else
8477 netif_rx(skb);
7d3d0439
RA
8478}
8479
1ee6dd77 8480static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
d44570e4 8481 struct sk_buff *skb, u32 tcp_len)
7d3d0439 8482{
75c30b13 8483 struct sk_buff *first = lro->parent;
ffb5df6c 8484 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7d3d0439
RA
8485
8486 first->len += tcp_len;
8487 first->data_len = lro->frags_len;
8488 skb_pull(skb, (skb->len - tcp_len));
75c30b13
AR
8489 if (skb_shinfo(first)->frag_list)
8490 lro->last_frag->next = skb;
7d3d0439
RA
8491 else
8492 skb_shinfo(first)->frag_list = skb;
372cc597 8493 first->truesize += skb->truesize;
75c30b13 8494 lro->last_frag = skb;
ffb5df6c 8495 swstats->clubbed_frms_cnt++;
7d3d0439 8496}
d796fdb7
LV
8497
8498/**
8499 * s2io_io_error_detected - called when PCI error is detected
8500 * @pdev: Pointer to PCI device
8453d43f 8501 * @state: The current pci connection state
d796fdb7
LV
8502 *
8503 * This function is called after a PCI bus error affecting
8504 * this device has been detected.
8505 */
8506static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
d44570e4 8507 pci_channel_state_t state)
d796fdb7
LV
8508{
8509 struct net_device *netdev = pci_get_drvdata(pdev);
4cf1653a 8510 struct s2io_nic *sp = netdev_priv(netdev);
d796fdb7
LV
8511
8512 netif_device_detach(netdev);
8513
1e3c8bd6
DN
8514 if (state == pci_channel_io_perm_failure)
8515 return PCI_ERS_RESULT_DISCONNECT;
8516
d796fdb7
LV
8517 if (netif_running(netdev)) {
8518 /* Bring down the card, while avoiding PCI I/O */
8519 do_s2io_card_down(sp, 0);
d796fdb7
LV
8520 }
8521 pci_disable_device(pdev);
8522
8523 return PCI_ERS_RESULT_NEED_RESET;
8524}
8525
8526/**
8527 * s2io_io_slot_reset - called after the pci bus has been reset.
8528 * @pdev: Pointer to PCI device
8529 *
8530 * Restart the card from scratch, as if from a cold-boot.
8531 * At this point, the card has exprienced a hard reset,
8532 * followed by fixups by BIOS, and has its config space
8533 * set up identically to what it was at cold boot.
8534 */
8535static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8536{
8537 struct net_device *netdev = pci_get_drvdata(pdev);
4cf1653a 8538 struct s2io_nic *sp = netdev_priv(netdev);
d796fdb7
LV
8539
8540 if (pci_enable_device(pdev)) {
6cef2b8e 8541 pr_err("Cannot re-enable PCI device after reset.\n");
d796fdb7
LV
8542 return PCI_ERS_RESULT_DISCONNECT;
8543 }
8544
8545 pci_set_master(pdev);
8546 s2io_reset(sp);
8547
8548 return PCI_ERS_RESULT_RECOVERED;
8549}
8550
8551/**
8552 * s2io_io_resume - called when traffic can start flowing again.
8553 * @pdev: Pointer to PCI device
8554 *
8555 * This callback is called when the error recovery driver tells
8556 * us that its OK to resume normal operation.
8557 */
8558static void s2io_io_resume(struct pci_dev *pdev)
8559{
8560 struct net_device *netdev = pci_get_drvdata(pdev);
4cf1653a 8561 struct s2io_nic *sp = netdev_priv(netdev);
d796fdb7
LV
8562
8563 if (netif_running(netdev)) {
8564 if (s2io_card_up(sp)) {
6cef2b8e 8565 pr_err("Can't bring device back up after reset.\n");
d796fdb7
LV
8566 return;
8567 }
8568
40507e7a 8569 if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
d796fdb7 8570 s2io_card_down(sp);
6cef2b8e 8571 pr_err("Can't restore mac addr after reset.\n");
d796fdb7
LV
8572 return;
8573 }
8574 }
8575
8576 netif_device_attach(netdev);
fd2ea0a7 8577 netif_tx_wake_all_queues(netdev);
d796fdb7 8578}