]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/neterion/s2io.c
net: amd: remove redundant continue
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / neterion / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
926bd900 3 * Copyright(c) 2002-2010 Exar Corp.
d44570e4 4 *
1da177e4
LT
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722
K
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code part that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4 27 * The module loadable parameters that are supported by the driver and a brief
a2a20aef 28 * explanation of all the variables.
9dc737a7 29 *
20346722
K
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
6d517a27 35 * values are 1, 2.
1da177e4 36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
8abc4d5b 40 * 2(MSI_X). Default value is '2(MSI_X)'
9dc737a7
AR
41 * lro_max_pkts: This parameter defines maximum number of packets can be
42 * aggregated as a single large packet
926930b2
SS
43 * napi: This parameter used to enable/disable NAPI (polling Rx)
44 * Possible values '1' for enable and '0' for disable. Default is '1'
926930b2
SS
45 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
46 * Possible values '1' for enable , '0' for disable.
47 * Default is '2' - which means disable in promisc mode
48 * and enable in non-promiscuous mode.
3a3d5756
SH
49 * multiq: This parameter used to enable/disable MULTIQUEUE support.
50 * Possible values '1' for enable and '0' for disable. Default is '0'
1da177e4
LT
51 ************************************************************************/
52
6cef2b8e
JP
53#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54
1da177e4
LT
55#include <linux/module.h>
56#include <linux/types.h>
57#include <linux/errno.h>
58#include <linux/ioport.h>
59#include <linux/pci.h>
1e7f0bd8 60#include <linux/dma-mapping.h>
1da177e4
LT
61#include <linux/kernel.h>
62#include <linux/netdevice.h>
63#include <linux/etherdevice.h>
40239396 64#include <linux/mdio.h>
1da177e4
LT
65#include <linux/skbuff.h>
66#include <linux/init.h>
67#include <linux/delay.h>
68#include <linux/stddef.h>
69#include <linux/ioctl.h>
70#include <linux/timex.h>
1da177e4 71#include <linux/ethtool.h>
1da177e4 72#include <linux/workqueue.h>
be3a6b02 73#include <linux/if_vlan.h>
7d3d0439
RA
74#include <linux/ip.h>
75#include <linux/tcp.h>
d44570e4
JP
76#include <linux/uaccess.h>
77#include <linux/io.h>
2208e9a7 78#include <linux/io-64-nonatomic-lo-hi.h>
5a0e3ad6 79#include <linux/slab.h>
70c71606 80#include <linux/prefetch.h>
7d3d0439 81#include <net/tcp.h>
9a18dd15 82#include <net/checksum.h>
1da177e4 83
fe931395 84#include <asm/div64.h>
330ce0de 85#include <asm/irq.h>
1da177e4
LT
86
87/* local include */
88#include "s2io.h"
89#include "s2io-regs.h"
90
11410b62 91#define DRV_VERSION "2.0.26.28"
6c1792f4 92
1da177e4 93/* S2io Driver name & version. */
c0dbf37e
JM
94static const char s2io_driver_name[] = "Neterion";
95static const char s2io_driver_version[] = DRV_VERSION;
1da177e4 96
c0dbf37e
JM
97static const int rxd_size[2] = {32, 48};
98static const int rxd_count[2] = {127, 85};
da6971d8 99
1ee6dd77 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd
K
101{
102 int ret;
103
104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
d44570e4 105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
5e25b9dd
K
106
107 return ret;
108}
109
20346722 110/*
1da177e4
LT
111 * Cards with following subsystem_id have a link state indication
112 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
113 * macro below identifies these cards given the subsystem_id.
114 */
d44570e4
JP
115#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
116 (dev_type == XFRAME_I_DEVICE) ? \
117 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
118 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
1da177e4
LT
119
120#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
1da177e4 122
/* Return non-zero when the adapter has been brought up (the
 * __S2IO_STATE_CARD_UP bit is set in the device state bitmap).
 */
static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
127
1da177e4 128/* Ethtool related variables and Macros. */
6fce365d 129static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
1da177e4
LT
130 "Register test\t(offline)",
131 "Eeprom test\t(offline)",
132 "Link test\t(online)",
133 "RLDRAM test\t(offline)",
134 "BIST Test\t(offline)"
135};
136
6fce365d 137static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
1da177e4
LT
138 {"tmac_frms"},
139 {"tmac_data_octets"},
140 {"tmac_drop_frms"},
141 {"tmac_mcst_frms"},
142 {"tmac_bcst_frms"},
143 {"tmac_pause_ctrl_frms"},
bd1034f0
AR
144 {"tmac_ttl_octets"},
145 {"tmac_ucst_frms"},
146 {"tmac_nucst_frms"},
1da177e4 147 {"tmac_any_err_frms"},
bd1034f0 148 {"tmac_ttl_less_fb_octets"},
1da177e4
LT
149 {"tmac_vld_ip_octets"},
150 {"tmac_vld_ip"},
151 {"tmac_drop_ip"},
152 {"tmac_icmp"},
153 {"tmac_rst_tcp"},
154 {"tmac_tcp"},
155 {"tmac_udp"},
156 {"rmac_vld_frms"},
157 {"rmac_data_octets"},
158 {"rmac_fcs_err_frms"},
159 {"rmac_drop_frms"},
160 {"rmac_vld_mcst_frms"},
161 {"rmac_vld_bcst_frms"},
162 {"rmac_in_rng_len_err_frms"},
bd1034f0 163 {"rmac_out_rng_len_err_frms"},
1da177e4
LT
164 {"rmac_long_frms"},
165 {"rmac_pause_ctrl_frms"},
bd1034f0
AR
166 {"rmac_unsup_ctrl_frms"},
167 {"rmac_ttl_octets"},
168 {"rmac_accepted_ucst_frms"},
169 {"rmac_accepted_nucst_frms"},
1da177e4 170 {"rmac_discarded_frms"},
bd1034f0
AR
171 {"rmac_drop_events"},
172 {"rmac_ttl_less_fb_octets"},
173 {"rmac_ttl_frms"},
1da177e4
LT
174 {"rmac_usized_frms"},
175 {"rmac_osized_frms"},
176 {"rmac_frag_frms"},
177 {"rmac_jabber_frms"},
bd1034f0
AR
178 {"rmac_ttl_64_frms"},
179 {"rmac_ttl_65_127_frms"},
180 {"rmac_ttl_128_255_frms"},
181 {"rmac_ttl_256_511_frms"},
182 {"rmac_ttl_512_1023_frms"},
183 {"rmac_ttl_1024_1518_frms"},
1da177e4
LT
184 {"rmac_ip"},
185 {"rmac_ip_octets"},
186 {"rmac_hdr_err_ip"},
187 {"rmac_drop_ip"},
188 {"rmac_icmp"},
189 {"rmac_tcp"},
190 {"rmac_udp"},
191 {"rmac_err_drp_udp"},
bd1034f0
AR
192 {"rmac_xgmii_err_sym"},
193 {"rmac_frms_q0"},
194 {"rmac_frms_q1"},
195 {"rmac_frms_q2"},
196 {"rmac_frms_q3"},
197 {"rmac_frms_q4"},
198 {"rmac_frms_q5"},
199 {"rmac_frms_q6"},
200 {"rmac_frms_q7"},
201 {"rmac_full_q0"},
202 {"rmac_full_q1"},
203 {"rmac_full_q2"},
204 {"rmac_full_q3"},
205 {"rmac_full_q4"},
206 {"rmac_full_q5"},
207 {"rmac_full_q6"},
208 {"rmac_full_q7"},
1da177e4 209 {"rmac_pause_cnt"},
bd1034f0
AR
210 {"rmac_xgmii_data_err_cnt"},
211 {"rmac_xgmii_ctrl_err_cnt"},
1da177e4
LT
212 {"rmac_accepted_ip"},
213 {"rmac_err_tcp"},
bd1034f0
AR
214 {"rd_req_cnt"},
215 {"new_rd_req_cnt"},
216 {"new_rd_req_rtry_cnt"},
217 {"rd_rtry_cnt"},
218 {"wr_rtry_rd_ack_cnt"},
219 {"wr_req_cnt"},
220 {"new_wr_req_cnt"},
221 {"new_wr_req_rtry_cnt"},
222 {"wr_rtry_cnt"},
223 {"wr_disc_cnt"},
224 {"rd_rtry_wr_ack_cnt"},
225 {"txp_wr_cnt"},
226 {"txd_rd_cnt"},
227 {"txd_wr_cnt"},
228 {"rxd_rd_cnt"},
229 {"rxd_wr_cnt"},
230 {"txf_rd_cnt"},
fa1f0cb3
SS
231 {"rxf_wr_cnt"}
232};
233
6fce365d 234static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
bd1034f0
AR
235 {"rmac_ttl_1519_4095_frms"},
236 {"rmac_ttl_4096_8191_frms"},
237 {"rmac_ttl_8192_max_frms"},
238 {"rmac_ttl_gt_max_frms"},
239 {"rmac_osized_alt_frms"},
240 {"rmac_jabber_alt_frms"},
241 {"rmac_gt_max_alt_frms"},
242 {"rmac_vlan_frms"},
243 {"rmac_len_discard"},
244 {"rmac_fcs_discard"},
245 {"rmac_pf_discard"},
246 {"rmac_da_discard"},
247 {"rmac_red_discard"},
248 {"rmac_rts_discard"},
249 {"rmac_ingm_full_discard"},
fa1f0cb3
SS
250 {"link_fault_cnt"}
251};
252
6fce365d 253static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
7ba013ac
K
254 {"\n DRIVER STATISTICS"},
255 {"single_bit_ecc_errs"},
256 {"double_bit_ecc_errs"},
bd1034f0
AR
257 {"parity_err_cnt"},
258 {"serious_err_cnt"},
259 {"soft_reset_cnt"},
260 {"fifo_full_cnt"},
8116f3cf
SS
261 {"ring_0_full_cnt"},
262 {"ring_1_full_cnt"},
263 {"ring_2_full_cnt"},
264 {"ring_3_full_cnt"},
265 {"ring_4_full_cnt"},
266 {"ring_5_full_cnt"},
267 {"ring_6_full_cnt"},
268 {"ring_7_full_cnt"},
43b7c451
SH
269 {"alarm_transceiver_temp_high"},
270 {"alarm_transceiver_temp_low"},
271 {"alarm_laser_bias_current_high"},
272 {"alarm_laser_bias_current_low"},
273 {"alarm_laser_output_power_high"},
274 {"alarm_laser_output_power_low"},
275 {"warn_transceiver_temp_high"},
276 {"warn_transceiver_temp_low"},
277 {"warn_laser_bias_current_high"},
278 {"warn_laser_bias_current_low"},
279 {"warn_laser_output_power_high"},
280 {"warn_laser_output_power_low"},
281 {"lro_aggregated_pkts"},
282 {"lro_flush_both_count"},
283 {"lro_out_of_sequence_pkts"},
284 {"lro_flush_due_to_max_pkts"},
285 {"lro_avg_aggr_pkts"},
286 {"mem_alloc_fail_cnt"},
287 {"pci_map_fail_cnt"},
288 {"watchdog_timer_cnt"},
289 {"mem_allocated"},
290 {"mem_freed"},
291 {"link_up_cnt"},
292 {"link_down_cnt"},
293 {"link_up_time"},
294 {"link_down_time"},
295 {"tx_tcode_buf_abort_cnt"},
296 {"tx_tcode_desc_abort_cnt"},
297 {"tx_tcode_parity_err_cnt"},
298 {"tx_tcode_link_loss_cnt"},
299 {"tx_tcode_list_proc_err_cnt"},
300 {"rx_tcode_parity_err_cnt"},
301 {"rx_tcode_abort_cnt"},
302 {"rx_tcode_parity_abort_cnt"},
303 {"rx_tcode_rda_fail_cnt"},
304 {"rx_tcode_unkn_prot_cnt"},
305 {"rx_tcode_fcs_err_cnt"},
306 {"rx_tcode_buf_size_err_cnt"},
307 {"rx_tcode_rxd_corrupt_cnt"},
308 {"rx_tcode_unkn_err_cnt"},
8116f3cf
SS
309 {"tda_err_cnt"},
310 {"pfc_err_cnt"},
311 {"pcc_err_cnt"},
312 {"tti_err_cnt"},
313 {"tpa_err_cnt"},
314 {"sm_err_cnt"},
315 {"lso_err_cnt"},
316 {"mac_tmac_err_cnt"},
317 {"mac_rmac_err_cnt"},
318 {"xgxs_txgxs_err_cnt"},
319 {"xgxs_rxgxs_err_cnt"},
320 {"rc_err_cnt"},
321 {"prc_pcix_err_cnt"},
322 {"rpa_err_cnt"},
323 {"rda_err_cnt"},
324 {"rti_err_cnt"},
325 {"mc_err_cnt"}
1da177e4
LT
326};
327
4c3616cd
AMR
328#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
329#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
330#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
fa1f0cb3 331
d44570e4
JP
332#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
fa1f0cb3 334
d44570e4
JP
335#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
1da177e4 337
4c3616cd 338#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
d44570e4 339#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
1da177e4 340
2fd37688
SS
341/* copy mac addr to def_mac_addr array */
342static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
343{
344 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
345 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
346 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
347 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
348 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
349 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
350}
04025095 351
20346722 352/*
1da177e4
LT
353 * Constants to be programmed into the Xena's registers, to configure
354 * the XAUI.
355 */
356
1da177e4 357#define END_SIGN 0x0
f71e1309 358static const u64 herc_act_dtx_cfg[] = {
541ae68f 359 /* Set address */
e960fc5c 360 0x8000051536750000ULL, 0x80000515367500E0ULL,
541ae68f 361 /* Write data */
e960fc5c 362 0x8000051536750004ULL, 0x80000515367500E4ULL,
541ae68f
K
363 /* Set address */
364 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
365 /* Write data */
366 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
367 /* Set address */
e960fc5c 368 0x801205150D440000ULL, 0x801205150D4400E0ULL,
369 /* Write data */
370 0x801205150D440004ULL, 0x801205150D4400E4ULL,
371 /* Set address */
541ae68f
K
372 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
373 /* Write data */
374 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
375 /* Done */
376 END_SIGN
377};
378
f71e1309 379static const u64 xena_dtx_cfg[] = {
c92ca04b 380 /* Set address */
1da177e4 381 0x8000051500000000ULL, 0x80000515000000E0ULL,
c92ca04b
AR
382 /* Write data */
383 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
384 /* Set address */
385 0x8001051500000000ULL, 0x80010515000000E0ULL,
386 /* Write data */
387 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
388 /* Set address */
1da177e4 389 0x8002051500000000ULL, 0x80020515000000E0ULL,
c92ca04b
AR
390 /* Write data */
391 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1da177e4
LT
392 END_SIGN
393};
394
20346722 395/*
1da177e4
LT
396 * Constants for Fixing the MacAddress problem seen mostly on
397 * Alpha machines.
398 */
f71e1309 399static const u64 fix_mac[] = {
1da177e4
LT
400 0x0060000000000000ULL, 0x0060600000000000ULL,
401 0x0040600000000000ULL, 0x0000600000000000ULL,
402 0x0020600000000000ULL, 0x0060600000000000ULL,
403 0x0020600000000000ULL, 0x0060600000000000ULL,
404 0x0020600000000000ULL, 0x0060600000000000ULL,
405 0x0020600000000000ULL, 0x0060600000000000ULL,
406 0x0020600000000000ULL, 0x0060600000000000ULL,
407 0x0020600000000000ULL, 0x0060600000000000ULL,
408 0x0020600000000000ULL, 0x0060600000000000ULL,
409 0x0020600000000000ULL, 0x0060600000000000ULL,
410 0x0020600000000000ULL, 0x0060600000000000ULL,
411 0x0020600000000000ULL, 0x0060600000000000ULL,
412 0x0020600000000000ULL, 0x0000600000000000ULL,
413 0x0040600000000000ULL, 0x0060600000000000ULL,
414 END_SIGN
415};
416
b41477f3
AR
417MODULE_LICENSE("GPL");
418MODULE_VERSION(DRV_VERSION);
419
420
1da177e4 421/* Module Loadable parameters. */
6cfc482b 422S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
b41477f3 423S2IO_PARM_INT(rx_ring_num, 1);
3a3d5756 424S2IO_PARM_INT(multiq, 0);
b41477f3
AR
425S2IO_PARM_INT(rx_ring_mode, 1);
426S2IO_PARM_INT(use_continuous_tx_intrs, 1);
427S2IO_PARM_INT(rmac_pause_time, 0x100);
428S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
429S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
430S2IO_PARM_INT(shared_splits, 0);
431S2IO_PARM_INT(tmac_util_period, 5);
432S2IO_PARM_INT(rmac_util_period, 5);
b41477f3 433S2IO_PARM_INT(l3l4hdr_size, 128);
6cfc482b
SH
434/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
435S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
303bcb4b 436/* Frequency of Rx desc syncs expressed as power of 2 */
b41477f3 437S2IO_PARM_INT(rxsync_frequency, 3);
eccb8628 438/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
8abc4d5b 439S2IO_PARM_INT(intr_type, 2);
7d3d0439 440/* Large receive offload feature */
43b7c451 441
7d3d0439
RA
442/* Max pkts to be aggregated by LRO at one time. If not specified,
443 * aggregation happens until we hit max IP pkt size(64K)
444 */
b41477f3 445S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
b41477f3 446S2IO_PARM_INT(indicate_max_pkts, 0);
db874e65
SS
447
448S2IO_PARM_INT(napi, 1);
926930b2 449S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
b41477f3
AR
450
451static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
d44570e4 452{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
b41477f3 453static unsigned int rx_ring_sz[MAX_RX_RINGS] =
d44570e4 454{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
b41477f3 455static unsigned int rts_frm_len[MAX_RX_RINGS] =
d44570e4 456{[0 ...(MAX_RX_RINGS - 1)] = 0 };
b41477f3
AR
457
458module_param_array(tx_fifo_len, uint, NULL, 0);
459module_param_array(rx_ring_sz, uint, NULL, 0);
460module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 461
20346722 462/*
1da177e4 463 * S2IO device table.
20346722 464 * This table lists all the devices that this driver supports.
1da177e4 465 */
9baa3c34 466static const struct pci_device_id s2io_tbl[] = {
1da177e4
LT
467 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
468 PCI_ANY_ID, PCI_ANY_ID},
469 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
470 PCI_ANY_ID, PCI_ANY_ID},
471 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
d44570e4
JP
472 PCI_ANY_ID, PCI_ANY_ID},
473 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
474 PCI_ANY_ID, PCI_ANY_ID},
1da177e4
LT
475 {0,}
476};
477
478MODULE_DEVICE_TABLE(pci, s2io_tbl);
479
3646f0e5 480static const struct pci_error_handlers s2io_err_handler = {
d796fdb7
LV
481 .error_detected = s2io_io_error_detected,
482 .slot_reset = s2io_io_slot_reset,
483 .resume = s2io_io_resume,
484};
485
1da177e4 486static struct pci_driver s2io_driver = {
d44570e4
JP
487 .name = "S2IO",
488 .id_table = s2io_tbl,
489 .probe = s2io_init_nic,
3a036ce5 490 .remove = s2io_rem_nic,
d44570e4 491 .err_handler = &s2io_err_handler,
1da177e4
LT
492};
493
494/* A simplifier macro used both by init and free shared_mem Fns(). */
f8a1988f 495#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
1da177e4 496
3a3d5756
SH
497/* netqueue manipulation helper functions */
498static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
499{
fd2ea0a7
DM
500 if (!sp->config.multiq) {
501 int i;
502
3a3d5756
SH
503 for (i = 0; i < sp->config.tx_fifo_num; i++)
504 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
3a3d5756 505 }
fd2ea0a7 506 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
507}
508
509static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
510{
fd2ea0a7 511 if (!sp->config.multiq)
3a3d5756
SH
512 sp->mac_control.fifos[fifo_no].queue_state =
513 FIFO_QUEUE_STOP;
fd2ea0a7
DM
514
515 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
516}
517
518static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
519{
fd2ea0a7
DM
520 if (!sp->config.multiq) {
521 int i;
522
3a3d5756
SH
523 for (i = 0; i < sp->config.tx_fifo_num; i++)
524 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 525 }
fd2ea0a7 526 netif_tx_start_all_queues(sp->dev);
3a3d5756
SH
527}
528
3a3d5756
SH
529static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
530{
fd2ea0a7
DM
531 if (!sp->config.multiq) {
532 int i;
533
3a3d5756
SH
534 for (i = 0; i < sp->config.tx_fifo_num; i++)
535 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 536 }
fd2ea0a7 537 netif_tx_wake_all_queues(sp->dev);
3a3d5756
SH
538}
539
540static inline void s2io_wake_tx_queue(
541 struct fifo_info *fifo, int cnt, u8 multiq)
542{
543
3a3d5756
SH
544 if (multiq) {
545 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
546 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
b19fa1fa 547 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
548 if (netif_queue_stopped(fifo->dev)) {
549 fifo->queue_state = FIFO_QUEUE_START;
550 netif_wake_queue(fifo->dev);
551 }
552 }
553}
554
1da177e4
LT
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return: SUCCESS on success; -EINVAL on invalid fifo configuration,
 * -ENOMEM / FAILURE on allocation or Rx configuration errors. On any
 * error the caller is expected to invoke free_shared_mem(), which
 * releases whatever was allocated before the failure.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;	/* running byte count for sw_stat */

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	/* Pack as many TxD lists as fit into one page; a list never
	 * straddles a page boundary.
	 */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			/* Carve the page into per-descriptor-list slots and
			 * record their virtual/DMA addresses.
			 */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* One u64 UFO in-band marker per TxD for each fifo. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* One RxD per block is consumed as the link descriptor. */
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);

			/* Host-side shadow array of per-RxD addresses. */
			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			/* Circularly link block j to block j+1 (last wraps
			 * to first) via the trailing link descriptor.
			 */
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					/* Over-allocate by ALIGN_SIZE and
					 * round the working pointer (ba_0 /
					 * ba_1) up to an aligned address;
					 * ba_*_org keeps the kfree pointer.
					 */
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		pci_alloc_consistent(nic->pdev, size,
				     &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	/* NOTE(review): tmp_p_addr here still holds the DMA address of the
	 * last Rx block touched above, not the stats block — the message
	 * labels it "Ring Mem PHY" accordingly.
	 */
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
861
20346722
K
862/**
863 * free_shared_mem - Free the allocated Memory
1da177e4
LT
864 * @nic: Device private variable.
865 * Description: This function is to free all memory locations allocated by
866 * the init_shared_mem() function and return it to the kernel.
867 */
868
869static void free_shared_mem(struct s2io_nic *nic)
870{
871 int i, j, blk_cnt, size;
872 void *tmp_v_addr;
873 dma_addr_t tmp_p_addr;
1da177e4 874 int lst_size, lst_per_page;
8910b49f 875 struct net_device *dev;
491976b2 876 int page_num = 0;
ffb5df6c
JP
877 struct config_param *config;
878 struct mac_info *mac_control;
879 struct stat_block *stats;
880 struct swStat *swstats;
1da177e4
LT
881
882 if (!nic)
883 return;
884
8910b49f
MG
885 dev = nic->dev;
886
1da177e4 887 config = &nic->config;
ffb5df6c
JP
888 mac_control = &nic->mac_control;
889 stats = mac_control->stats_info;
890 swstats = &stats->sw_stat;
1da177e4 891
d44570e4 892 lst_size = sizeof(struct TxD) * config->max_txds;
1da177e4
LT
893 lst_per_page = PAGE_SIZE / lst_size;
894
895 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
896 struct fifo_info *fifo = &mac_control->fifos[i];
897 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
898
899 page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
1da177e4
LT
900 for (j = 0; j < page_num; j++) {
901 int mem_blks = (j * lst_per_page);
13d866a9
JP
902 struct list_info_hold *fli;
903
904 if (!fifo->list_info)
6aa20a22 905 return;
13d866a9
JP
906
907 fli = &fifo->list_info[mem_blks];
908 if (!fli->list_virt_addr)
1da177e4
LT
909 break;
910 pci_free_consistent(nic->pdev, PAGE_SIZE,
13d866a9
JP
911 fli->list_virt_addr,
912 fli->list_phy_addr);
ffb5df6c 913 swstats->mem_freed += PAGE_SIZE;
1da177e4 914 }
776bd20f 915 /* If we got a zero DMA address during allocation,
916 * free the page now
917 */
918 if (mac_control->zerodma_virt_addr) {
919 pci_free_consistent(nic->pdev, PAGE_SIZE,
920 mac_control->zerodma_virt_addr,
921 (dma_addr_t)0);
6aa20a22 922 DBG_PRINT(INIT_DBG,
9e39f7c5
JP
923 "%s: Freeing TxDL with zero DMA address. "
924 "Virtual address %p\n",
925 dev->name, mac_control->zerodma_virt_addr);
ffb5df6c 926 swstats->mem_freed += PAGE_SIZE;
776bd20f 927 }
13d866a9 928 kfree(fifo->list_info);
82c2d023 929 swstats->mem_freed += tx_cfg->fifo_len *
d44570e4 930 sizeof(struct list_info_hold);
1da177e4
LT
931 }
932
1da177e4 933 size = SIZE_OF_BLOCK;
1da177e4 934 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
935 struct ring_info *ring = &mac_control->rings[i];
936
937 blk_cnt = ring->block_count;
1da177e4 938 for (j = 0; j < blk_cnt; j++) {
13d866a9
JP
939 tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
940 tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
1da177e4
LT
941 if (tmp_v_addr == NULL)
942 break;
943 pci_free_consistent(nic->pdev, size,
944 tmp_v_addr, tmp_p_addr);
ffb5df6c 945 swstats->mem_freed += size;
13d866a9 946 kfree(ring->rx_blocks[j].rxds);
ffb5df6c
JP
947 swstats->mem_freed += sizeof(struct rxd_info) *
948 rxd_count[nic->rxd_mode];
1da177e4
LT
949 }
950 }
951
6d517a27 952 if (nic->rxd_mode == RXD_MODE_3B) {
da6971d8
AR
953 /* Freeing buffer storage addresses in 2BUFF mode. */
954 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
955 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
956 struct ring_info *ring = &mac_control->rings[i];
957
958 blk_cnt = rx_cfg->num_rxd /
959 (rxd_count[nic->rxd_mode] + 1);
da6971d8
AR
960 for (j = 0; j < blk_cnt; j++) {
961 int k = 0;
13d866a9 962 if (!ring->ba[j])
da6971d8
AR
963 continue;
964 while (k != rxd_count[nic->rxd_mode]) {
13d866a9 965 struct buffAdd *ba = &ring->ba[j][k];
da6971d8 966 kfree(ba->ba_0_org);
ffb5df6c
JP
967 swstats->mem_freed +=
968 BUF0_LEN + ALIGN_SIZE;
da6971d8 969 kfree(ba->ba_1_org);
ffb5df6c
JP
970 swstats->mem_freed +=
971 BUF1_LEN + ALIGN_SIZE;
da6971d8
AR
972 k++;
973 }
13d866a9 974 kfree(ring->ba[j]);
ffb5df6c
JP
975 swstats->mem_freed += sizeof(struct buffAdd) *
976 (rxd_count[nic->rxd_mode] + 1);
1da177e4 977 }
13d866a9 978 kfree(ring->ba);
ffb5df6c
JP
979 swstats->mem_freed += sizeof(struct buffAdd *) *
980 blk_cnt;
1da177e4 981 }
1da177e4 982 }
1da177e4 983
2fda096d 984 for (i = 0; i < nic->config.tx_fifo_num; i++) {
13d866a9
JP
985 struct fifo_info *fifo = &mac_control->fifos[i];
986 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
987
988 if (fifo->ufo_in_band_v) {
ffb5df6c
JP
989 swstats->mem_freed += tx_cfg->fifo_len *
990 sizeof(u64);
13d866a9 991 kfree(fifo->ufo_in_band_v);
2fda096d
SR
992 }
993 }
994
1da177e4 995 if (mac_control->stats_mem) {
ffb5df6c 996 swstats->mem_freed += mac_control->stats_mem_sz;
1da177e4
LT
997 pci_free_consistent(nic->pdev,
998 mac_control->stats_mem_sz,
999 mac_control->stats_mem,
1000 mac_control->stats_mem_phy);
491976b2 1001 }
1da177e4
LT
1002}
1003
541ae68f
K
1004/**
1005 * s2io_verify_pci_mode -
1006 */
1007
1ee6dd77 1008static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 1009{
1ee6dd77 1010 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1011 register u64 val64 = 0;
1012 int mode;
1013
1014 val64 = readq(&bar0->pci_mode);
1015 mode = (u8)GET_PCI_MODE(val64);
1016
d44570e4 1017 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f
K
1018 return -1; /* Unknown PCI mode */
1019 return mode;
1020}
1021
c92ca04b
AR
1022#define NEC_VENID 0x1033
1023#define NEC_DEVID 0x0125
1024static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1025{
1026 struct pci_dev *tdev = NULL;
008d845c 1027 for_each_pci_dev(tdev) {
26d36b64 1028 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
7ad62dbc 1029 if (tdev->bus == s2io_pdev->bus->parent) {
26d36b64 1030 pci_dev_put(tdev);
c92ca04b 1031 return 1;
7ad62dbc 1032 }
c92ca04b
AR
1033 }
1034 }
1035 return 0;
1036}
541ae68f 1037
7b32a312 1038static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
541ae68f
K
1039/**
1040 * s2io_print_pci_mode -
1041 */
1ee6dd77 1042static int s2io_print_pci_mode(struct s2io_nic *nic)
541ae68f 1043{
1ee6dd77 1044 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1045 register u64 val64 = 0;
1046 int mode;
1047 struct config_param *config = &nic->config;
9e39f7c5 1048 const char *pcimode;
541ae68f
K
1049
1050 val64 = readq(&bar0->pci_mode);
1051 mode = (u8)GET_PCI_MODE(val64);
1052
d44570e4 1053 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f
K
1054 return -1; /* Unknown PCI mode */
1055
c92ca04b
AR
1056 config->bus_speed = bus_speed[mode];
1057
1058 if (s2io_on_nec_bridge(nic->pdev)) {
1059 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
d44570e4 1060 nic->dev->name);
c92ca04b
AR
1061 return mode;
1062 }
1063
d44570e4
JP
1064 switch (mode) {
1065 case PCI_MODE_PCI_33:
9e39f7c5 1066 pcimode = "33MHz PCI bus";
d44570e4
JP
1067 break;
1068 case PCI_MODE_PCI_66:
9e39f7c5 1069 pcimode = "66MHz PCI bus";
d44570e4
JP
1070 break;
1071 case PCI_MODE_PCIX_M1_66:
9e39f7c5 1072 pcimode = "66MHz PCIX(M1) bus";
d44570e4
JP
1073 break;
1074 case PCI_MODE_PCIX_M1_100:
9e39f7c5 1075 pcimode = "100MHz PCIX(M1) bus";
d44570e4
JP
1076 break;
1077 case PCI_MODE_PCIX_M1_133:
9e39f7c5 1078 pcimode = "133MHz PCIX(M1) bus";
d44570e4
JP
1079 break;
1080 case PCI_MODE_PCIX_M2_66:
9e39f7c5 1081 pcimode = "133MHz PCIX(M2) bus";
d44570e4
JP
1082 break;
1083 case PCI_MODE_PCIX_M2_100:
9e39f7c5 1084 pcimode = "200MHz PCIX(M2) bus";
d44570e4
JP
1085 break;
1086 case PCI_MODE_PCIX_M2_133:
9e39f7c5 1087 pcimode = "266MHz PCIX(M2) bus";
d44570e4
JP
1088 break;
1089 default:
9e39f7c5
JP
1090 pcimode = "unsupported bus!";
1091 mode = -1;
541ae68f
K
1092 }
1093
9e39f7c5
JP
1094 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1095 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1096
541ae68f
K
1097 return mode;
1098}
1099
b7c5678f
RV
1100/**
1101 * init_tti - Initialization transmit traffic interrupt scheme
1102 * @nic: device private variable
1103 * @link: link status (UP/DOWN) used to enable/disable continuous
1104 * transmit interrupts
1105 * Description: The function configures transmit traffic interrupts
1106 * Return Value: SUCCESS on success and
1107 * '-1' on failure
1108 */
1109
0d66afe7 1110static int init_tti(struct s2io_nic *nic, int link)
b7c5678f
RV
1111{
1112 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1113 register u64 val64 = 0;
1114 int i;
ffb5df6c 1115 struct config_param *config = &nic->config;
b7c5678f
RV
1116
1117 for (i = 0; i < config->tx_fifo_num; i++) {
1118 /*
1119 * TTI Initialization. Default Tx timer gets us about
1120 * 250 interrupts per sec. Continuous interrupts are enabled
1121 * by default.
1122 */
1123 if (nic->device_type == XFRAME_II_DEVICE) {
1124 int count = (nic->config.bus_speed * 125)/2;
1125 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1126 } else
1127 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1128
1129 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
d44570e4
JP
1130 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1131 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1132 TTI_DATA1_MEM_TX_TIMER_AC_EN;
ac731ab6
SH
1133 if (i == 0)
1134 if (use_continuous_tx_intrs && (link == LINK_UP))
1135 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
b7c5678f
RV
1136 writeq(val64, &bar0->tti_data1_mem);
1137
ac731ab6
SH
1138 if (nic->config.intr_type == MSI_X) {
1139 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1140 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1141 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1142 TTI_DATA2_MEM_TX_UFC_D(0x300);
1143 } else {
1144 if ((nic->config.tx_steering_type ==
d44570e4
JP
1145 TX_DEFAULT_STEERING) &&
1146 (config->tx_fifo_num > 1) &&
1147 (i >= nic->udp_fifo_idx) &&
1148 (i < (nic->udp_fifo_idx +
1149 nic->total_udp_fifos)))
ac731ab6
SH
1150 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1151 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1152 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1153 TTI_DATA2_MEM_TX_UFC_D(0x120);
1154 else
1155 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1156 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1157 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1158 TTI_DATA2_MEM_TX_UFC_D(0x80);
1159 }
b7c5678f
RV
1160
1161 writeq(val64, &bar0->tti_data2_mem);
1162
d44570e4
JP
1163 val64 = TTI_CMD_MEM_WE |
1164 TTI_CMD_MEM_STROBE_NEW_CMD |
1165 TTI_CMD_MEM_OFFSET(i);
b7c5678f
RV
1166 writeq(val64, &bar0->tti_command_mem);
1167
1168 if (wait_for_cmd_complete(&bar0->tti_command_mem,
d44570e4
JP
1169 TTI_CMD_MEM_STROBE_NEW_CMD,
1170 S2IO_BIT_RESET) != SUCCESS)
b7c5678f
RV
1171 return FAILURE;
1172 }
1173
1174 return SUCCESS;
1175}
1176
20346722
K
1177/**
1178 * init_nic - Initialization of hardware
b7c5678f 1179 * @nic: device private variable
20346722
K
1180 * Description: The function sequentially configures every block
1181 * of the H/W from their reset values.
1182 * Return Value: SUCCESS on success and
1da177e4
LT
1183 * '-1' on failure (endian settings incorrect).
1184 */
1185
1186static int init_nic(struct s2io_nic *nic)
1187{
1ee6dd77 1188 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
1189 struct net_device *dev = nic->dev;
1190 register u64 val64 = 0;
1191 void __iomem *add;
1192 u32 time;
1193 int i, j;
c92ca04b 1194 int dtx_cnt = 0;
1da177e4 1195 unsigned long long mem_share;
20346722 1196 int mem_size;
ffb5df6c
JP
1197 struct config_param *config = &nic->config;
1198 struct mac_info *mac_control = &nic->mac_control;
1da177e4 1199
5e25b9dd 1200 /* to set the swapper controle on the card */
d44570e4
JP
1201 if (s2io_set_swapper(nic)) {
1202 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
9f74ffde 1203 return -EIO;
1da177e4
LT
1204 }
1205
541ae68f
K
1206 /*
1207 * Herc requires EOI to be removed from reset before XGXS, so..
1208 */
1209 if (nic->device_type & XFRAME_II_DEVICE) {
1210 val64 = 0xA500000000ULL;
1211 writeq(val64, &bar0->sw_reset);
1212 msleep(500);
1213 val64 = readq(&bar0->sw_reset);
1214 }
1215
1da177e4
LT
1216 /* Remove XGXS from reset state */
1217 val64 = 0;
1218 writeq(val64, &bar0->sw_reset);
1da177e4 1219 msleep(500);
20346722 1220 val64 = readq(&bar0->sw_reset);
1da177e4 1221
7962024e
SH
1222 /* Ensure that it's safe to access registers by checking
1223 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1224 */
1225 if (nic->device_type == XFRAME_II_DEVICE) {
1226 for (i = 0; i < 50; i++) {
1227 val64 = readq(&bar0->adapter_status);
1228 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1229 break;
1230 msleep(10);
1231 }
1232 if (i == 50)
1233 return -ENODEV;
1234 }
1235
1da177e4
LT
1236 /* Enable Receiving broadcasts */
1237 add = &bar0->mac_cfg;
1238 val64 = readq(&bar0->mac_cfg);
1239 val64 |= MAC_RMAC_BCAST_ENABLE;
1240 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
d44570e4 1241 writel((u32)val64, add);
1da177e4
LT
1242 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1243 writel((u32) (val64 >> 32), (add + 4));
1244
1245 /* Read registers in all blocks */
1246 val64 = readq(&bar0->mac_int_mask);
1247 val64 = readq(&bar0->mc_int_mask);
1248 val64 = readq(&bar0->xgxs_int_mask);
1249
1250 /* Set MTU */
1251 val64 = dev->mtu;
1252 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1253
541ae68f
K
1254 if (nic->device_type & XFRAME_II_DEVICE) {
1255 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
303bcb4b 1256 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 1257 &bar0->dtx_control, UF);
541ae68f
K
1258 if (dtx_cnt & 0x1)
1259 msleep(1); /* Necessary!! */
1da177e4
LT
1260 dtx_cnt++;
1261 }
541ae68f 1262 } else {
c92ca04b
AR
1263 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1264 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1265 &bar0->dtx_control, UF);
1266 val64 = readq(&bar0->dtx_control);
1267 dtx_cnt++;
1da177e4
LT
1268 }
1269 }
1270
1271 /* Tx DMA Initialization */
1272 val64 = 0;
1273 writeq(val64, &bar0->tx_fifo_partition_0);
1274 writeq(val64, &bar0->tx_fifo_partition_1);
1275 writeq(val64, &bar0->tx_fifo_partition_2);
1276 writeq(val64, &bar0->tx_fifo_partition_3);
1277
1da177e4 1278 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
1279 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1280
1281 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1282 vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1da177e4
LT
1283
1284 if (i == (config->tx_fifo_num - 1)) {
1285 if (i % 2 == 0)
1286 i++;
1287 }
1288
1289 switch (i) {
1290 case 1:
1291 writeq(val64, &bar0->tx_fifo_partition_0);
1292 val64 = 0;
b7c5678f 1293 j = 0;
1da177e4
LT
1294 break;
1295 case 3:
1296 writeq(val64, &bar0->tx_fifo_partition_1);
1297 val64 = 0;
b7c5678f 1298 j = 0;
1da177e4
LT
1299 break;
1300 case 5:
1301 writeq(val64, &bar0->tx_fifo_partition_2);
1302 val64 = 0;
b7c5678f 1303 j = 0;
1da177e4
LT
1304 break;
1305 case 7:
1306 writeq(val64, &bar0->tx_fifo_partition_3);
b7c5678f
RV
1307 val64 = 0;
1308 j = 0;
1309 break;
1310 default:
1311 j++;
1da177e4
LT
1312 break;
1313 }
1314 }
1315
5e25b9dd
K
1316 /*
1317 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1318 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1319 */
d44570e4 1320 if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
5e25b9dd
K
1321 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1322
1da177e4
LT
1323 val64 = readq(&bar0->tx_fifo_partition_0);
1324 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
d44570e4 1325 &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1da177e4 1326
20346722
K
1327 /*
1328 * Initialization of Tx_PA_CONFIG register to ignore packet
1da177e4
LT
1329 * integrity checking.
1330 */
1331 val64 = readq(&bar0->tx_pa_cfg);
d44570e4
JP
1332 val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1333 TX_PA_CFG_IGNORE_SNAP_OUI |
1334 TX_PA_CFG_IGNORE_LLC_CTRL |
1335 TX_PA_CFG_IGNORE_L2_ERR;
1da177e4
LT
1336 writeq(val64, &bar0->tx_pa_cfg);
1337
dbedd44e 1338 /* Rx DMA initialization. */
1da177e4
LT
1339 val64 = 0;
1340 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
1341 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1342
1343 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1da177e4
LT
1344 }
1345 writeq(val64, &bar0->rx_queue_priority);
1346
20346722
K
1347 /*
1348 * Allocating equal share of memory to all the
1da177e4
LT
1349 * configured Rings.
1350 */
1351 val64 = 0;
541ae68f
K
1352 if (nic->device_type & XFRAME_II_DEVICE)
1353 mem_size = 32;
1354 else
1355 mem_size = 64;
1356
1da177e4
LT
1357 for (i = 0; i < config->rx_ring_num; i++) {
1358 switch (i) {
1359 case 0:
20346722
K
1360 mem_share = (mem_size / config->rx_ring_num +
1361 mem_size % config->rx_ring_num);
1da177e4
LT
1362 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1363 continue;
1364 case 1:
20346722 1365 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1366 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1367 continue;
1368 case 2:
20346722 1369 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1370 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1371 continue;
1372 case 3:
20346722 1373 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1374 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1375 continue;
1376 case 4:
20346722 1377 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1378 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1379 continue;
1380 case 5:
20346722 1381 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1382 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1383 continue;
1384 case 6:
20346722 1385 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1386 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1387 continue;
1388 case 7:
20346722 1389 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1390 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1391 continue;
1392 }
1393 }
1394 writeq(val64, &bar0->rx_queue_cfg);
1395
20346722 1396 /*
5e25b9dd 1397 * Filling Tx round robin registers
b7c5678f 1398 * as per the number of FIFOs for equal scheduling priority
1da177e4 1399 */
5e25b9dd
K
1400 switch (config->tx_fifo_num) {
1401 case 1:
b7c5678f 1402 val64 = 0x0;
5e25b9dd
K
1403 writeq(val64, &bar0->tx_w_round_robin_0);
1404 writeq(val64, &bar0->tx_w_round_robin_1);
1405 writeq(val64, &bar0->tx_w_round_robin_2);
1406 writeq(val64, &bar0->tx_w_round_robin_3);
1407 writeq(val64, &bar0->tx_w_round_robin_4);
1408 break;
1409 case 2:
b7c5678f 1410 val64 = 0x0001000100010001ULL;
5e25b9dd 1411 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1412 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1413 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1414 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1415 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1416 writeq(val64, &bar0->tx_w_round_robin_4);
1417 break;
1418 case 3:
b7c5678f 1419 val64 = 0x0001020001020001ULL;
5e25b9dd 1420 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1421 val64 = 0x0200010200010200ULL;
5e25b9dd 1422 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1423 val64 = 0x0102000102000102ULL;
5e25b9dd 1424 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1425 val64 = 0x0001020001020001ULL;
5e25b9dd 1426 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1427 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1428 writeq(val64, &bar0->tx_w_round_robin_4);
1429 break;
1430 case 4:
b7c5678f 1431 val64 = 0x0001020300010203ULL;
5e25b9dd 1432 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1433 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1434 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1435 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1436 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1437 writeq(val64, &bar0->tx_w_round_robin_4);
1438 break;
1439 case 5:
b7c5678f 1440 val64 = 0x0001020304000102ULL;
5e25b9dd 1441 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1442 val64 = 0x0304000102030400ULL;
5e25b9dd 1443 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1444 val64 = 0x0102030400010203ULL;
5e25b9dd 1445 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1446 val64 = 0x0400010203040001ULL;
5e25b9dd 1447 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1448 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1449 writeq(val64, &bar0->tx_w_round_robin_4);
1450 break;
1451 case 6:
b7c5678f 1452 val64 = 0x0001020304050001ULL;
5e25b9dd 1453 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1454 val64 = 0x0203040500010203ULL;
5e25b9dd 1455 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1456 val64 = 0x0405000102030405ULL;
5e25b9dd 1457 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1458 val64 = 0x0001020304050001ULL;
5e25b9dd 1459 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1460 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1461 writeq(val64, &bar0->tx_w_round_robin_4);
1462 break;
1463 case 7:
b7c5678f 1464 val64 = 0x0001020304050600ULL;
5e25b9dd 1465 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1466 val64 = 0x0102030405060001ULL;
5e25b9dd 1467 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1468 val64 = 0x0203040506000102ULL;
5e25b9dd 1469 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1470 val64 = 0x0304050600010203ULL;
5e25b9dd 1471 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1472 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1473 writeq(val64, &bar0->tx_w_round_robin_4);
1474 break;
1475 case 8:
b7c5678f 1476 val64 = 0x0001020304050607ULL;
5e25b9dd 1477 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1478 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1479 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1480 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1481 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1482 writeq(val64, &bar0->tx_w_round_robin_4);
1483 break;
1484 }
1485
b41477f3 1486 /* Enable all configured Tx FIFO partitions */
5d3213cc
AR
1487 val64 = readq(&bar0->tx_fifo_partition_0);
1488 val64 |= (TX_FIFO_PARTITION_EN);
1489 writeq(val64, &bar0->tx_fifo_partition_0);
1490
5e25b9dd 1491 /* Filling the Rx round robin registers as per the
0425b46a
SH
1492 * number of Rings and steering based on QoS with
1493 * equal priority.
1494 */
5e25b9dd
K
1495 switch (config->rx_ring_num) {
1496 case 1:
0425b46a
SH
1497 val64 = 0x0;
1498 writeq(val64, &bar0->rx_w_round_robin_0);
1499 writeq(val64, &bar0->rx_w_round_robin_1);
1500 writeq(val64, &bar0->rx_w_round_robin_2);
1501 writeq(val64, &bar0->rx_w_round_robin_3);
1502 writeq(val64, &bar0->rx_w_round_robin_4);
1503
5e25b9dd
K
1504 val64 = 0x8080808080808080ULL;
1505 writeq(val64, &bar0->rts_qos_steering);
1506 break;
1507 case 2:
0425b46a 1508 val64 = 0x0001000100010001ULL;
5e25b9dd 1509 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1510 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1511 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1512 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1513 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1514 writeq(val64, &bar0->rx_w_round_robin_4);
1515
1516 val64 = 0x8080808040404040ULL;
1517 writeq(val64, &bar0->rts_qos_steering);
1518 break;
1519 case 3:
0425b46a 1520 val64 = 0x0001020001020001ULL;
5e25b9dd 1521 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1522 val64 = 0x0200010200010200ULL;
5e25b9dd 1523 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1524 val64 = 0x0102000102000102ULL;
5e25b9dd 1525 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1526 val64 = 0x0001020001020001ULL;
5e25b9dd 1527 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1528 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1529 writeq(val64, &bar0->rx_w_round_robin_4);
1530
1531 val64 = 0x8080804040402020ULL;
1532 writeq(val64, &bar0->rts_qos_steering);
1533 break;
1534 case 4:
0425b46a 1535 val64 = 0x0001020300010203ULL;
5e25b9dd 1536 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1537 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1538 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1539 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1540 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1541 writeq(val64, &bar0->rx_w_round_robin_4);
1542
1543 val64 = 0x8080404020201010ULL;
1544 writeq(val64, &bar0->rts_qos_steering);
1545 break;
1546 case 5:
0425b46a 1547 val64 = 0x0001020304000102ULL;
5e25b9dd 1548 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1549 val64 = 0x0304000102030400ULL;
5e25b9dd 1550 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1551 val64 = 0x0102030400010203ULL;
5e25b9dd 1552 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1553 val64 = 0x0400010203040001ULL;
5e25b9dd 1554 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1555 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1556 writeq(val64, &bar0->rx_w_round_robin_4);
1557
1558 val64 = 0x8080404020201008ULL;
1559 writeq(val64, &bar0->rts_qos_steering);
1560 break;
1561 case 6:
0425b46a 1562 val64 = 0x0001020304050001ULL;
5e25b9dd 1563 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1564 val64 = 0x0203040500010203ULL;
5e25b9dd 1565 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1566 val64 = 0x0405000102030405ULL;
5e25b9dd 1567 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1568 val64 = 0x0001020304050001ULL;
5e25b9dd 1569 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1570 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1571 writeq(val64, &bar0->rx_w_round_robin_4);
1572
1573 val64 = 0x8080404020100804ULL;
1574 writeq(val64, &bar0->rts_qos_steering);
1575 break;
1576 case 7:
0425b46a 1577 val64 = 0x0001020304050600ULL;
5e25b9dd 1578 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1579 val64 = 0x0102030405060001ULL;
5e25b9dd 1580 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1581 val64 = 0x0203040506000102ULL;
5e25b9dd 1582 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1583 val64 = 0x0304050600010203ULL;
5e25b9dd 1584 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1585 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1586 writeq(val64, &bar0->rx_w_round_robin_4);
1587
1588 val64 = 0x8080402010080402ULL;
1589 writeq(val64, &bar0->rts_qos_steering);
1590 break;
1591 case 8:
0425b46a 1592 val64 = 0x0001020304050607ULL;
5e25b9dd 1593 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1594 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1595 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1596 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1597 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1598 writeq(val64, &bar0->rx_w_round_robin_4);
1599
1600 val64 = 0x8040201008040201ULL;
1601 writeq(val64, &bar0->rts_qos_steering);
1602 break;
1603 }
1da177e4
LT
1604
1605 /* UDP Fix */
1606 val64 = 0;
20346722 1607 for (i = 0; i < 8; i++)
1da177e4
LT
1608 writeq(val64, &bar0->rts_frm_len_n[i]);
1609
5e25b9dd
K
1610 /* Set the default rts frame length for the rings configured */
1611 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1612 for (i = 0 ; i < config->rx_ring_num ; i++)
1613 writeq(val64, &bar0->rts_frm_len_n[i]);
1614
1615 /* Set the frame length for the configured rings
1616 * desired by the user
1617 */
1618 for (i = 0; i < config->rx_ring_num; i++) {
1619 /* If rts_frm_len[i] == 0 then it is assumed that user not
1620 * specified frame length steering.
1621 * If the user provides the frame length then program
1622 * the rts_frm_len register for those values or else
1623 * leave it as it is.
1624 */
1625 if (rts_frm_len[i] != 0) {
1626 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
d44570e4 1627 &bar0->rts_frm_len_n[i]);
5e25b9dd
K
1628 }
1629 }
8a4bdbaa 1630
9fc93a41
SS
1631 /* Disable differentiated services steering logic */
1632 for (i = 0; i < 64; i++) {
1633 if (rts_ds_steer(nic, i, 0) == FAILURE) {
9e39f7c5
JP
1634 DBG_PRINT(ERR_DBG,
1635 "%s: rts_ds_steer failed on codepoint %d\n",
1636 dev->name, i);
9f74ffde 1637 return -ENODEV;
9fc93a41
SS
1638 }
1639 }
1640
20346722 1641 /* Program statistics memory */
1da177e4 1642 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1643
541ae68f
K
1644 if (nic->device_type == XFRAME_II_DEVICE) {
1645 val64 = STAT_BC(0x320);
1646 writeq(val64, &bar0->stat_byte_cnt);
1647 }
1648
20346722 1649 /*
1da177e4
LT
1650 * Initializing the sampling rate for the device to calculate the
1651 * bandwidth utilization.
1652 */
1653 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
d44570e4 1654 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1da177e4
LT
1655 writeq(val64, &bar0->mac_link_util);
1656
20346722
K
1657 /*
1658 * Initializing the Transmit and Receive Traffic Interrupt
1da177e4
LT
1659 * Scheme.
1660 */
1da177e4 1661
b7c5678f
RV
1662 /* Initialize TTI */
1663 if (SUCCESS != init_tti(nic, nic->last_link_state))
1664 return -ENODEV;
1da177e4 1665
8a4bdbaa
SS
1666 /* RTI Initialization */
1667 if (nic->device_type == XFRAME_II_DEVICE) {
541ae68f 1668 /*
8a4bdbaa
SS
1669 * Programmed to generate Apprx 500 Intrs per
1670 * second
1671 */
1672 int count = (nic->config.bus_speed * 125)/4;
1673 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1674 } else
1675 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1676 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
d44570e4
JP
1677 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1678 RTI_DATA1_MEM_RX_URNG_C(0x30) |
1679 RTI_DATA1_MEM_RX_TIMER_AC_EN;
8a4bdbaa
SS
1680
1681 writeq(val64, &bar0->rti_data1_mem);
1682
1683 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1684 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1685 if (nic->config.intr_type == MSI_X)
d44570e4
JP
1686 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1687 RTI_DATA2_MEM_RX_UFC_D(0x40));
8a4bdbaa 1688 else
d44570e4
JP
1689 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1690 RTI_DATA2_MEM_RX_UFC_D(0x80));
8a4bdbaa 1691 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1692
8a4bdbaa 1693 for (i = 0; i < config->rx_ring_num; i++) {
d44570e4
JP
1694 val64 = RTI_CMD_MEM_WE |
1695 RTI_CMD_MEM_STROBE_NEW_CMD |
1696 RTI_CMD_MEM_OFFSET(i);
8a4bdbaa 1697 writeq(val64, &bar0->rti_command_mem);
1da177e4 1698
8a4bdbaa
SS
1699 /*
1700 * Once the operation completes, the Strobe bit of the
1701 * command register will be reset. We poll for this
1702 * particular condition. We wait for a maximum of 500ms
1703 * for the operation to complete, if it's not complete
1704 * by then we return error.
1705 */
1706 time = 0;
f957bcf0 1707 while (true) {
8a4bdbaa
SS
1708 val64 = readq(&bar0->rti_command_mem);
1709 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1710 break;
b6e3f982 1711
8a4bdbaa 1712 if (time > 10) {
9e39f7c5 1713 DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
8a4bdbaa 1714 dev->name);
9f74ffde 1715 return -ENODEV;
b6e3f982 1716 }
8a4bdbaa
SS
1717 time++;
1718 msleep(50);
1da177e4 1719 }
1da177e4
LT
1720 }
1721
20346722
K
1722 /*
1723 * Initializing proper values as Pause threshold into all
1da177e4
LT
1724 * the 8 Queues on Rx side.
1725 */
1726 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1727 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1728
1729 /* Disable RMAC PAD STRIPPING */
509a2671 1730 add = &bar0->mac_cfg;
1da177e4
LT
1731 val64 = readq(&bar0->mac_cfg);
1732 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1733 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1734 writel((u32) (val64), add);
1735 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1736 writel((u32) (val64 >> 32), (add + 4));
1737 val64 = readq(&bar0->mac_cfg);
1738
7d3d0439
RA
1739 /* Enable FCS stripping by adapter */
1740 add = &bar0->mac_cfg;
1741 val64 = readq(&bar0->mac_cfg);
1742 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1743 if (nic->device_type == XFRAME_II_DEVICE)
1744 writeq(val64, &bar0->mac_cfg);
1745 else {
1746 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1747 writel((u32) (val64), add);
1748 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1749 writel((u32) (val64 >> 32), (add + 4));
1750 }
1751
20346722
K
1752 /*
1753 * Set the time value to be inserted in the pause frame
1da177e4
LT
1754 * generated by xena.
1755 */
1756 val64 = readq(&bar0->rmac_pause_cfg);
1757 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1758 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1759 writeq(val64, &bar0->rmac_pause_cfg);
1760
20346722 1761 /*
1da177e4
LT
1762 * Set the Threshold Limit for Generating the pause frame
1763 * If the amount of data in any Queue exceeds ratio of
1764 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1765 * pause frame is generated
1766 */
1767 val64 = 0;
1768 for (i = 0; i < 4; i++) {
d44570e4
JP
1769 val64 |= (((u64)0xFF00 |
1770 nic->mac_control.mc_pause_threshold_q0q3)
1771 << (i * 2 * 8));
1da177e4
LT
1772 }
1773 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1774
1775 val64 = 0;
1776 for (i = 0; i < 4; i++) {
d44570e4
JP
1777 val64 |= (((u64)0xFF00 |
1778 nic->mac_control.mc_pause_threshold_q4q7)
1779 << (i * 2 * 8));
1da177e4
LT
1780 }
1781 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1782
20346722
K
1783 /*
1784 * TxDMA will stop Read request if the number of read split has
1da177e4
LT
1785 * exceeded the limit pointed by shared_splits
1786 */
1787 val64 = readq(&bar0->pic_control);
1788 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1789 writeq(val64, &bar0->pic_control);
1790
863c11a9
AR
1791 if (nic->config.bus_speed == 266) {
1792 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1793 writeq(0x0, &bar0->read_retry_delay);
1794 writeq(0x0, &bar0->write_retry_delay);
1795 }
1796
541ae68f
K
1797 /*
1798 * Programming the Herc to split every write transaction
1799 * that does not start on an ADB to reduce disconnects.
1800 */
1801 if (nic->device_type == XFRAME_II_DEVICE) {
19a60522
SS
1802 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1803 MISC_LINK_STABILITY_PRD(3);
863c11a9
AR
1804 writeq(val64, &bar0->misc_control);
1805 val64 = readq(&bar0->pic_control2);
b7b5a128 1806 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
863c11a9 1807 writeq(val64, &bar0->pic_control2);
541ae68f 1808 }
c92ca04b
AR
1809 if (strstr(nic->product_name, "CX4")) {
1810 val64 = TMAC_AVG_IPG(0x17);
1811 writeq(val64, &bar0->tmac_avg_ipg);
a371a07d
K
1812 }
1813
1da177e4
LT
1814 return SUCCESS;
1815}
a371a07d
K
1816#define LINK_UP_DOWN_INTERRUPT 1
1817#define MAC_RMAC_ERR_TIMER 2
1818
1ee6dd77 1819static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d
K
1820{
1821 if (nic->device_type == XFRAME_II_DEVICE)
1822 return LINK_UP_DOWN_INTERRUPT;
1823 else
1824 return MAC_RMAC_ERR_TIMER;
1825}
8116f3cf 1826
9caab458
SS
1827/**
1828 * do_s2io_write_bits - update alarm bits in alarm register
1829 * @value: alarm bits
1830 * @flag: interrupt status
1831 * @addr: address value
1832 * Description: update alarm bits in alarm register
1833 * Return Value:
1834 * NONE.
1835 */
1836static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1837{
1838 u64 temp64;
1839
1840 temp64 = readq(addr);
1841
d44570e4
JP
1842 if (flag == ENABLE_INTRS)
1843 temp64 &= ~((u64)value);
9caab458 1844 else
d44570e4 1845 temp64 |= ((u64)value);
9caab458
SS
1846 writeq(temp64, addr);
1847}
1da177e4 1848
/*
 * en_dis_err_alarms - enable or disable error/alarm interrupt sources
 * @nic: device private variable
 * @mask: bitmask selecting interrupt blocks (TX_DMA_INTR, TX_MAC_INTR,
 *        TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR, RX_XGXS_INTR, MC_INTR)
 * @flag: ENABLE_INTRS or DISABLE_INTRS, forwarded to do_s2io_write_bits()
 *
 * For every selected block, updates that block's error-mask registers and
 * accumulates the corresponding top-level bit into gen_int_mask.  All
 * interrupts are masked at the top level first so the per-block updates
 * cannot race with interrupt delivery.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything at the top level while the sub-masks change. */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/*
		 * The link-state-change alarm only exists where the link
		 * state is polled via the RMAC error timer.
		 */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
d44570e4 1975
20346722
K
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm mask previously built by en_dis_err_alarms(). */
	intr_mask = nic->general_int_mask;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/* Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/*
	 * Apply the accumulated top-level mask: enabling clears the bits,
	 * disabling masks everything; then cache the hardware's view.
	 */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2065
19a60522
SS
2066/**
2067 * verify_pcc_quiescent- Checks for PCC quiescent state
2068 * Return: 1 If PCC is quiescence
2069 * 0 If PCC is not quiescence
2070 */
1ee6dd77 2071static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
20346722 2072{
19a60522 2073 int ret = 0, herc;
1ee6dd77 2074 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522 2075 u64 val64 = readq(&bar0->adapter_status);
8a4bdbaa 2076
19a60522 2077 herc = (sp->device_type == XFRAME_II_DEVICE);
20346722 2078
f957bcf0 2079 if (flag == false) {
44c10138 2080 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
19a60522 2081 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2082 ret = 1;
19a60522
SS
2083 } else {
2084 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2085 ret = 1;
20346722
K
2086 }
2087 } else {
44c10138 2088 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
5e25b9dd 2089 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
19a60522 2090 ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2091 ret = 1;
5e25b9dd
K
2092 } else {
2093 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
19a60522 2094 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2095 ret = 1;
20346722
K
2096 }
2097 }
2098
2099 return ret;
2100}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether adapter enable bit was written or not the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 If xena is quiescence
 *         0 If Xena is not quiescence
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	/* One snapshot of the status register is checked bit by bit below. */
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE &&
	    mode != PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
		return 0;
	}
	/* RC_PRC_QUIESCENT is a multi-bit field: all bits must be set. */
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
		return 0;
	}
	return 1;
}
2169
2170/**
2171 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2172 * @sp: Pointer to device specifc structure
20346722 2173 * Description :
1da177e4
LT
2174 * New procedure to clear mac address reading problems on Alpha platforms
2175 *
2176 */
2177
d44570e4 2178static void fix_mac_address(struct s2io_nic *sp)
1da177e4 2179{
1ee6dd77 2180 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
2181 int i = 0;
2182
2183 while (fix_mac[i] != END_SIGN) {
2184 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 2185 udelay(10);
d83d282b 2186 (void) readq(&bar0->gpio_control);
1da177e4
LT
2187 }
2188}
2189
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called,all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the PRC at the first Rx block of this ring. */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the module parameter that disables VLAN tag stripping. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * Enabling ECC Protection.
	 * NOTE(review): the code clears ADAPTER_ECC_EN; whether the bit is
	 * active-low or the comment is stale cannot be determined from
	 * here — confirm against the Xframe register specification.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* Magic LED setup write at BAR0 offset 0x2700 (SXE-002). */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
fed5eccd
AR
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo the descriptor list belongs to
 * @txdlp: first TxD of the descriptor list
 * @get_off: descriptor offset (unused in the visible body)
 *
 * Unmaps every DMA buffer referenced by the descriptor list, zeroes the
 * descriptors, and returns the skb they carried (NULL if none).  The
 * caller owns the returned skb and is responsible for freeing it.
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* A leading UFO in-band descriptor holds a u64, not skb data. */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
				 sizeof(u64), PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear part, then each fragment's page mapping. */
	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev,
				       (dma_addr_t)txds->Buffer_Pointer,
				       skb_frag_size(frag), PCI_DMA_TODEVICE);
		}
	}
	/* Clear the whole descriptor list before handing back the skb. */
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
1da177e4 2347
20346722
K
/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct TxD *txdp;
	int i, j;
	int cnt = 0;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		struct fifo_info *fifo = &mac_control->fifos[i];
		unsigned long flags;

		/* Hold the fifo lock while walking its descriptor lists. */
		spin_lock_irqsave(&fifo->tx_lock, flags);
		for (j = 0; j < tx_cfg->fifo_len; j++) {
			txdp = fifo->list_info[j].list_virt_addr;
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			if (skb) {
				swstats->mem_freed += skb->truesize;
				dev_kfree_skb(skb);
				cnt++;
			}
		}
		/*
		 * NOTE(review): cnt is never reset between FIFOs, so this
		 * message reports a cumulative total rather than a per-FIFO
		 * count — confirm whether that is intentional.
		 */
		DBG_PRINT(INTR_DBG,
			  "%s: forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_put_info.offset = 0;
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
	}
}
2391
20346722
K
/**
 * stop_nic - To stop the nic
 * @nic ; device private variable.
 * Description:
 * This function does exactly the opposite of what the start_nic()
 * function does. This function is called to stop the device.
 * Return Value:
 * void.
 */

static void stop_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	u16 interruptible;

	/* Disable all interrupts */
	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
	interruptible |= TX_PIC_INTR;
	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~(ADAPTER_CNTL_EN);
	writeq(val64, &bar0->adapter_control);
}
2419
20346722
K
/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @ring_info: per ring structure
 * @from_card_up: If this is true, we will map the buffer to get
 *   the dma address for buf0 and buf1 to give it to the card.
 *   Else we will sync the already mapped buffer to give it to the card.
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * address of these buffers into the RxD buffer pointers, so that the NIC
 * can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. Five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 Header,
 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 * is split into 3 fragments. As of now only single buffer mode is
 * supported.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Replenish up to the ring's packet budget. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* Put caught up with get and the slot is still occupied. */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* End of a block reached: advance (and wrap) block index. */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the adapter: stop here. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = netdev_alloc_skb(nic->dev, size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Publish what was filled so far before bailing out. */
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				pci_map_single(ring->pdev, skb->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(nic->pdev,
						  rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE boundary. */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					pci_map_single(ring->pdev, ba->ba_0,
						       BUF0_LEN,
						       PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
							       (dma_addr_t)rxdp3->Buffer0_ptr,
							       BUF0_LEN,
							       PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			/*
			 * NOTE(review): this inner test repeats the enclosing
			 * "else if" condition and is therefore always true
			 * here — likely left over from a removed mode.
			 */
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
								    skb->data,
								    ring->mtu + 4,
								    PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
							       ba->ba_1,
							       BUF1_LEN,
							       PCI_DMA_FROMDEVICE);

					if (pci_dma_mapping_error(nic->pdev,
								  rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping before failing. */
						pci_unmap_single(ring->pdev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		/*
		 * NOTE(review): the wrap test uses rxd_count + 1, one past
		 * the per-block count used above — presumably matching the
		 * hardware block layout; confirm against the ring setup code.
		 */
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		/* Every 2^rxsync_frequency descriptors, publish the batch. */
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		dma_wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2651
da6971d8
AR
/*
 * free_rxd_blk - release every skb and DMA mapping in one Rx block
 * @sp: device private variable
 * @ring_no: index of the ring the block belongs to
 * @blk: index of the block within that ring
 *
 * Walks all descriptors of the block, unmaps the buffers appropriate to
 * the active receive mode, frees the attached skbs, and zeroes the
 * descriptors.  Adjusts the ring's rx_bufs_left accordingly.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
			rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (!skb)
			continue;	/* descriptor carries no buffer */
		if (sp->rxd_mode == RXD_MODE_1) {
			/* Single-buffer mode: one mapping covering the frame. */
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			/* Two-buffer mode: three separate mappings to undo. */
			rxdp3 = (struct RxD3 *)rxdp;
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer0_ptr,
					 BUF0_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer1_ptr,
					 BUF1_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 dev->mtu + 4,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
2700
1da177e4 2701/**
20346722 2702 * free_rx_buffers - Frees all Rx buffers
1da177e4 2703 * @sp: device private variable.
20346722 2704 * Description:
1da177e4
LT
2705 * This function will free all Rx buffers allocated by host.
2706 * Return Value:
2707 * NONE.
2708 */
2709
2710static void free_rx_buffers(struct s2io_nic *sp)
2711{
2712 struct net_device *dev = sp->dev;
da6971d8 2713 int i, blk = 0, buf_cnt = 0;
ffb5df6c
JP
2714 struct config_param *config = &sp->config;
2715 struct mac_info *mac_control = &sp->mac_control;
1da177e4
LT
2716
2717 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
2718 struct ring_info *ring = &mac_control->rings[i];
2719
da6971d8 2720 for (blk = 0; blk < rx_ring_sz[i]; blk++)
d44570e4 2721 free_rxd_blk(sp, i, blk);
1da177e4 2722
13d866a9
JP
2723 ring->rx_curr_put_info.block_index = 0;
2724 ring->rx_curr_get_info.block_index = 0;
2725 ring->rx_curr_put_info.offset = 0;
2726 ring->rx_curr_get_info.offset = 0;
2727 ring->rx_bufs_left = 0;
9e39f7c5 2728 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
1da177e4
LT
2729 dev->name, buf_cnt, i);
2730 }
2731}
2732
8d8bb39b 2733static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
f61e0a35 2734{
8d8bb39b 2735 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
9e39f7c5
JP
2736 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2737 ring->dev->name);
f61e0a35
SH
2738 }
2739 return 0;
2740}
2741
1da177e4
LT
2742/**
2743 * s2io_poll - Rx interrupt handler for NAPI support
bea3348e 2744 * @napi : pointer to the napi structure.
20346722 2745 * @budget : The number of packets that were budgeted to be processed
1da177e4
LT
2746 * during one pass through the 'Poll" function.
2747 * Description:
2748 * Comes into picture only if NAPI support has been incorporated. It does
2749 * the same thing that rx_intr_handler does, but not in a interrupt context
2750 * also It will process only a given number of packets.
2751 * Return value:
2752 * 0 on success and 1 if there are No Rx packets to be processed.
2753 */
2754
static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	/* Per-ring NAPI poll used in MSI-X mode: each ring has its own
	 * napi context embedded in its ring_info.
	 */
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	s2io_chk_rx_buffers(nic, ring);

	if (pkts_processed < budget_org) {
		/* Ring drained within budget: finish polling and unmask
		 * this ring's Rx vector so interrupts fire again.
		 */
		napi_complete_done(napi, pkts_processed);
		/*Re Enable MSI-Rx Vector*/
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		/* Per-ring mask bytes live at descending offsets within
		 * xmsi_mask_reg; 0x3f vs 0xbf selects the byte value for
		 * ring 0 vs the others — NOTE(review): exact register
		 * layout is hardware-defined, confirm against the Xframe
		 * register spec.
		 */
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		/* read back to flush the posted write */
		val8 = readb(addr);
	}
	return pkts_processed;
}
d44570e4 2783
f61e0a35
SH
2784static int s2io_poll_inta(struct napi_struct *napi, int budget)
2785{
2786 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
f61e0a35
SH
2787 int pkts_processed = 0;
2788 int ring_pkts_processed, i;
2789 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2790 int budget_org = budget;
ffb5df6c
JP
2791 struct config_param *config = &nic->config;
2792 struct mac_info *mac_control = &nic->mac_control;
1da177e4 2793
f61e0a35
SH
2794 if (unlikely(!is_s2io_card_up(nic)))
2795 return 0;
1da177e4 2796
1da177e4 2797 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9 2798 struct ring_info *ring = &mac_control->rings[i];
f61e0a35 2799 ring_pkts_processed = rx_intr_handler(ring, budget);
8d8bb39b 2800 s2io_chk_rx_buffers(nic, ring);
f61e0a35
SH
2801 pkts_processed += ring_pkts_processed;
2802 budget -= ring_pkts_processed;
2803 if (budget <= 0)
1da177e4 2804 break;
1da177e4 2805 }
f61e0a35 2806 if (pkts_processed < budget_org) {
6ad20165 2807 napi_complete_done(napi, pkts_processed);
f61e0a35
SH
2808 /* Re enable the Rx interrupts for the ring */
2809 writeq(0, &bar0->rx_traffic_mask);
2810 readl(&bar0->rx_traffic_mask);
2811 }
2812 return pkts_processed;
1da177e4 2813}
20346722 2814
b41477f3 2815#ifdef CONFIG_NET_POLL_CONTROLLER
612eff0e 2816/**
b41477f3 2817 * s2io_netpoll - netpoll event handler entry point
612eff0e
BH
2818 * @dev : pointer to the device structure.
2819 * Description:
b41477f3
AR
2820 * This function will be called by upper layer to check for events on the
2821 * interface in situations where interrupts are disabled. It is used for
2822 * specific in-kernel networking tasks, such as remote consoles and kernel
2823 * debugging over the network (example netdump in RedHat).
612eff0e 2824 */
612eff0e
BH
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	const int irq = nic->pdev->irq;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Bail out if the PCI channel is down (e.g. after an EEH error) */
	if (pci_channel_offline(nic->pdev))
		return;

	/* Poll with the device IRQ masked; re-enabled at the end */
	disable_irq(irq);

	/* Acknowledge all pending Rx/Tx traffic interrupts (write-1-to-clear
	 * style: all-ones written to both interrupt registers).
	 */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* budget 0: non-NAPI path processes without a packet cap */
		rx_intr_handler(ring, 0);
	}

	/* Refill every ring; stop at the first allocation failure */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(irq);
}
2869#endif
2870
20346722 2871/**
1da177e4 2872 * rx_intr_handler - Rx interrupt handler
f61e0a35
SH
2873 * @ring_info: per ring structure.
2874 * @budget: budget for napi processing.
20346722
K
2875 * Description:
2876 * If the interrupt is because of a received frame or if the
1da177e4 2877 * receive ring contains fresh as yet un-processed frames,this function is
20346722
K
2878 * called. It picks out the RxD at which place the last Rx processing had
2879 * stopped and sends the skb to the OSM's Rx handler and then increments
1da177e4
LT
2880 * the offset.
2881 * Return Value:
f61e0a35 2882 * No. of napi packets processed.
1da177e4 2883 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	if (budget <= 0)
		return napi_pkts;

	/* Snapshot consumer (get) and producer (put) cursors */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Walk descriptors the hardware has handed back to the host */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			/* Descriptor owned by host but no skb attached:
			 * inconsistent ring state, abort processing.
			 */
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
				  ring_data->dev->name);
			return 0;
		}
		if (ring_data->rxd_mode == RXD_MODE_1) {
			/* 1-buffer mode: unmap the single full-frame buffer */
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
					 rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* 3-buffer mode: sync the header buffer for CPU
			 * access and unmap the payload buffer.
			 */
			rxdp3 = (struct RxD3 *)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev,
						    (dma_addr_t)rxdp3->Buffer0_ptr,
						    BUF0_LEN,
						    PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		/* Hand the frame to the upper-layer receive path */
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		/* Wrap to the next block when this block is exhausted */
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		if (ring_data->nic->config.napi) {
			/* NAPI mode: stop once the budget is exhausted */
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				/* Flush the aggregated frame upstream */
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
20346722
K
2980
2981/**
1da177e4
LT
2982 * tx_intr_handler - Transmit interrupt handler
2983 * @nic : device private variable
20346722
K
2984 * Description:
2985 * If an interrupt was raised to indicate DMA complete of the
2986 * Tx packet, this function is called. It identifies the last TxD
2987 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
2988 * DMA'ed into the NICs internal memory.
2989 * Return Value:
2990 * NONE
2991 */
2992
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Trylock: if another context is already reaping this fifo, let it */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
	/* Reap descriptors the NIC has released (ownership bit cleared),
	 * stopping at the producer cursor or an empty descriptor.
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				swstats->parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch (err_mask) {
			case 2:
				swstats->tx_buf_abort_cnt++;
				break;

			case 3:
				swstats->tx_desc_abort_cnt++;
				break;

			case 7:
				swstats->tx_parity_err_cnt++;
				break;

			case 10:
				swstats->tx_link_loss_cnt++;
				break;

			case 15:
				swstats->tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Unmap the buffers and recover the skb for this descriptor */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			/* Must drop the lock before returning */
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
				  __func__);
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the consumer cursor, wrapping at fifo_len + 1 */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset = get_info.offset;
	}

	/* Wake the queue if enough descriptors were freed */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3071
bd1034f0
AR
3072/**
3073 * s2io_mdio_write - Function to write in to MDIO registers
3074 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3075 * @addr : address value
3076 * @value : data value
3077 * @dev : pointer to net_device structure
3078 * Description:
3079 * This function is used to write values to the MDIO registers
3080 * NONE
3081 */
d44570e4
JP
3082static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3083 struct net_device *dev)
bd1034f0 3084{
d44570e4 3085 u64 val64;
4cf1653a 3086 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 3087 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0 3088
d44570e4
JP
3089 /* address transaction */
3090 val64 = MDIO_MMD_INDX_ADDR(addr) |
3091 MDIO_MMD_DEV_ADDR(mmd_type) |
3092 MDIO_MMS_PRT_ADDR(0x0);
bd1034f0
AR
3093 writeq(val64, &bar0->mdio_control);
3094 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3095 writeq(val64, &bar0->mdio_control);
3096 udelay(100);
3097
d44570e4
JP
3098 /* Data transaction */
3099 val64 = MDIO_MMD_INDX_ADDR(addr) |
3100 MDIO_MMD_DEV_ADDR(mmd_type) |
3101 MDIO_MMS_PRT_ADDR(0x0) |
3102 MDIO_MDIO_DATA(value) |
3103 MDIO_OP(MDIO_OP_WRITE_TRANS);
bd1034f0
AR
3104 writeq(val64, &bar0->mdio_control);
3105 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3106 writeq(val64, &bar0->mdio_control);
3107 udelay(100);
3108
d44570e4
JP
3109 val64 = MDIO_MMD_INDX_ADDR(addr) |
3110 MDIO_MMD_DEV_ADDR(mmd_type) |
3111 MDIO_MMS_PRT_ADDR(0x0) |
3112 MDIO_OP(MDIO_OP_READ_TRANS);
bd1034f0
AR
3113 writeq(val64, &bar0->mdio_control);
3114 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3115 writeq(val64, &bar0->mdio_control);
3116 udelay(100);
bd1034f0
AR
3117}
3118
3119/**
3120 * s2io_mdio_read - Function to write in to MDIO registers
3121 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3122 * @addr : address value
3123 * @dev : pointer to net_device structure
3124 * Description:
3125 * This function is used to read values to the MDIO registers
3126 * NONE
3127 */
3128static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3129{
3130 u64 val64 = 0x0;
3131 u64 rval64 = 0x0;
4cf1653a 3132 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 3133 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0
AR
3134
3135 /* address transaction */
d44570e4
JP
3136 val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3137 | MDIO_MMD_DEV_ADDR(mmd_type)
3138 | MDIO_MMS_PRT_ADDR(0x0));
bd1034f0
AR
3139 writeq(val64, &bar0->mdio_control);
3140 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3141 writeq(val64, &bar0->mdio_control);
3142 udelay(100);
3143
3144 /* Data transaction */
d44570e4
JP
3145 val64 = MDIO_MMD_INDX_ADDR(addr) |
3146 MDIO_MMD_DEV_ADDR(mmd_type) |
3147 MDIO_MMS_PRT_ADDR(0x0) |
3148 MDIO_OP(MDIO_OP_READ_TRANS);
bd1034f0
AR
3149 writeq(val64, &bar0->mdio_control);
3150 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3151 writeq(val64, &bar0->mdio_control);
3152 udelay(100);
3153
3154 /* Read the value from regs */
3155 rval64 = readq(&bar0->mdio_control);
3156 rval64 = rval64 & 0xFFFF0000;
3157 rval64 = rval64 >> 16;
3158 return rval64;
3159}
d44570e4 3160
bd1034f0
AR
3161/**
3162 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
fbfecd37 3163 * @counter : counter value to be updated
bd1034f0
AR
3164 * @flag : flag to indicate the status
3165 * @type : counter type
3166 * Description:
3167 * This function is to check the status of the xpak counters value
3168 * NONE
3169 */
3170
d44570e4
JP
3171static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3172 u16 flag, u16 type)
bd1034f0
AR
3173{
3174 u64 mask = 0x3;
3175 u64 val64;
3176 int i;
d44570e4 3177 for (i = 0; i < index; i++)
bd1034f0
AR
3178 mask = mask << 0x2;
3179
d44570e4 3180 if (flag > 0) {
bd1034f0
AR
3181 *counter = *counter + 1;
3182 val64 = *regs_stat & mask;
3183 val64 = val64 >> (index * 0x2);
3184 val64 = val64 + 1;
d44570e4
JP
3185 if (val64 == 3) {
3186 switch (type) {
bd1034f0 3187 case 1:
9e39f7c5
JP
3188 DBG_PRINT(ERR_DBG,
3189 "Take Xframe NIC out of service.\n");
3190 DBG_PRINT(ERR_DBG,
3191"Excessive temperatures may result in premature transceiver failure.\n");
d44570e4 3192 break;
bd1034f0 3193 case 2:
9e39f7c5
JP
3194 DBG_PRINT(ERR_DBG,
3195 "Take Xframe NIC out of service.\n");
3196 DBG_PRINT(ERR_DBG,
3197"Excessive bias currents may indicate imminent laser diode failure.\n");
d44570e4 3198 break;
bd1034f0 3199 case 3:
9e39f7c5
JP
3200 DBG_PRINT(ERR_DBG,
3201 "Take Xframe NIC out of service.\n");
3202 DBG_PRINT(ERR_DBG,
3203"Excessive laser output power may saturate far-end receiver.\n");
d44570e4 3204 break;
bd1034f0 3205 default:
d44570e4
JP
3206 DBG_PRINT(ERR_DBG,
3207 "Incorrect XPAK Alarm type\n");
bd1034f0
AR
3208 }
3209 val64 = 0x0;
3210 }
3211 val64 = val64 << (index * 0x2);
3212 *regs_stat = (*regs_stat & (~mask)) | (val64);
3213
3214 } else {
3215 *regs_stat = *regs_stat & (~mask);
3216 }
3217}
3218
3219/**
3220 * s2io_updt_xpak_counter - Function to update the xpak counters
3221 * @dev : pointer to net_device struct
3222 * Description:
3223 * This function is to upate the status of the xpak counters value
3224 * NONE
3225 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Check the communication with the MDIO slave */
	addr = MDIO_CTRL1;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
	/* All-ones/all-zeros readback means the slave did not respond */
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG,
			  "ERR: MDIO slave access failed - Returned %llx\n",
			  (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of control reg 1 */
	if (val64 != MDIO_CTRL1_SPEED10G) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
			  "Returned: %llx- Expected: 0x%x\n",
			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
		return;
	}

	/* Loading the DOM register to MDIO register */
	/* NOTE(review): val16 is still 0 here, so this writes 0 to 0xA100;
	 * presumably that triggers the DOM snapshot — confirm against the
	 * XPAK MSA register definitions.
	 */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Bits 7/6: transceiver temperature high/low alarms */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
			      &xstats->xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		xstats->alarm_transceiver_temp_low++;

	/* Bits 3/2: laser bias current high/low alarms */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
			      &xstats->xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		xstats->alarm_laser_bias_current_low++;

	/* Bits 1/0: laser output power high/low alarms */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
			      &xstats->xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		xstats->alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Warning bits mirror the alarm layout; counters only, no action */
	if (CHECKBIT(val64, 0x7))
		xstats->warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		xstats->warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		xstats->warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		xstats->warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		xstats->warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		xstats->warn_laser_output_power_low++;
}
3317
20346722 3318/**
1da177e4 3319 * wait_for_cmd_complete - waits for a command to complete.
20346722 3320 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3321 * s2io_nic structure.
20346722
K
3322 * Description: Function that waits for a command to Write into RMAC
3323 * ADDR DATA registers to be completed and returns either success or
3324 * error depending on whether the command was complete or not.
1da177e4
LT
3325 * Return value:
3326 * SUCCESS on success and FAILURE on failure.
3327 */
3328
9fc93a41 3329static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
d44570e4 3330 int bit_state)
1da177e4 3331{
9fc93a41 3332 int ret = FAILURE, cnt = 0, delay = 1;
1da177e4
LT
3333 u64 val64;
3334
9fc93a41
SS
3335 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3336 return FAILURE;
3337
3338 do {
c92ca04b 3339 val64 = readq(addr);
9fc93a41
SS
3340 if (bit_state == S2IO_BIT_RESET) {
3341 if (!(val64 & busy_bit)) {
3342 ret = SUCCESS;
3343 break;
3344 }
3345 } else {
2d146eb1 3346 if (val64 & busy_bit) {
9fc93a41
SS
3347 ret = SUCCESS;
3348 break;
3349 }
1da177e4 3350 }
c92ca04b 3351
d44570e4 3352 if (in_interrupt())
9fc93a41 3353 mdelay(delay);
c92ca04b 3354 else
9fc93a41 3355 msleep(delay);
c92ca04b 3356
9fc93a41
SS
3357 if (++cnt >= 10)
3358 delay = 50;
3359 } while (cnt < 20);
1da177e4
LT
3360 return ret;
3361}
49ce9c2c 3362/**
19a60522
SS
3363 * check_pci_device_id - Checks if the device id is supported
3364 * @id : device id
3365 * Description: Function to check if the pci device id is supported by driver.
3366 * Return value: Actual device id if supported else PCI_ANY_ID
3367 */
3368static u16 check_pci_device_id(u16 id)
3369{
3370 switch (id) {
3371 case PCI_DEVICE_ID_HERC_WIN:
3372 case PCI_DEVICE_ID_HERC_UNI:
3373 return XFRAME_II_DEVICE;
3374 case PCI_DEVICE_ID_S2IO_UNI:
3375 case PCI_DEVICE_ID_S2IO_WIN:
3376 return XFRAME_I_DEVICE;
3377 default:
3378 return PCI_ANY_ID;
3379 }
3380}
1da177e4 3381
20346722
K
3382/**
3383 * s2io_reset - Resets the card.
1da177e4
LT
3384 * @sp : private member of the device structure.
3385 * Description: Function to Reset the card. This function then also
20346722 3386 * restores the previously saved PCI configuration space registers as
1da177e4
LT
3387 * the card reset also resets the configuration space.
3388 * Return value:
3389 * void.
3390 */
3391
static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Issue the software reset; CX4 variants need extra settle time */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	/* Re-init config space until the device id reads back sane.
	 * NOTE(review): val16 is only assigned inside this loop, so it is
	 * read uninitialized below if S2IO_MAX_PCI_CONFIG_SPACE_REINIT
	 * were ever 0 — confirm the constant is always positive.
	 */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	/* Restore the saved PCI-X command register */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	/* Wipe the hardware statistics block, then put back the
	 * software-maintained values saved above.
	 */
	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3504
3505/**
20346722
K
3506 * s2io_set_swapper - to set the swapper controle on the card
3507 * @sp : private member of the device structure,
1da177e4 3508 * pointer to the s2io_nic structure.
20346722 3509 * Description: Function to set the swapper control on the card
1da177e4
LT
3510 * correctly depending on the 'endianness' of the system.
3511 * Return value:
3512 * SUCCESS on success and FAILURE on failure.
3513 */
3514
d44570e4 3515static int s2io_set_swapper(struct s2io_nic *sp)
1da177e4
LT
3516{
3517 struct net_device *dev = sp->dev;
1ee6dd77 3518 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
3519 u64 val64, valt, valr;
3520
20346722 3521 /*
1da177e4
LT
3522 * Set proper endian settings and verify the same by reading
3523 * the PIF Feed-back register.
3524 */
3525
3526 val64 = readq(&bar0->pif_rd_swapper_fb);
3527 if (val64 != 0x0123456789ABCDEFULL) {
3528 int i = 0;
85a56498
JM
3529 static const u64 value[] = {
3530 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3531 0x8100008181000081ULL, /* FE=1, SE=0 */
3532 0x4200004242000042ULL, /* FE=0, SE=1 */
3533 0 /* FE=0, SE=0 */
3534 };
1da177e4 3535
d44570e4 3536 while (i < 4) {
1da177e4
LT
3537 writeq(value[i], &bar0->swapper_ctrl);
3538 val64 = readq(&bar0->pif_rd_swapper_fb);
3539 if (val64 == 0x0123456789ABCDEFULL)
3540 break;
3541 i++;
3542 }
3543 if (i == 4) {
9e39f7c5
JP
3544 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3545 "feedback read %llx\n",
3546 dev->name, (unsigned long long)val64);
1da177e4
LT
3547 return FAILURE;
3548 }
3549 valr = value[i];
3550 } else {
3551 valr = readq(&bar0->swapper_ctrl);
3552 }
3553
3554 valt = 0x0123456789ABCDEFULL;
3555 writeq(valt, &bar0->xmsi_address);
3556 val64 = readq(&bar0->xmsi_address);
3557
d44570e4 3558 if (val64 != valt) {
1da177e4 3559 int i = 0;
85a56498
JM
3560 static const u64 value[] = {
3561 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3562 0x0081810000818100ULL, /* FE=1, SE=0 */
3563 0x0042420000424200ULL, /* FE=0, SE=1 */
3564 0 /* FE=0, SE=0 */
3565 };
1da177e4 3566
d44570e4 3567 while (i < 4) {
1da177e4
LT
3568 writeq((value[i] | valr), &bar0->swapper_ctrl);
3569 writeq(valt, &bar0->xmsi_address);
3570 val64 = readq(&bar0->xmsi_address);
d44570e4 3571 if (val64 == valt)
1da177e4
LT
3572 break;
3573 i++;
3574 }
d44570e4 3575 if (i == 4) {
20346722 3576 unsigned long long x = val64;
9e39f7c5
JP
3577 DBG_PRINT(ERR_DBG,
3578 "Write failed, Xmsi_addr reads:0x%llx\n", x);
1da177e4
LT
3579 return FAILURE;
3580 }
3581 }
3582 val64 = readq(&bar0->swapper_ctrl);
3583 val64 &= 0xFFFF000000000000ULL;
3584
d44570e4 3585#ifdef __BIG_ENDIAN
20346722
K
3586 /*
3587 * The device by default set to a big endian format, so a
1da177e4
LT
3588 * big endian driver need not set anything.
3589 */
3590 val64 |= (SWAPPER_CTRL_TXP_FE |
d44570e4
JP
3591 SWAPPER_CTRL_TXP_SE |
3592 SWAPPER_CTRL_TXD_R_FE |
3593 SWAPPER_CTRL_TXD_W_FE |
3594 SWAPPER_CTRL_TXF_R_FE |
3595 SWAPPER_CTRL_RXD_R_FE |
3596 SWAPPER_CTRL_RXD_W_FE |
3597 SWAPPER_CTRL_RXF_W_FE |
3598 SWAPPER_CTRL_XMSI_FE |
3599 SWAPPER_CTRL_STATS_FE |
3600 SWAPPER_CTRL_STATS_SE);
eaae7f72 3601 if (sp->config.intr_type == INTA)
cc6e7c44 3602 val64 |= SWAPPER_CTRL_XMSI_SE;
1da177e4
LT
3603 writeq(val64, &bar0->swapper_ctrl);
3604#else
20346722 3605 /*
1da177e4 3606 * Initially we enable all bits to make it accessible by the
20346722 3607 * driver, then we selectively enable only those bits that
1da177e4
LT
3608 * we want to set.
3609 */
3610 val64 |= (SWAPPER_CTRL_TXP_FE |
d44570e4
JP
3611 SWAPPER_CTRL_TXP_SE |
3612 SWAPPER_CTRL_TXD_R_FE |
3613 SWAPPER_CTRL_TXD_R_SE |
3614 SWAPPER_CTRL_TXD_W_FE |
3615 SWAPPER_CTRL_TXD_W_SE |
3616 SWAPPER_CTRL_TXF_R_FE |
3617 SWAPPER_CTRL_RXD_R_FE |
3618 SWAPPER_CTRL_RXD_R_SE |
3619 SWAPPER_CTRL_RXD_W_FE |
3620 SWAPPER_CTRL_RXD_W_SE |
3621 SWAPPER_CTRL_RXF_W_FE |
3622 SWAPPER_CTRL_XMSI_FE |
3623 SWAPPER_CTRL_STATS_FE |
3624 SWAPPER_CTRL_STATS_SE);
eaae7f72 3625 if (sp->config.intr_type == INTA)
cc6e7c44 3626 val64 |= SWAPPER_CTRL_XMSI_SE;
1da177e4
LT
3627 writeq(val64, &bar0->swapper_ctrl);
3628#endif
3629 val64 = readq(&bar0->swapper_ctrl);
3630
20346722
K
3631 /*
3632 * Verifying if endian settings are accurate by reading a
1da177e4
LT
3633 * feedback register.
3634 */
3635 val64 = readq(&bar0->pif_rd_swapper_fb);
3636 if (val64 != 0x0123456789ABCDEFULL) {
3637 /* Endian settings are incorrect, calls for another dekko. */
9e39f7c5
JP
3638 DBG_PRINT(ERR_DBG,
3639 "%s: Endian settings are wrong, feedback read %llx\n",
3640 dev->name, (unsigned long long)val64);
1da177e4
LT
3641 return FAILURE;
3642 }
3643
3644 return SUCCESS;
3645}
3646
1ee6dd77 3647static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3648{
1ee6dd77 3649 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3650 u64 val64;
3651 int ret = 0, cnt = 0;
3652
3653 do {
3654 val64 = readq(&bar0->xmsi_access);
b7b5a128 3655 if (!(val64 & s2BIT(15)))
cc6e7c44
RA
3656 break;
3657 mdelay(1);
3658 cnt++;
d44570e4 3659 } while (cnt < 5);
cc6e7c44
RA
3660 if (cnt == 5) {
3661 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3662 ret = 1;
3663 }
3664
3665 return ret;
3666}
3667
1ee6dd77 3668static void restore_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3669{
1ee6dd77 3670 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44 3671 u64 val64;
f61e0a35
SH
3672 int i, msix_index;
3673
f61e0a35
SH
3674 if (nic->device_type == XFRAME_I_DEVICE)
3675 return;
cc6e7c44 3676
d44570e4
JP
3677 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3678 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
cc6e7c44
RA
3679 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3680 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
f61e0a35 3681 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
cc6e7c44 3682 writeq(val64, &bar0->xmsi_access);
f61e0a35 3683 if (wait_for_msix_trans(nic, msix_index)) {
9e39f7c5
JP
3684 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3685 __func__, msix_index);
cc6e7c44
RA
3686 continue;
3687 }
3688 }
3689}
3690
1ee6dd77 3691static void store_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3692{
1ee6dd77 3693 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44 3694 u64 val64, addr, data;
f61e0a35
SH
3695 int i, msix_index;
3696
3697 if (nic->device_type == XFRAME_I_DEVICE)
3698 return;
cc6e7c44
RA
3699
3700 /* Store and display */
d44570e4
JP
3701 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3702 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
f61e0a35 3703 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
cc6e7c44 3704 writeq(val64, &bar0->xmsi_access);
f61e0a35 3705 if (wait_for_msix_trans(nic, msix_index)) {
9e39f7c5
JP
3706 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3707 __func__, msix_index);
cc6e7c44
RA
3708 continue;
3709 }
3710 addr = readq(&bar0->xmsi_address);
3711 data = readq(&bar0->xmsi_data);
3712 if (addr && data) {
3713 nic->msix_info[i].addr = addr;
3714 nic->msix_info[i].data = data;
3715 }
3716 }
3717}
3718
1ee6dd77 3719static int s2io_enable_msi_x(struct s2io_nic *nic)
cc6e7c44 3720{
1ee6dd77 3721 struct XENA_dev_config __iomem *bar0 = nic->bar0;
ac731ab6 3722 u64 rx_mat;
cc6e7c44
RA
3723 u16 msi_control; /* Temp variable */
3724 int ret, i, j, msix_indx = 1;
4f870320 3725 int size;
ffb5df6c
JP
3726 struct stat_block *stats = nic->mac_control.stats_info;
3727 struct swStat *swstats = &stats->sw_stat;
cc6e7c44 3728
4f870320 3729 size = nic->num_entries * sizeof(struct msix_entry);
44364a03 3730 nic->entries = kzalloc(size, GFP_KERNEL);
bd684e43 3731 if (!nic->entries) {
d44570e4
JP
3732 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3733 __func__);
ffb5df6c 3734 swstats->mem_alloc_fail_cnt++;
cc6e7c44
RA
3735 return -ENOMEM;
3736 }
ffb5df6c 3737 swstats->mem_allocated += size;
f61e0a35 3738
4f870320 3739 size = nic->num_entries * sizeof(struct s2io_msix_entry);
44364a03 3740 nic->s2io_entries = kzalloc(size, GFP_KERNEL);
bd684e43 3741 if (!nic->s2io_entries) {
8a4bdbaa 3742 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
d44570e4 3743 __func__);
ffb5df6c 3744 swstats->mem_alloc_fail_cnt++;
cc6e7c44 3745 kfree(nic->entries);
ffb5df6c 3746 swstats->mem_freed
f61e0a35 3747 += (nic->num_entries * sizeof(struct msix_entry));
cc6e7c44
RA
3748 return -ENOMEM;
3749 }
ffb5df6c 3750 swstats->mem_allocated += size;
cc6e7c44 3751
ac731ab6
SH
3752 nic->entries[0].entry = 0;
3753 nic->s2io_entries[0].entry = 0;
3754 nic->s2io_entries[0].in_use = MSIX_FLG;
3755 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3756 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3757
f61e0a35
SH
3758 for (i = 1; i < nic->num_entries; i++) {
3759 nic->entries[i].entry = ((i - 1) * 8) + 1;
3760 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
cc6e7c44
RA
3761 nic->s2io_entries[i].arg = NULL;
3762 nic->s2io_entries[i].in_use = 0;
3763 }
3764
8a4bdbaa 3765 rx_mat = readq(&bar0->rx_mat);
f61e0a35 3766 for (j = 0; j < nic->config.rx_ring_num; j++) {
8a4bdbaa 3767 rx_mat |= RX_MAT_SET(j, msix_indx);
f61e0a35
SH
3768 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3769 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3770 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3771 msix_indx += 8;
cc6e7c44 3772 }
8a4bdbaa 3773 writeq(rx_mat, &bar0->rx_mat);
f61e0a35 3774 readq(&bar0->rx_mat);
cc6e7c44 3775
37a15ed3
AG
3776 ret = pci_enable_msix_range(nic->pdev, nic->entries,
3777 nic->num_entries, nic->num_entries);
c92ca04b 3778 /* We fail init if error or we get less vectors than min required */
37a15ed3 3779 if (ret < 0) {
9e39f7c5 3780 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
cc6e7c44 3781 kfree(nic->entries);
ffb5df6c
JP
3782 swstats->mem_freed += nic->num_entries *
3783 sizeof(struct msix_entry);
cc6e7c44 3784 kfree(nic->s2io_entries);
ffb5df6c
JP
3785 swstats->mem_freed += nic->num_entries *
3786 sizeof(struct s2io_msix_entry);
cc6e7c44
RA
3787 nic->entries = NULL;
3788 nic->s2io_entries = NULL;
3789 return -ENOMEM;
3790 }
3791
3792 /*
3793 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3794 * in the herc NIC. (Temp change, needs to be removed later)
3795 */
3796 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3797 msi_control |= 0x1; /* Enable MSI */
3798 pci_write_config_word(nic->pdev, 0x42, msi_control);
3799
3800 return 0;
3801}
3802
8abc4d5b 3803/* Handle software interrupt used during MSI(X) test */
33390a70 3804static irqreturn_t s2io_test_intr(int irq, void *dev_id)
8abc4d5b
SS
3805{
3806 struct s2io_nic *sp = dev_id;
3807
3808 sp->msi_detected = 1;
3809 wake_up(&sp->msi_wait);
3810
3811 return IRQ_HANDLED;
3812}
3813
3814/* Test interrupt path by forcing a a software IRQ */
33390a70 3815static int s2io_test_msi(struct s2io_nic *sp)
8abc4d5b
SS
3816{
3817 struct pci_dev *pdev = sp->pdev;
3818 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3819 int err;
3820 u64 val64, saved64;
3821
3822 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
d44570e4 3823 sp->name, sp);
8abc4d5b
SS
3824 if (err) {
3825 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
d44570e4 3826 sp->dev->name, pci_name(pdev), pdev->irq);
8abc4d5b
SS
3827 return err;
3828 }
3829
d44570e4 3830 init_waitqueue_head(&sp->msi_wait);
8abc4d5b
SS
3831 sp->msi_detected = 0;
3832
3833 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3834 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3835 val64 |= SCHED_INT_CTRL_TIMER_EN;
3836 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3837 writeq(val64, &bar0->scheduled_int_ctrl);
3838
3839 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3840
3841 if (!sp->msi_detected) {
3842 /* MSI(X) test failed, go back to INTx mode */
2450022a 3843 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
9e39f7c5
JP
3844 "using MSI(X) during test\n",
3845 sp->dev->name, pci_name(pdev));
8abc4d5b
SS
3846
3847 err = -EOPNOTSUPP;
3848 }
3849
3850 free_irq(sp->entries[1].vector, sp);
3851
3852 writeq(saved64, &bar0->scheduled_int_ctrl);
3853
3854 return err;
3855}
18b2b7bd
SH
3856
3857static void remove_msix_isr(struct s2io_nic *sp)
3858{
3859 int i;
3860 u16 msi_control;
3861
f61e0a35 3862 for (i = 0; i < sp->num_entries; i++) {
d44570e4 3863 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
18b2b7bd
SH
3864 int vector = sp->entries[i].vector;
3865 void *arg = sp->s2io_entries[i].arg;
3866 free_irq(vector, arg);
3867 }
3868 }
3869
3870 kfree(sp->entries);
3871 kfree(sp->s2io_entries);
3872 sp->entries = NULL;
3873 sp->s2io_entries = NULL;
3874
3875 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3876 msi_control &= 0xFFFE; /* Disable MSI */
3877 pci_write_config_word(sp->pdev, 0x42, msi_control);
3878
3879 pci_disable_msix(sp->pdev);
3880}
3881
3882static void remove_inta_isr(struct s2io_nic *sp)
3883{
80777c54 3884 free_irq(sp->pdev->irq, sp->dev);
18b2b7bd
SH
3885}
3886
1da177e4
LT
3887/* ********************************************************* *
3888 * Functions defined below concern the OS part of the driver *
3889 * ********************************************************* */
3890
20346722 3891/**
1da177e4
LT
3892 * s2io_open - open entry point of the driver
3893 * @dev : pointer to the device structure.
3894 * Description:
3895 * This function is the open entry point of the driver. It mainly calls a
3896 * function to allocate Rx buffers and inserts them into the buffer
20346722 3897 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3898 * Return value:
3899 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3900 * file on failure.
3901 */
3902
ac1f60db 3903static int s2io_open(struct net_device *dev)
1da177e4 3904{
4cf1653a 3905 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c 3906 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
3907 int err = 0;
3908
20346722
K
3909 /*
3910 * Make sure you have link off by default every time
1da177e4
LT
3911 * Nic is initialized
3912 */
3913 netif_carrier_off(dev);
0b1f7ebe 3914 sp->last_link_state = 0;
1da177e4
LT
3915
3916 /* Initialize H/W and enable interrupts */
c92ca04b
AR
3917 err = s2io_card_up(sp);
3918 if (err) {
1da177e4
LT
3919 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3920 dev->name);
e6a8fee2 3921 goto hw_init_failed;
1da177e4
LT
3922 }
3923
2fd37688 3924 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
1da177e4 3925 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
e6a8fee2 3926 s2io_card_down(sp);
20346722 3927 err = -ENODEV;
e6a8fee2 3928 goto hw_init_failed;
1da177e4 3929 }
3a3d5756 3930 s2io_start_all_tx_queue(sp);
1da177e4 3931 return 0;
20346722 3932
20346722 3933hw_init_failed:
eaae7f72 3934 if (sp->config.intr_type == MSI_X) {
491976b2 3935 if (sp->entries) {
cc6e7c44 3936 kfree(sp->entries);
ffb5df6c
JP
3937 swstats->mem_freed += sp->num_entries *
3938 sizeof(struct msix_entry);
491976b2
SH
3939 }
3940 if (sp->s2io_entries) {
cc6e7c44 3941 kfree(sp->s2io_entries);
ffb5df6c
JP
3942 swstats->mem_freed += sp->num_entries *
3943 sizeof(struct s2io_msix_entry);
491976b2 3944 }
cc6e7c44 3945 }
20346722 3946 return err;
1da177e4
LT
3947}
3948
3949/**
 3950 * s2io_close - close entry point of the driver
3951 * @dev : device pointer.
3952 * Description:
3953 * This is the stop entry point of the driver. It needs to undo exactly
3954 * whatever was done by the open entry point,thus it's usually referred to
3955 * as the close function.Among other things this function mainly stops the
3956 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3957 * Return value:
3958 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3959 * file on failure.
3960 */
3961
ac1f60db 3962static int s2io_close(struct net_device *dev)
1da177e4 3963{
4cf1653a 3964 struct s2io_nic *sp = netdev_priv(dev);
faa4f796
SH
3965 struct config_param *config = &sp->config;
3966 u64 tmp64;
3967 int offset;
cc6e7c44 3968
9f74ffde 3969 /* Return if the device is already closed *
d44570e4
JP
3970 * Can happen when s2io_card_up failed in change_mtu *
3971 */
9f74ffde
SH
3972 if (!is_s2io_card_up(sp))
3973 return 0;
3974
3a3d5756 3975 s2io_stop_all_tx_queue(sp);
faa4f796
SH
3976 /* delete all populated mac entries */
3977 for (offset = 1; offset < config->max_mc_addr; offset++) {
3978 tmp64 = do_s2io_read_unicast_mc(sp, offset);
3979 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3980 do_s2io_delete_unicast_mc(sp, tmp64);
3981 }
3982
e6a8fee2 3983 s2io_card_down(sp);
cc6e7c44 3984
1da177e4
LT
3985 return 0;
3986}
3987
3988/**
 3989 * s2io_xmit - Tx entry point of the driver
3990 * @skb : the socket buffer containing the Tx data.
3991 * @dev : device pointer.
3992 * Description :
3993 * This function is the Tx entry point of the driver. S2IO NIC supports
3994 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
25985edc 3995 * NOTE: when device can't queue the pkt,just the trans_start variable will
1da177e4
LT
 3996 * not be updated.
3997 * Return value:
3998 * 0 on success & 1 on failure.
3999 */
4000
61357325 4001static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 4002{
4cf1653a 4003 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
4004 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4005 register u64 val64;
1ee6dd77
RB
4006 struct TxD *txdp;
4007 struct TxFIFO_element __iomem *tx_fifo;
2fda096d 4008 unsigned long flags = 0;
be3a6b02 4009 u16 vlan_tag = 0;
2fda096d 4010 struct fifo_info *fifo = NULL;
75c30b13 4011 int offload_type;
6cfc482b 4012 int enable_per_list_interrupt = 0;
ffb5df6c
JP
4013 struct config_param *config = &sp->config;
4014 struct mac_info *mac_control = &sp->mac_control;
4015 struct stat_block *stats = mac_control->stats_info;
4016 struct swStat *swstats = &stats->sw_stat;
1da177e4 4017
20346722 4018 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
491976b2
SH
4019
4020 if (unlikely(skb->len <= 0)) {
9e39f7c5 4021 DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
491976b2 4022 dev_kfree_skb_any(skb);
6ed10654 4023 return NETDEV_TX_OK;
2fda096d 4024 }
491976b2 4025
92b84437 4026 if (!is_s2io_card_up(sp)) {
20346722 4027 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
1da177e4 4028 dev->name);
e6d26bd0 4029 dev_kfree_skb_any(skb);
6ed10654 4030 return NETDEV_TX_OK;
1da177e4
LT
4031 }
4032
4033 queue = 0;
df8a39de
JP
4034 if (skb_vlan_tag_present(skb))
4035 vlan_tag = skb_vlan_tag_get(skb);
6cfc482b
SH
4036 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4037 if (skb->protocol == htons(ETH_P_IP)) {
4038 struct iphdr *ip;
4039 struct tcphdr *th;
4040 ip = ip_hdr(skb);
4041
56f8a75c 4042 if (!ip_is_fragment(ip)) {
6cfc482b 4043 th = (struct tcphdr *)(((unsigned char *)ip) +
d44570e4 4044 ip->ihl*4);
6cfc482b
SH
4045
4046 if (ip->protocol == IPPROTO_TCP) {
4047 queue_len = sp->total_tcp_fifos;
4048 queue = (ntohs(th->source) +
d44570e4
JP
4049 ntohs(th->dest)) &
4050 sp->fifo_selector[queue_len - 1];
6cfc482b
SH
4051 if (queue >= queue_len)
4052 queue = queue_len - 1;
4053 } else if (ip->protocol == IPPROTO_UDP) {
4054 queue_len = sp->total_udp_fifos;
4055 queue = (ntohs(th->source) +
d44570e4
JP
4056 ntohs(th->dest)) &
4057 sp->fifo_selector[queue_len - 1];
6cfc482b
SH
4058 if (queue >= queue_len)
4059 queue = queue_len - 1;
4060 queue += sp->udp_fifo_idx;
4061 if (skb->len > 1024)
4062 enable_per_list_interrupt = 1;
6cfc482b
SH
4063 }
4064 }
4065 }
4066 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4067 /* get fifo number based on skb->priority value */
4068 queue = config->fifo_mapping
d44570e4 4069 [skb->priority & (MAX_TX_FIFOS - 1)];
6cfc482b 4070 fifo = &mac_control->fifos[queue];
3a3d5756 4071
a6086a89 4072 spin_lock_irqsave(&fifo->tx_lock, flags);
be3a6b02 4073
3a3d5756
SH
4074 if (sp->config.multiq) {
4075 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4076 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4077 return NETDEV_TX_BUSY;
4078 }
b19fa1fa 4079 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
4080 if (netif_queue_stopped(dev)) {
4081 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4082 return NETDEV_TX_BUSY;
4083 }
4084 }
4085
d44570e4
JP
4086 put_off = (u16)fifo->tx_curr_put_info.offset;
4087 get_off = (u16)fifo->tx_curr_get_info.offset;
43d620c8 4088 txdp = fifo->list_info[put_off].list_virt_addr;
20346722 4089
2fda096d 4090 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
1da177e4 4091 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9 4092 if (txdp->Host_Control ||
d44570e4 4093 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
776bd20f 4094 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3a3d5756 4095 s2io_stop_tx_queue(sp, fifo->fifo_no);
e6d26bd0 4096 dev_kfree_skb_any(skb);
2fda096d 4097 spin_unlock_irqrestore(&fifo->tx_lock, flags);
6ed10654 4098 return NETDEV_TX_OK;
1da177e4 4099 }
0b1f7ebe 4100
75c30b13 4101 offload_type = s2io_offload_type(skb);
75c30b13 4102 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1da177e4 4103 txdp->Control_1 |= TXD_TCP_LSO_EN;
75c30b13 4104 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
1da177e4 4105 }
84fa7933 4106 if (skb->ip_summed == CHECKSUM_PARTIAL) {
d44570e4
JP
4107 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4108 TXD_TX_CKO_TCP_EN |
4109 TXD_TX_CKO_UDP_EN);
1da177e4 4110 }
fed5eccd
AR
4111 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4112 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2fda096d 4113 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
6cfc482b
SH
4114 if (enable_per_list_interrupt)
4115 if (put_off & (queue_len >> 5))
4116 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
3a3d5756 4117 if (vlan_tag) {
be3a6b02
K
4118 txdp->Control_2 |= TXD_VLAN_ENABLE;
4119 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4120 }
4121
e743d313 4122 frg_len = skb_headlen(skb);
d44570e4
JP
4123 txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4124 frg_len, PCI_DMA_TODEVICE);
8d8bb39b 4125 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
491abf25
VP
4126 goto pci_map_failed;
4127
d44570e4 4128 txdp->Host_Control = (unsigned long)skb;
fed5eccd 4129 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
fed5eccd
AR
4130
4131 frg_cnt = skb_shinfo(skb)->nr_frags;
1da177e4
LT
4132 /* For fragmented SKB. */
4133 for (i = 0; i < frg_cnt; i++) {
9e903e08 4134 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0b1f7ebe 4135 /* A '0' length fragment will be ignored */
9e903e08 4136 if (!skb_frag_size(frag))
0b1f7ebe 4137 continue;
1da177e4 4138 txdp++;
f0d06d82
IC
4139 txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4140 frag, 0,
9e903e08 4141 skb_frag_size(frag),
5d6bcdfe 4142 DMA_TO_DEVICE);
9e903e08 4143 txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
1da177e4
LT
4144 }
4145 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4146
4147 tx_fifo = mac_control->tx_FIFO_start[queue];
2fda096d 4148 val64 = fifo->list_info[put_off].list_phy_addr;
1da177e4
LT
4149 writeq(val64, &tx_fifo->TxDL_Pointer);
4150
4151 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4152 TX_FIFO_LAST_LIST);
75c30b13 4153 if (offload_type)
fed5eccd 4154 val64 |= TX_FIFO_SPECIAL_FUNC;
75c30b13 4155
1da177e4
LT
4156 writeq(val64, &tx_fifo->List_Control);
4157
303bcb4b
K
4158 mmiowb();
4159
1da177e4 4160 put_off++;
2fda096d 4161 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
863c11a9 4162 put_off = 0;
2fda096d 4163 fifo->tx_curr_put_info.offset = put_off;
1da177e4
LT
4164
4165 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9 4166 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
ffb5df6c 4167 swstats->fifo_full_cnt++;
1da177e4
LT
4168 DBG_PRINT(TX_DBG,
4169 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4170 put_off, get_off);
3a3d5756 4171 s2io_stop_tx_queue(sp, fifo->fifo_no);
1da177e4 4172 }
ffb5df6c 4173 swstats->mem_allocated += skb->truesize;
2fda096d 4174 spin_unlock_irqrestore(&fifo->tx_lock, flags);
1da177e4 4175
f6f4bfa3
SH
4176 if (sp->config.intr_type == MSI_X)
4177 tx_intr_handler(fifo);
4178
6ed10654 4179 return NETDEV_TX_OK;
ffb5df6c 4180
491abf25 4181pci_map_failed:
ffb5df6c 4182 swstats->pci_map_fail_cnt++;
3a3d5756 4183 s2io_stop_tx_queue(sp, fifo->fifo_no);
ffb5df6c 4184 swstats->mem_freed += skb->truesize;
e6d26bd0 4185 dev_kfree_skb_any(skb);
2fda096d 4186 spin_unlock_irqrestore(&fifo->tx_lock, flags);
6ed10654 4187 return NETDEV_TX_OK;
1da177e4
LT
4188}
4189
25fff88e 4190static void
e84a2ac9 4191s2io_alarm_handle(struct timer_list *t)
25fff88e 4192{
e84a2ac9 4193 struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
8116f3cf 4194 struct net_device *dev = sp->dev;
25fff88e 4195
8116f3cf 4196 s2io_handle_errors(dev);
25fff88e
K
4197 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4198}
4199
7d12e780 4200static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
cc6e7c44 4201{
1ee6dd77
RB
4202 struct ring_info *ring = (struct ring_info *)dev_id;
4203 struct s2io_nic *sp = ring->nic;
f61e0a35 4204 struct XENA_dev_config __iomem *bar0 = sp->bar0;
cc6e7c44 4205
f61e0a35 4206 if (unlikely(!is_s2io_card_up(sp)))
92b84437 4207 return IRQ_HANDLED;
92b84437 4208
f61e0a35 4209 if (sp->config.napi) {
1a79d1c3
AV
4210 u8 __iomem *addr = NULL;
4211 u8 val8 = 0;
f61e0a35 4212
1a79d1c3 4213 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
f61e0a35
SH
4214 addr += (7 - ring->ring_no);
4215 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4216 writeb(val8, addr);
4217 val8 = readb(addr);
288379f0 4218 napi_schedule(&ring->napi);
f61e0a35
SH
4219 } else {
4220 rx_intr_handler(ring, 0);
8d8bb39b 4221 s2io_chk_rx_buffers(sp, ring);
f61e0a35 4222 }
7d3d0439 4223
cc6e7c44
RA
4224 return IRQ_HANDLED;
4225}
4226
7d12e780 4227static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
cc6e7c44 4228{
ac731ab6
SH
4229 int i;
4230 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4231 struct s2io_nic *sp = fifos->nic;
4232 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4233 struct config_param *config = &sp->config;
4234 u64 reason;
cc6e7c44 4235
ac731ab6
SH
4236 if (unlikely(!is_s2io_card_up(sp)))
4237 return IRQ_NONE;
4238
4239 reason = readq(&bar0->general_int_status);
4240 if (unlikely(reason == S2IO_MINUS_ONE))
4241 /* Nothing much can be done. Get out */
92b84437 4242 return IRQ_HANDLED;
92b84437 4243
01e16faa
SH
4244 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4245 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
ac731ab6 4246
01e16faa
SH
4247 if (reason & GEN_INTR_TXPIC)
4248 s2io_txpic_intr_handle(sp);
ac731ab6 4249
01e16faa
SH
4250 if (reason & GEN_INTR_TXTRAFFIC)
4251 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
ac731ab6 4252
01e16faa
SH
4253 for (i = 0; i < config->tx_fifo_num; i++)
4254 tx_intr_handler(&fifos[i]);
ac731ab6 4255
01e16faa
SH
4256 writeq(sp->general_int_mask, &bar0->general_int_mask);
4257 readl(&bar0->general_int_status);
4258 return IRQ_HANDLED;
4259 }
4260 /* The interrupt was not raised by us */
4261 return IRQ_NONE;
cc6e7c44 4262}
ac731ab6 4263
1ee6dd77 4264static void s2io_txpic_intr_handle(struct s2io_nic *sp)
a371a07d 4265{
1ee6dd77 4266 struct XENA_dev_config __iomem *bar0 = sp->bar0;
a371a07d
K
4267 u64 val64;
4268
4269 val64 = readq(&bar0->pic_int_status);
4270 if (val64 & PIC_INT_GPIO) {
4271 val64 = readq(&bar0->gpio_int_reg);
4272 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4273 (val64 & GPIO_INT_REG_LINK_UP)) {
c92ca04b
AR
4274 /*
4275 * This is unstable state so clear both up/down
4276 * interrupt and adapter to re-evaluate the link state.
4277 */
d44570e4 4278 val64 |= GPIO_INT_REG_LINK_DOWN;
a371a07d
K
4279 val64 |= GPIO_INT_REG_LINK_UP;
4280 writeq(val64, &bar0->gpio_int_reg);
a371a07d 4281 val64 = readq(&bar0->gpio_int_mask);
c92ca04b
AR
4282 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4283 GPIO_INT_MASK_LINK_DOWN);
a371a07d 4284 writeq(val64, &bar0->gpio_int_mask);
d44570e4 4285 } else if (val64 & GPIO_INT_REG_LINK_UP) {
c92ca04b 4286 val64 = readq(&bar0->adapter_status);
d44570e4 4287 /* Enable Adapter */
19a60522
SS
4288 val64 = readq(&bar0->adapter_control);
4289 val64 |= ADAPTER_CNTL_EN;
4290 writeq(val64, &bar0->adapter_control);
4291 val64 |= ADAPTER_LED_ON;
4292 writeq(val64, &bar0->adapter_control);
4293 if (!sp->device_enabled_once)
4294 sp->device_enabled_once = 1;
c92ca04b 4295
19a60522
SS
4296 s2io_link(sp, LINK_UP);
4297 /*
4298 * unmask link down interrupt and mask link-up
4299 * intr
4300 */
4301 val64 = readq(&bar0->gpio_int_mask);
4302 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4303 val64 |= GPIO_INT_MASK_LINK_UP;
4304 writeq(val64, &bar0->gpio_int_mask);
c92ca04b 4305
d44570e4 4306 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
c92ca04b 4307 val64 = readq(&bar0->adapter_status);
19a60522
SS
4308 s2io_link(sp, LINK_DOWN);
4309 /* Link is down so unmaks link up interrupt */
4310 val64 = readq(&bar0->gpio_int_mask);
4311 val64 &= ~GPIO_INT_MASK_LINK_UP;
4312 val64 |= GPIO_INT_MASK_LINK_DOWN;
4313 writeq(val64, &bar0->gpio_int_mask);
ac1f90d6
SS
4314
4315 /* turn off LED */
4316 val64 = readq(&bar0->adapter_control);
d44570e4 4317 val64 = val64 & (~ADAPTER_LED_ON);
ac1f90d6 4318 writeq(val64, &bar0->adapter_control);
a371a07d
K
4319 }
4320 }
c92ca04b 4321 val64 = readq(&bar0->gpio_int_mask);
a371a07d
K
4322}
4323
8116f3cf
SS
4324/**
 4325 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4326 * @value: alarm bits
4327 * @addr: address value
4328 * @cnt: counter variable
4329 * Description: Check for alarm and increment the counter
4330 * Return Value:
4331 * 1 - if alarm bit set
4332 * 0 - if alarm bit is not set
4333 */
d44570e4
JP
4334static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4335 unsigned long long *cnt)
8116f3cf
SS
4336{
4337 u64 val64;
4338 val64 = readq(addr);
d44570e4 4339 if (val64 & value) {
8116f3cf
SS
4340 writeq(val64, addr);
4341 (*cnt)++;
4342 return 1;
4343 }
4344 return 0;
4345
4346}
4347
4348/**
4349 * s2io_handle_errors - Xframe error indication handler
4350 * @nic: device private variable
4351 * Description: Handle alarms such as loss of link, single or
4352 * double ECC errors, critical and serious errors.
4353 * Return Value:
4354 * NONE
4355 */
d44570e4 4356static void s2io_handle_errors(void *dev_id)
8116f3cf 4357{
d44570e4 4358 struct net_device *dev = (struct net_device *)dev_id;
4cf1653a 4359 struct s2io_nic *sp = netdev_priv(dev);
8116f3cf 4360 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d44570e4 4361 u64 temp64 = 0, val64 = 0;
8116f3cf
SS
4362 int i = 0;
4363
4364 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4365 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4366
92b84437 4367 if (!is_s2io_card_up(sp))
8116f3cf
SS
4368 return;
4369
4370 if (pci_channel_offline(sp->pdev))
4371 return;
4372
4373 memset(&sw_stat->ring_full_cnt, 0,
d44570e4 4374 sizeof(sw_stat->ring_full_cnt));
8116f3cf
SS
4375
4376 /* Handling the XPAK counters update */
d44570e4 4377 if (stats->xpak_timer_count < 72000) {
8116f3cf
SS
4378 /* waiting for an hour */
4379 stats->xpak_timer_count++;
4380 } else {
4381 s2io_updt_xpak_counter(dev);
4382 /* reset the count to zero */
4383 stats->xpak_timer_count = 0;
4384 }
4385
4386 /* Handling link status change error Intr */
4387 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4388 val64 = readq(&bar0->mac_rmac_err_reg);
4389 writeq(val64, &bar0->mac_rmac_err_reg);
4390 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4391 schedule_work(&sp->set_link_task);
4392 }
4393
4394 /* In case of a serious error, the device will be Reset. */
4395 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
d44570e4 4396 &sw_stat->serious_err_cnt))
8116f3cf
SS
4397 goto reset;
4398
4399 /* Check for data parity error */
4400 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
d44570e4 4401 &sw_stat->parity_err_cnt))
8116f3cf
SS
4402 goto reset;
4403
4404 /* Check for ring full counter */
4405 if (sp->device_type == XFRAME_II_DEVICE) {
4406 val64 = readq(&bar0->ring_bump_counter1);
d44570e4
JP
4407 for (i = 0; i < 4; i++) {
4408 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
8116f3cf
SS
4409 temp64 >>= 64 - ((i+1)*16);
4410 sw_stat->ring_full_cnt[i] += temp64;
4411 }
4412
4413 val64 = readq(&bar0->ring_bump_counter2);
d44570e4
JP
4414 for (i = 0; i < 4; i++) {
4415 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
8116f3cf 4416 temp64 >>= 64 - ((i+1)*16);
d44570e4 4417 sw_stat->ring_full_cnt[i+4] += temp64;
8116f3cf
SS
4418 }
4419 }
4420
4421 val64 = readq(&bar0->txdma_int_status);
4422 /*check for pfc_err*/
4423 if (val64 & TXDMA_PFC_INT) {
d44570e4
JP
4424 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4425 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4426 PFC_PCIX_ERR,
4427 &bar0->pfc_err_reg,
4428 &sw_stat->pfc_err_cnt))
8116f3cf 4429 goto reset;
d44570e4
JP
4430 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4431 &bar0->pfc_err_reg,
4432 &sw_stat->pfc_err_cnt);
8116f3cf
SS
4433 }
4434
4435 /*check for tda_err*/
4436 if (val64 & TXDMA_TDA_INT) {
d44570e4
JP
4437 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4438 TDA_SM0_ERR_ALARM |
4439 TDA_SM1_ERR_ALARM,
4440 &bar0->tda_err_reg,
4441 &sw_stat->tda_err_cnt))
8116f3cf
SS
4442 goto reset;
4443 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
d44570e4
JP
4444 &bar0->tda_err_reg,
4445 &sw_stat->tda_err_cnt);
8116f3cf
SS
4446 }
4447 /*check for pcc_err*/
4448 if (val64 & TXDMA_PCC_INT) {
d44570e4
JP
4449 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4450 PCC_N_SERR | PCC_6_COF_OV_ERR |
4451 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4452 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4453 PCC_TXB_ECC_DB_ERR,
4454 &bar0->pcc_err_reg,
4455 &sw_stat->pcc_err_cnt))
8116f3cf
SS
4456 goto reset;
4457 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
d44570e4
JP
4458 &bar0->pcc_err_reg,
4459 &sw_stat->pcc_err_cnt);
8116f3cf
SS
4460 }
4461
4462 /*check for tti_err*/
4463 if (val64 & TXDMA_TTI_INT) {
d44570e4
JP
4464 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4465 &bar0->tti_err_reg,
4466 &sw_stat->tti_err_cnt))
8116f3cf
SS
4467 goto reset;
4468 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
d44570e4
JP
4469 &bar0->tti_err_reg,
4470 &sw_stat->tti_err_cnt);
8116f3cf
SS
4471 }
4472
4473 /*check for lso_err*/
4474 if (val64 & TXDMA_LSO_INT) {
d44570e4
JP
4475 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4476 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4477 &bar0->lso_err_reg,
4478 &sw_stat->lso_err_cnt))
8116f3cf
SS
4479 goto reset;
4480 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
d44570e4
JP
4481 &bar0->lso_err_reg,
4482 &sw_stat->lso_err_cnt);
8116f3cf
SS
4483 }
4484
4485 /*check for tpa_err*/
4486 if (val64 & TXDMA_TPA_INT) {
d44570e4
JP
4487 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4488 &bar0->tpa_err_reg,
4489 &sw_stat->tpa_err_cnt))
8116f3cf 4490 goto reset;
d44570e4
JP
4491 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4492 &bar0->tpa_err_reg,
4493 &sw_stat->tpa_err_cnt);
8116f3cf
SS
4494 }
4495
4496 /*check for sm_err*/
4497 if (val64 & TXDMA_SM_INT) {
d44570e4
JP
4498 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4499 &bar0->sm_err_reg,
4500 &sw_stat->sm_err_cnt))
8116f3cf
SS
4501 goto reset;
4502 }
4503
4504 val64 = readq(&bar0->mac_int_status);
4505 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4506 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
d44570e4
JP
4507 &bar0->mac_tmac_err_reg,
4508 &sw_stat->mac_tmac_err_cnt))
8116f3cf 4509 goto reset;
d44570e4
JP
4510 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4511 TMAC_DESC_ECC_SG_ERR |
4512 TMAC_DESC_ECC_DB_ERR,
4513 &bar0->mac_tmac_err_reg,
4514 &sw_stat->mac_tmac_err_cnt);
8116f3cf
SS
4515 }
4516
4517 val64 = readq(&bar0->xgxs_int_status);
4518 if (val64 & XGXS_INT_STATUS_TXGXS) {
4519 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
d44570e4
JP
4520 &bar0->xgxs_txgxs_err_reg,
4521 &sw_stat->xgxs_txgxs_err_cnt))
8116f3cf
SS
4522 goto reset;
4523 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
d44570e4
JP
4524 &bar0->xgxs_txgxs_err_reg,
4525 &sw_stat->xgxs_txgxs_err_cnt);
8116f3cf
SS
4526 }
4527
4528 val64 = readq(&bar0->rxdma_int_status);
4529 if (val64 & RXDMA_INT_RC_INT_M) {
d44570e4
JP
4530 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4531 RC_FTC_ECC_DB_ERR |
4532 RC_PRCn_SM_ERR_ALARM |
4533 RC_FTC_SM_ERR_ALARM,
4534 &bar0->rc_err_reg,
4535 &sw_stat->rc_err_cnt))
8116f3cf 4536 goto reset;
d44570e4
JP
4537 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4538 RC_FTC_ECC_SG_ERR |
4539 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4540 &sw_stat->rc_err_cnt);
4541 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4542 PRC_PCI_AB_WR_Rn |
4543 PRC_PCI_AB_F_WR_Rn,
4544 &bar0->prc_pcix_err_reg,
4545 &sw_stat->prc_pcix_err_cnt))
8116f3cf 4546 goto reset;
d44570e4
JP
4547 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4548 PRC_PCI_DP_WR_Rn |
4549 PRC_PCI_DP_F_WR_Rn,
4550 &bar0->prc_pcix_err_reg,
4551 &sw_stat->prc_pcix_err_cnt);
8116f3cf
SS
4552 }
4553
4554 if (val64 & RXDMA_INT_RPA_INT_M) {
4555 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
d44570e4
JP
4556 &bar0->rpa_err_reg,
4557 &sw_stat->rpa_err_cnt))
8116f3cf
SS
4558 goto reset;
4559 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
d44570e4
JP
4560 &bar0->rpa_err_reg,
4561 &sw_stat->rpa_err_cnt);
8116f3cf
SS
4562 }
4563
4564 if (val64 & RXDMA_INT_RDA_INT_M) {
d44570e4
JP
4565 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4566 RDA_FRM_ECC_DB_N_AERR |
4567 RDA_SM1_ERR_ALARM |
4568 RDA_SM0_ERR_ALARM |
4569 RDA_RXD_ECC_DB_SERR,
4570 &bar0->rda_err_reg,
4571 &sw_stat->rda_err_cnt))
8116f3cf 4572 goto reset;
d44570e4
JP
4573 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4574 RDA_FRM_ECC_SG_ERR |
4575 RDA_MISC_ERR |
4576 RDA_PCIX_ERR,
4577 &bar0->rda_err_reg,
4578 &sw_stat->rda_err_cnt);
8116f3cf
SS
4579 }
4580
4581 if (val64 & RXDMA_INT_RTI_INT_M) {
d44570e4
JP
4582 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4583 &bar0->rti_err_reg,
4584 &sw_stat->rti_err_cnt))
8116f3cf
SS
4585 goto reset;
4586 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
d44570e4
JP
4587 &bar0->rti_err_reg,
4588 &sw_stat->rti_err_cnt);
8116f3cf
SS
4589 }
4590
4591 val64 = readq(&bar0->mac_int_status);
4592 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4593 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
d44570e4
JP
4594 &bar0->mac_rmac_err_reg,
4595 &sw_stat->mac_rmac_err_cnt))
8116f3cf 4596 goto reset;
d44570e4
JP
4597 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4598 RMAC_SINGLE_ECC_ERR |
4599 RMAC_DOUBLE_ECC_ERR,
4600 &bar0->mac_rmac_err_reg,
4601 &sw_stat->mac_rmac_err_cnt);
8116f3cf
SS
4602 }
4603
4604 val64 = readq(&bar0->xgxs_int_status);
4605 if (val64 & XGXS_INT_STATUS_RXGXS) {
4606 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
d44570e4
JP
4607 &bar0->xgxs_rxgxs_err_reg,
4608 &sw_stat->xgxs_rxgxs_err_cnt))
8116f3cf
SS
4609 goto reset;
4610 }
4611
4612 val64 = readq(&bar0->mc_int_status);
d44570e4
JP
4613 if (val64 & MC_INT_STATUS_MC_INT) {
4614 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4615 &bar0->mc_err_reg,
4616 &sw_stat->mc_err_cnt))
8116f3cf
SS
4617 goto reset;
4618
4619 /* Handling Ecc errors */
4620 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4621 writeq(val64, &bar0->mc_err_reg);
4622 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4623 sw_stat->double_ecc_errs++;
4624 if (sp->device_type != XFRAME_II_DEVICE) {
4625 /*
4626 * Reset XframeI only if critical error
4627 */
4628 if (val64 &
d44570e4
JP
4629 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4630 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4631 goto reset;
4632 }
8116f3cf
SS
4633 } else
4634 sw_stat->single_ecc_errs++;
4635 }
4636 }
4637 return;
4638
4639reset:
3a3d5756 4640 s2io_stop_all_tx_queue(sp);
8116f3cf
SS
4641 schedule_work(&sp->rst_timer_task);
4642 sw_stat->soft_reset_cnt++;
8116f3cf
SS
4643}
4644
1da177e4
LT
/**
 * s2io_isr - ISR handler of the device .
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the dev structure of the NIC.
 * Description: This function is the ISR handler of the device. It
 * identifies the reason for the interrupt and calls the relevant
 * service routines. As a contingency measure, this ISR allocates the
 * recv buffers, if their numbers are below the panic value which is
 * presently set to 25% of the original number of rcv buffers allocated.
 * Return value:
 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
 * IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	/* Card is going down (or not yet up); nothing to service. */
	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	config = &sp->config;
	mac_control = &sp->mac_control;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	/* All-ones usually means the read itself failed (e.g. surprise
	 * removal) — NOTE(review): assumed from S2IO_MINUS_ONE usage,
	 * confirm against hardware spec.
	 */
	if (unlikely(reason == S2IO_MINUS_ONE))
		return IRQ_HANDLED;	/* Nothing much can be done. Get out */

	if (reason &
	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
		/* Mask all interrupts while we service this one. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* Defer Rx processing to the NAPI poll loop;
				 * mask and ack Rx so it does not refire.
				 */
				napi_schedule(&sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				/* read back — presumably flushes the posted
				 * writes to the device; TODO confirm
				 */
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			/* Non-NAPI mode: service every Rx ring inline. */
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				rx_intr_handler(ring, 0);
			}
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		/* Reap Tx completions on every fifo unconditionally. */
		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				s2io_chk_rx_buffers(sp, ring);
			}
		}
		/* Restore the interrupt mask and flush the write. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	} else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4753
7ba013ac
K
4754/**
4755 * s2io_updt_stats -
4756 */
1ee6dd77 4757static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4758{
1ee6dd77 4759 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac
K
4760 u64 val64;
4761 int cnt = 0;
4762
92b84437 4763 if (is_s2io_card_up(sp)) {
7ba013ac
K
4764 /* Apprx 30us on a 133 MHz bus */
4765 val64 = SET_UPDT_CLICKS(10) |
4766 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4767 writeq(val64, &bar0->stat_cfg);
4768 do {
4769 udelay(100);
4770 val64 = readq(&bar0->stat_cfg);
b7b5a128 4771 if (!(val64 & s2BIT(0)))
7ba013ac
K
4772 break;
4773 cnt++;
4774 if (cnt == 5)
4775 break; /* Updt failed */
d44570e4 4776 } while (1);
8a4bdbaa 4777 }
7ba013ac
K
4778}
4779
/**
 * s2io_get_stats - Updates the device statistics structure.
 * @dev : pointer to the device structure.
 * Description:
 * This function updates the device statistics structure in the s2io_nic
 * structure and returns a pointer to the same.
 * Return value:
 * pointer to the updated net_device_stats structure.
 */
static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	u64 delta;

	/* Configure Stats for immediate updt */
	s2io_updt_stats(sp);

	/* A device reset will cause the on-adapter statistics to be zero'ed.
	 * This can be done while running by changing the MTU. To prevent the
	 * system from having the stats zero'ed, the driver keeps a copy of the
	 * last update to the system (which is also zero'ed on reset). This
	 * enables the driver to accurately know the delta between the last
	 * update and the current update.
	 */
	/* Each counter below is a 48-bit value split into a 16-bit overflow
	 * word and a 32-bit low word in the stat block; combine, subtract the
	 * last snapshot, and add the delta to both copies.
	 */
	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
	sp->stats.rx_packets += delta;
	dev->stats.rx_packets += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
		 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
	sp->stats.tx_packets += delta;
	dev->stats.tx_packets += delta;

	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
	sp->stats.rx_bytes += delta;
	dev->stats.rx_bytes += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
	sp->stats.tx_bytes += delta;
	dev->stats.tx_bytes += delta;

	/* NOTE(review): rx_errors and rx_dropped below are both derived from
	 * the same rmac_drop_frms counter — confirm this is intentional.
	 */
	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
	sp->stats.rx_errors += delta;
	dev->stats.rx_errors += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
	sp->stats.tx_errors += delta;
	dev->stats.tx_errors += delta;

	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
	sp->stats.rx_dropped += delta;
	dev->stats.rx_dropped += delta;

	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
	sp->stats.tx_dropped += delta;
	dev->stats.tx_dropped += delta;

	/* The adapter MAC interprets pause frames as multicast packets, but
	 * does not pass them up. This erroneously increases the multicast
	 * packet count and needs to be deducted when the multicast frame count
	 * is queried.
	 */
	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
	delta -= sp->stats.multicast;
	sp->stats.multicast += delta;
	dev->stats.multicast += delta;

	/* Length errors: undersized plus oversized frames. */
	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		 le32_to_cpu(stats->rmac_usized_frms)) +
		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
	sp->stats.rx_length_errors += delta;
	dev->stats.rx_length_errors += delta;

	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
	sp->stats.rx_crc_errors += delta;
	dev->stats.rx_crc_errors += delta;

	return &dev->stats;
}
4867
/**
 * s2io_set_multicast - entry point for multicast address enable/disable.
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
 * determine, if multicast address must be enabled or if promiscuous mode
 * is to be disabled etc.
 * Return value:
 * void.
 */

static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct netdev_hw_addr *ha;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
		0xfeffffffffffULL;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
	void __iomem *add;
	struct config_param *config = &sp->config;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/* Enable all Multicast addresses: program a catch-all
		 * multicast entry (multi_mac with the group bit masked via
		 * 0xfeffffffffff) into the last CAM slot.
		 */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = config->max_mc_addr - 1;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/* Disable all Multicast addresses: overwrite the catch-all
		 * entry with the "disabled" pattern.
		 */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/* Put the NIC into promiscuous mode.
		 * mac_cfg is written 32 bits at a time, each half preceded by
		 * a write of the unlock key to rmac_cfg_key — NOTE(review):
		 * assumed to be a register-unlock sequence, confirm against
		 * the Xframe spec.
		 */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* In promiscuous mode also stop stripping VLAN tags unless
		 * the module parameter forces stripping on.
		 */
		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/* Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* Re-enable VLAN stripping unless forced off. */
		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
	}

	/* Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
		/* The multicast region of the CAM is the space between the
		 * unicast entries and max_mc_addr.
		 */
		if (netdev_mc_count(dev) >
		    (config->max_mc_addr - config->max_mac_addr)) {
			DBG_PRINT(ERR_DBG,
				  "%s: No more Rx filters can be added - "
				  "please enable ALL_MULTI instead\n",
				  dev->name);
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = netdev_mc_count(dev);

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(config->mc_start_offset + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Pack the 6 address bytes into the low 48 bits. */
			mac_addr = 0;
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= ha->addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(i + config->mc_start_offset);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
			i++;
		}
	}
}
5043
faa4f796
SH
5044/* read from CAM unicast & multicast addresses and store it in
5045 * def_mac_addr structure
5046 */
dac499f9 5047static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
faa4f796
SH
5048{
5049 int offset;
5050 u64 mac_addr = 0x0;
5051 struct config_param *config = &sp->config;
5052
5053 /* store unicast & multicast mac addresses */
5054 for (offset = 0; offset < config->max_mc_addr; offset++) {
5055 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5056 /* if read fails disable the entry */
5057 if (mac_addr == FAILURE)
5058 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5059 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5060 }
5061}
5062
5063/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5064static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5065{
5066 int offset;
5067 struct config_param *config = &sp->config;
5068 /* restore unicast mac address */
5069 for (offset = 0; offset < config->max_mac_addr; offset++)
5070 do_s2io_prog_unicast(sp->dev,
d44570e4 5071 sp->def_mac_addr[offset].mac_addr);
faa4f796
SH
5072
5073 /* restore multicast mac address */
5074 for (offset = config->mc_start_offset;
d44570e4 5075 offset < config->max_mc_addr; offset++)
faa4f796
SH
5076 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5077}
5078
5079/* add a multicast MAC address to CAM */
5080static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5081{
5082 int i;
5083 u64 mac_addr = 0;
5084 struct config_param *config = &sp->config;
5085
5086 for (i = 0; i < ETH_ALEN; i++) {
5087 mac_addr <<= 8;
5088 mac_addr |= addr[i];
5089 }
5090 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5091 return SUCCESS;
5092
5093 /* check if the multicast mac already preset in CAM */
5094 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5095 u64 tmp64;
5096 tmp64 = do_s2io_read_unicast_mc(sp, i);
5097 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5098 break;
5099
5100 if (tmp64 == mac_addr)
5101 return SUCCESS;
5102 }
5103 if (i == config->max_mc_addr) {
5104 DBG_PRINT(ERR_DBG,
d44570e4 5105 "CAM full no space left for multicast MAC\n");
faa4f796
SH
5106 return FAILURE;
5107 }
5108 /* Update the internal structure with this new mac address */
5109 do_s2io_copy_mac_addr(sp, i, mac_addr);
5110
d44570e4 5111 return do_s2io_add_mac(sp, mac_addr, i);
faa4f796
SH
5112}
5113
5114/* add MAC address to CAM */
5115static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
2fd37688
SS
5116{
5117 u64 val64;
5118 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5119
5120 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
d44570e4 5121 &bar0->rmac_addr_data0_mem);
2fd37688 5122
d44570e4 5123 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2fd37688
SS
5124 RMAC_ADDR_CMD_MEM_OFFSET(off);
5125 writeq(val64, &bar0->rmac_addr_cmd_mem);
5126
5127 /* Wait till command completes */
5128 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5129 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5130 S2IO_BIT_RESET)) {
faa4f796 5131 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
2fd37688
SS
5132 return FAILURE;
5133 }
5134 return SUCCESS;
5135}
faa4f796
SH
5136/* deletes a specified unicast/multicast mac entry from CAM */
5137static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5138{
5139 int offset;
5140 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5141 struct config_param *config = &sp->config;
5142
5143 for (offset = 1;
d44570e4 5144 offset < config->max_mc_addr; offset++) {
faa4f796
SH
5145 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5146 if (tmp64 == addr) {
5147 /* disable the entry by writing 0xffffffffffffULL */
5148 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5149 return FAILURE;
5150 /* store the new mac list from CAM */
5151 do_s2io_store_unicast_mc(sp);
5152 return SUCCESS;
5153 }
5154 }
5155 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
d44570e4 5156 (unsigned long long)addr);
faa4f796
SH
5157 return FAILURE;
5158}
5159
5160/* read mac entries from CAM */
5161static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5162{
5163 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5164 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5165
5166 /* read mac addr */
d44570e4 5167 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
faa4f796
SH
5168 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5169 writeq(val64, &bar0->rmac_addr_cmd_mem);
5170
5171 /* Wait till command completes */
5172 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5173 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5174 S2IO_BIT_RESET)) {
faa4f796
SH
5175 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5176 return FAILURE;
5177 }
5178 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4
JP
5179
5180 return tmp64 >> 16;
faa4f796 5181}
2fd37688
SS
5182
5183/**
49ce9c2c 5184 * s2io_set_mac_addr - driver entry point
2fd37688 5185 */
faa4f796 5186
2fd37688
SS
5187static int s2io_set_mac_addr(struct net_device *dev, void *p)
5188{
5189 struct sockaddr *addr = p;
5190
5191 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 5192 return -EADDRNOTAVAIL;
2fd37688
SS
5193
5194 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5195
5196 /* store the MAC address in CAM */
d44570e4 5197 return do_s2io_prog_unicast(dev, dev->dev_addr);
2fd37688 5198}
/**
 * do_s2io_prog_unicast - Programs the Xframe mac address
 * @dev : pointer to the device structure.
 * @addr: a uchar pointer to the new mac address which is to be set.
 * Description : This procedure will program the Xframe to receive
 * frames with new Mac Address
 * Return value: SUCCESS on success and an appropriate (-)ve integer
 * as defined in errno.h file on failure.
 */

static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
{
	struct s2io_nic *sp = netdev_priv(dev);
	register u64 mac_addr = 0, perm_addr = 0;
	int i;
	u64 tmp64;
	struct config_param *config = &sp->config;

	/*
	 * Set the new MAC address as the new unicast filter and reflect this
	 * change on the device address registered with the OS. It will be
	 * at offset 0.
	 */
	/* Pack both the requested address and the permanent (slot-0)
	 * address into 48-bit values for comparison.
	 */
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
		perm_addr <<= 8;
		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
	}

	/* check if the dev_addr is different than perm_addr */
	if (mac_addr == perm_addr)
		return SUCCESS;

	/* check if the mac already preset in CAM */
	for (i = 1; i < config->max_mac_addr; i++) {
		tmp64 = do_s2io_read_unicast_mc(sp, i);
		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
			break;

		if (tmp64 == mac_addr) {
			DBG_PRINT(INFO_DBG,
				  "MAC addr:0x%llx already present in CAM\n",
				  (unsigned long long)mac_addr);
			return SUCCESS;
		}
	}
	if (i == config->max_mac_addr) {
		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
		return FAILURE;
	}
	/* Update the internal structure with this new mac address */
	do_s2io_copy_mac_addr(sp, i, mac_addr);

	/* Program the first free slot found above. */
	return do_s2io_add_mac(sp, mac_addr, i);
}
5255
5256/**
51f21442 5257 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
d07ce242
JP
5258 * @sp : private member of the device structure, which is a pointer to the
5259 * s2io_nic structure.
51f21442 5260 * @cmd: pointer to the structure with parameters given by ethtool to set
1da177e4
LT
5261 * link information.
5262 * Description:
20346722 5263 * The function sets different link parameters provided by the user onto
1da177e4
LT
5264 * the NIC.
5265 * Return value:
5266 * 0 on success.
d44570e4 5267 */
1da177e4 5268
51f21442
PR
5269static int
5270s2io_ethtool_set_link_ksettings(struct net_device *dev,
5271 const struct ethtool_link_ksettings *cmd)
1da177e4 5272{
4cf1653a 5273 struct s2io_nic *sp = netdev_priv(dev);
51f21442
PR
5274 if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5275 (cmd->base.speed != SPEED_10000) ||
5276 (cmd->base.duplex != DUPLEX_FULL))
1da177e4
LT
5277 return -EINVAL;
5278 else {
5279 s2io_close(sp->dev);
5280 s2io_open(sp->dev);
5281 }
5282
5283 return 0;
5284}
5285
5286/**
51f21442 5287 * s2io_ethtol_get_link_ksettings - Return link specific information.
1da177e4
LT
5288 * @sp : private member of the device structure, pointer to the
5289 * s2io_nic structure.
51f21442 5290 * @cmd : pointer to the structure with parameters given by ethtool
1da177e4
LT
5291 * to return link information.
5292 * Description:
5293 * Returns link specific information like speed, duplex etc.. to ethtool.
5294 * Return value :
5295 * return 0 on success.
5296 */
5297
51f21442
PR
5298static int
5299s2io_ethtool_get_link_ksettings(struct net_device *dev,
5300 struct ethtool_link_ksettings *cmd)
1da177e4 5301{
4cf1653a 5302 struct s2io_nic *sp = netdev_priv(dev);
1a7eb72b 5303
51f21442
PR
5304 ethtool_link_ksettings_zero_link_mode(cmd, supported);
5305 ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5306 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5307
5308 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5309 ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5310 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5311
5312 cmd->base.port = PORT_FIBRE;
1da177e4
LT
5313
5314 if (netif_carrier_ok(sp->dev)) {
51f21442
PR
5315 cmd->base.speed = SPEED_10000;
5316 cmd->base.duplex = DUPLEX_FULL;
1da177e4 5317 } else {
51f21442
PR
5318 cmd->base.speed = SPEED_UNKNOWN;
5319 cmd->base.duplex = DUPLEX_UNKNOWN;
1da177e4
LT
5320 }
5321
51f21442 5322 cmd->base.autoneg = AUTONEG_DISABLE;
1da177e4
LT
5323 return 0;
5324}
5325
5326/**
20346722
K
5327 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5328 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5329 * s2io_nic structure.
5330 * @info : pointer to the structure with parameters given by ethtool to
5331 * return driver information.
5332 * Description:
5333 * Returns driver specefic information like name, version etc.. to ethtool.
5334 * Return value:
5335 * void
5336 */
5337
5338static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5339 struct ethtool_drvinfo *info)
5340{
4cf1653a 5341 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 5342
68aad78c
RJ
5343 strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5344 strlcpy(info->version, s2io_driver_version, sizeof(info->version));
68aad78c 5345 strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
5346}
5347
5348/**
5349 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
20346722 5350 * @sp: private member of the device structure, which is a pointer to the
1da177e4 5351 * s2io_nic structure.
20346722 5352 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4 5353 * dumping the registers.
8b1bb92b 5354 * @reg_space: The input argument into which all the registers are dumped.
1da177e4
LT
5355 * Description:
5356 * Dumps the entire register space of xFrame NIC into the user given
5357 * buffer area.
5358 * Return value :
5359 * void .
d44570e4 5360 */
1da177e4
LT
5361
5362static void s2io_ethtool_gregs(struct net_device *dev,
5363 struct ethtool_regs *regs, void *space)
5364{
5365 int i;
5366 u64 reg;
d44570e4 5367 u8 *reg_space = (u8 *)space;
4cf1653a 5368 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5369
5370 regs->len = XENA_REG_SPACE;
5371 regs->version = sp->pdev->subsystem_device;
5372
5373 for (i = 0; i < regs->len; i += 8) {
5374 reg = readq(sp->bar0 + i);
5375 memcpy((reg_space + i), &reg, 8);
5376 }
5377}
5378
034e3450 5379/*
5380 * s2io_set_led - control NIC led
d44570e4 5381 */
034e3450 5382static void s2io_set_led(struct s2io_nic *sp, bool on)
1da177e4 5383{
1ee6dd77 5384 struct XENA_dev_config __iomem *bar0 = sp->bar0;
034e3450 5385 u16 subid = sp->pdev->subsystem_device;
5386 u64 val64;
1da177e4 5387
541ae68f 5388 if ((sp->device_type == XFRAME_II_DEVICE) ||
d44570e4 5389 ((subid & 0xFF) >= 0x07)) {
1da177e4 5390 val64 = readq(&bar0->gpio_control);
034e3450 5391 if (on)
5392 val64 |= GPIO_CTRL_GPIO_0;
5393 else
5394 val64 &= ~GPIO_CTRL_GPIO_0;
5395
1da177e4
LT
5396 writeq(val64, &bar0->gpio_control);
5397 } else {
5398 val64 = readq(&bar0->adapter_control);
034e3450 5399 if (on)
5400 val64 |= ADAPTER_LED_ON;
5401 else
5402 val64 &= ~ADAPTER_LED_ON;
5403
1da177e4
LT
5404 writeq(val64, &bar0->adapter_control);
5405 }
5406
1da177e4
LT
5407}
5408
/**
 * s2io_ethtool_set_led - To physically identify the nic on the system.
 * @dev : network device
 * @state: led setting
 *
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if it's link is up.
 */

static int s2io_ethtool_set_led(struct net_device *dev,
				enum ethtool_phys_id_state state)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid = sp->pdev->subsystem_device;

	/* Early Xframe I revisions cannot blink the LED unless the
	 * adapter is enabled; refuse the request in that case. */
	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
		u64 val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			pr_err("Adapter Link down, cannot blink LED\n");
			return -EAGAIN;
		}
	}

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current GPIO state so it can be restored when
		 * identification ends (ETHTOOL_ID_INACTIVE below). */
		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
		return 1; /* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		s2io_set_led(sp, true);
		break;

	case ETHTOOL_ID_OFF:
		s2io_set_led(sp, false);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the saved GPIO state, but only on boards whose
		 * link LED indicator is known to be unreliable. */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
	}

	return 0;
}
5456
0cec35eb 5457static void s2io_ethtool_gringparam(struct net_device *dev,
d44570e4 5458 struct ethtool_ringparam *ering)
0cec35eb 5459{
4cf1653a 5460 struct s2io_nic *sp = netdev_priv(dev);
d44570e4 5461 int i, tx_desc_count = 0, rx_desc_count = 0;
0cec35eb 5462
1853e2e1 5463 if (sp->rxd_mode == RXD_MODE_1) {
0cec35eb 5464 ering->rx_max_pending = MAX_RX_DESC_1;
1853e2e1
JM
5465 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5466 } else {
0cec35eb 5467 ering->rx_max_pending = MAX_RX_DESC_2;
1853e2e1
JM
5468 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5469 }
0cec35eb
SH
5470
5471 ering->tx_max_pending = MAX_TX_DESC;
8a4bdbaa 5472
1853e2e1 5473 for (i = 0; i < sp->config.rx_ring_num; i++)
0cec35eb 5474 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
0cec35eb 5475 ering->rx_pending = rx_desc_count;
0cec35eb 5476 ering->rx_jumbo_pending = rx_desc_count;
1853e2e1
JM
5477
5478 for (i = 0; i < sp->config.tx_fifo_num; i++)
5479 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5480 ering->tx_pending = tx_desc_count;
5481 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
0cec35eb
SH
5482}
5483
1da177e4
LT
5484/**
5485 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
20346722
K
5486 * @sp : private member of the device structure, which is a pointer to the
5487 * s2io_nic structure.
1da177e4
LT
5488 * @ep : pointer to the structure with pause parameters given by ethtool.
5489 * Description:
5490 * Returns the Pause frame generation and reception capability of the NIC.
5491 * Return value:
5492 * void
5493 */
5494static void s2io_ethtool_getpause_data(struct net_device *dev,
5495 struct ethtool_pauseparam *ep)
5496{
5497 u64 val64;
4cf1653a 5498 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5499 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5500
5501 val64 = readq(&bar0->rmac_pause_cfg);
5502 if (val64 & RMAC_PAUSE_GEN_ENABLE)
f957bcf0 5503 ep->tx_pause = true;
1da177e4 5504 if (val64 & RMAC_PAUSE_RX_ENABLE)
f957bcf0
TK
5505 ep->rx_pause = true;
5506 ep->autoneg = false;
1da177e4
LT
5507}
5508
5509/**
5510 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 5511 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5512 * s2io_nic structure.
5513 * @ep : pointer to the structure with pause parameters given by ethtool.
5514 * Description:
5515 * It can be used to set or reset Pause frame generation or reception
5516 * support of the NIC.
5517 * Return value:
5518 * int, returns 0 on Success
5519 */
5520
5521static int s2io_ethtool_setpause_data(struct net_device *dev,
d44570e4 5522 struct ethtool_pauseparam *ep)
1da177e4
LT
5523{
5524 u64 val64;
4cf1653a 5525 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5526 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5527
5528 val64 = readq(&bar0->rmac_pause_cfg);
5529 if (ep->tx_pause)
5530 val64 |= RMAC_PAUSE_GEN_ENABLE;
5531 else
5532 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5533 if (ep->rx_pause)
5534 val64 |= RMAC_PAUSE_RX_ENABLE;
5535 else
5536 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5537 writeq(val64, &bar0->rmac_pause_cfg);
5538 return 0;
5539}
5540
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : Its an output parameter where the data read at the given
 * offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 * I2C bus.
 * Return value:
 * -1 on failure and 0 on success.
 */

#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: the EEPROM is behind an I2C controller.
		 * Start a 3-byte read and poll (up to 5 * 50 ms) for the
		 * transaction-end flag. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: the EEPROM is accessed over SPI instead.
		 * Program the command, then raise the request bit. */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* Device rejected the request. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff; /* only 3 bytes are valid */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5609
/**
 * write_eeprom - actually writes the relevant part of the data value.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : The data that is to be written
 * @cnt : Number of bytes of the data that are actually to be written into
 * the Eeprom. (max of 3)
 * Description:
 * Actually writes the relevant part of the data value into the Eeprom
 * through the I2C bus.
 * Return value:
 * 0 on success, -1 on failure.
 */

static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: program the EEPROM through the I2C controller
		 * and poll (up to 5 * 50 ms) until the transaction ends;
		 * a NACK means the write was refused. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: SPI interface.  A byte count of 8 is encoded
		 * as 0 in the SPI_CONTROL_BYTECNT field. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* Device rejected the write request. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/* s2io_vpd_read - read the adapter's VPD area to extract the product
 * name and serial number.  On any failure the defaults set up front
 * ("Xframe I/II ..." and "NOT AVAILABLE") are left in place.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	/* The VPD capability sits at a different config-space offset on
	 * the two adapter generations. */
	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* Read the 256-byte VPD area 4 bytes at a time: write the VPD
	 * address, then poll the flag byte (up to 5 * 2 ms) until the
	 * device signals (0x80) that the data dword is valid. */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev, (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter: scan for the "SN" tag;
		 * the byte after it holds the string length. */
		for (cnt = 0; cnt < 252; cnt++) {
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				/* Bounds check against both the output
				 * buffer and the remaining VPD bytes. */
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* Byte 1 of the VPD area holds the product-name string length;
	 * the name itself starts at byte 3. */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5746
1da177e4
LT
5747/**
5748 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
d07ce242
JP
5749 * @sp : private member of the device structure, which is a pointer to the
5750 * s2io_nic structure.
20346722 5751 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5752 * containing all relevant information.
5753 * @data_buf : user defined value to be written into Eeprom.
5754 * Description: Reads the values stored in the Eeprom at given offset
5755 * for a given length. Stores these values int the input argument data
5756 * buffer 'data_buf' and returns these to the caller (ethtool.)
5757 * Return value:
5758 * int 0 on success
5759 */
5760
5761static int s2io_ethtool_geeprom(struct net_device *dev,
d44570e4 5762 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5763{
ad4ebed0 5764 u32 i, valid;
5765 u64 data;
4cf1653a 5766 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5767
5768 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5769
5770 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5771 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5772
5773 for (i = 0; i < eeprom->len; i += 4) {
5774 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5775 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5776 return -EFAULT;
5777 }
5778 valid = INV(data);
5779 memcpy((data_buf + i), &valid, 4);
5780 }
5781 return 0;
5782}
5783
5784/**
5785 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5786 * @sp : private member of the device structure, which is a pointer to the
5787 * s2io_nic structure.
20346722 5788 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5789 * containing all relevant information.
5790 * @data_buf ; user defined value to be written into Eeprom.
5791 * Description:
5792 * Tries to write the user provided value in the Eeprom, at the offset
5793 * given by the user.
5794 * Return value:
5795 * 0 on success, -EFAULT on failure.
5796 */
5797
5798static int s2io_ethtool_seeprom(struct net_device *dev,
5799 struct ethtool_eeprom *eeprom,
d44570e4 5800 u8 *data_buf)
1da177e4
LT
5801{
5802 int len = eeprom->len, cnt = 0;
ad4ebed0 5803 u64 valid = 0, data;
4cf1653a 5804 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5805
5806 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5807 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
5808 "ETHTOOL_WRITE_EEPROM Err: "
5809 "Magic value is wrong, it is 0x%x should be 0x%x\n",
5810 (sp->pdev->vendor | (sp->pdev->device << 16)),
5811 eeprom->magic);
1da177e4
LT
5812 return -EFAULT;
5813 }
5814
5815 while (len) {
d44570e4
JP
5816 data = (u32)data_buf[cnt] & 0x000000FF;
5817 if (data)
5818 valid = (u32)(data << 24);
5819 else
1da177e4
LT
5820 valid = data;
5821
5822 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5823 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
5824 "ETHTOOL_WRITE_EEPROM Err: "
5825 "Cannot write into the specified offset\n");
1da177e4
LT
5826 return -EFAULT;
5827 }
5828 cnt++;
5829 len--;
5830 }
5831
5832 return 0;
5833}
5834
/**
 * s2io_register_test - reads and writes into all clock domains.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted b
 * by the driver.
 * Description:
 * Read and write into all clock domains. The NIC has 3 clock domains,
 * see that registers in all the three regions are accessible.
 * Return value:
 * 0 on success.
 */

static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read tests: compare fixed-pattern registers against the values
	 * the hardware presents after reset. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
	}

	/* The rx_queue_cfg reset value differs between the two adapter
	 * generations. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
	}

	/* Write tests: write alternating bit patterns to a scratch
	 * register and read them back. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
	}

	*data = fail;
	return fail;
}
5901
/**
 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data:variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
 * register.
 * Return value:
 * 0 on success.
 */

static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 so they can be
	 * restored after the destructive write tests below. */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0 */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Writes to the following protected offsets are expected
		 * to fail; a successful write means the write protection
		 * is broken. */

		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}
6002
6003/**
6004 * s2io_bist_test - invokes the MemBist test of the card .
20346722 6005 * @sp : private member of the device structure, which is a pointer to the
1da177e4 6006 * s2io_nic structure.
20346722 6007 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
6008 * the driver.
6009 * Description:
6010 * This invokes the MemBist test of the card. We give around
6011 * 2 secs time for the Test to complete. If it's still not complete
20346722 6012 * within this peiod, we consider that the test failed.
1da177e4
LT
6013 * Return value:
6014 * 0 on success and -1 on failure.
6015 */
6016
d44570e4 6017static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
1da177e4
LT
6018{
6019 u8 bist = 0;
6020 int cnt = 0, ret = -1;
6021
6022 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6023 bist |= PCI_BIST_START;
6024 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6025
6026 while (cnt < 20) {
6027 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6028 if (!(bist & PCI_BIST_START)) {
6029 *data = (bist & PCI_BIST_CODE_MASK);
6030 ret = 0;
6031 break;
6032 }
6033 msleep(100);
6034 cnt++;
6035 }
6036
6037 return ret;
6038}
6039
6040/**
49ce9c2c 6041 * s2io_link_test - verifies the link state of the nic
20346722 6042 * @sp ; private member of the device structure, which is a pointer to the
1da177e4
LT
6043 * s2io_nic structure.
6044 * @data: variable that returns the result of each of the test conducted by
6045 * the driver.
6046 * Description:
20346722 6047 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
6048 * argument 'data' appropriately.
6049 * Return value:
6050 * 0 on success.
6051 */
6052
d44570e4 6053static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
1da177e4 6054{
1ee6dd77 6055 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
6056 u64 val64;
6057
6058 val64 = readq(&bar0->adapter_status);
d44570e4 6059 if (!(LINK_IS_UP(val64)))
1da177e4 6060 *data = 1;
c92ca04b
AR
6061 else
6062 *data = 0;
1da177e4 6063
b41477f3 6064 return *data;
1da177e4
LT
6065}
6066
/**
 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp: private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data: variable that returns the result of each of the test
 * conducted by the driver.
 * Description:
 * This is one of the offline test that tests the read and write
 * access to the RldRam chip on the NIC.
 * Return value:
 * 0 on success.
 */

static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* ECC must be disabled while the RLDRAM is in test mode. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second pass inverts the upper 48 bits of each
	 * test pattern to exercise both bit polarities. */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll (up to 5 * 200 ms)
		 * for completion. */
		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		if (cnt == 5)
			break;	/* write phase timed out */

		/* Kick off the read-back phase and poll (up to 5 * 500 ms)
		 * for completion. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;	/* read phase timed out */

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6162
/**
 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
 * @dev : pointer to the net device structure.
 * @ethtest : pointer to a ethtool command specific structure that will be
 * returned to the user.
 * @data : variable that returns the result of each of the test
 * conducted by the driver.
 * Description:
 * This function conducts 6 tests ( 4 offline and 2 online) to determine
 * the health of the card.
 * Return value:
 * void
 */

static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests: the interface is brought down for the
		 * duration and restored afterwards. */
		if (orig_state)
			s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Reset between tests so one test's state cannot skew
		 * the next. */
		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* The link test (slot 2) is not run offline. */
		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6231
6232static void s2io_get_ethtool_stats(struct net_device *dev,
6233 struct ethtool_stats *estats,
d44570e4 6234 u64 *tmp_stats)
1da177e4 6235{
8116f3cf 6236 int i = 0, k;
4cf1653a 6237 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c
JP
6238 struct stat_block *stats = sp->mac_control.stats_info;
6239 struct swStat *swstats = &stats->sw_stat;
6240 struct xpakStat *xstats = &stats->xpak_stat;
1da177e4 6241
7ba013ac 6242 s2io_updt_stats(sp);
541ae68f 6243 tmp_stats[i++] =
ffb5df6c
JP
6244 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
6245 le32_to_cpu(stats->tmac_frms);
541ae68f 6246 tmp_stats[i++] =
ffb5df6c
JP
6247 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6248 le32_to_cpu(stats->tmac_data_octets);
6249 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
541ae68f 6250 tmp_stats[i++] =
ffb5df6c
JP
6251 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6252 le32_to_cpu(stats->tmac_mcst_frms);
541ae68f 6253 tmp_stats[i++] =
ffb5df6c
JP
6254 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6255 le32_to_cpu(stats->tmac_bcst_frms);
6256 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
bd1034f0 6257 tmp_stats[i++] =
ffb5df6c
JP
6258 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6259 le32_to_cpu(stats->tmac_ttl_octets);
bd1034f0 6260 tmp_stats[i++] =
ffb5df6c
JP
6261 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6262 le32_to_cpu(stats->tmac_ucst_frms);
d44570e4 6263 tmp_stats[i++] =
ffb5df6c
JP
6264 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6265 le32_to_cpu(stats->tmac_nucst_frms);
541ae68f 6266 tmp_stats[i++] =
ffb5df6c
JP
6267 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6268 le32_to_cpu(stats->tmac_any_err_frms);
6269 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6270 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
541ae68f 6271 tmp_stats[i++] =
ffb5df6c
JP
6272 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6273 le32_to_cpu(stats->tmac_vld_ip);
541ae68f 6274 tmp_stats[i++] =
ffb5df6c
JP
6275 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6276 le32_to_cpu(stats->tmac_drop_ip);
541ae68f 6277 tmp_stats[i++] =
ffb5df6c
JP
6278 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6279 le32_to_cpu(stats->tmac_icmp);
541ae68f 6280 tmp_stats[i++] =
ffb5df6c
JP
6281 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6282 le32_to_cpu(stats->tmac_rst_tcp);
6283 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6284 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6285 le32_to_cpu(stats->tmac_udp);
541ae68f 6286 tmp_stats[i++] =
ffb5df6c
JP
6287 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6288 le32_to_cpu(stats->rmac_vld_frms);
541ae68f 6289 tmp_stats[i++] =
ffb5df6c
JP
6290 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6291 le32_to_cpu(stats->rmac_data_octets);
6292 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6293 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
541ae68f 6294 tmp_stats[i++] =
ffb5df6c
JP
6295 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6296 le32_to_cpu(stats->rmac_vld_mcst_frms);
541ae68f 6297 tmp_stats[i++] =
ffb5df6c
JP
6298 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6299 le32_to_cpu(stats->rmac_vld_bcst_frms);
6300 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6301 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6302 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6303 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6304 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
d44570e4 6305 tmp_stats[i++] =
ffb5df6c
JP
6306 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6307 le32_to_cpu(stats->rmac_ttl_octets);
bd1034f0 6308 tmp_stats[i++] =
ffb5df6c
JP
6309 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6310 | le32_to_cpu(stats->rmac_accepted_ucst_frms);
d44570e4 6311 tmp_stats[i++] =
ffb5df6c
JP
6312 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6313 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
541ae68f 6314 tmp_stats[i++] =
ffb5df6c
JP
6315 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6316 le32_to_cpu(stats->rmac_discarded_frms);
d44570e4 6317 tmp_stats[i++] =
ffb5df6c
JP
6318 (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6319 << 32 | le32_to_cpu(stats->rmac_drop_events);
6320 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6321 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
541ae68f 6322 tmp_stats[i++] =
ffb5df6c
JP
6323 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6324 le32_to_cpu(stats->rmac_usized_frms);
541ae68f 6325 tmp_stats[i++] =
ffb5df6c
JP
6326 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6327 le32_to_cpu(stats->rmac_osized_frms);
541ae68f 6328 tmp_stats[i++] =
ffb5df6c
JP
6329 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6330 le32_to_cpu(stats->rmac_frag_frms);
541ae68f 6331 tmp_stats[i++] =
ffb5df6c
JP
6332 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6333 le32_to_cpu(stats->rmac_jabber_frms);
6334 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6335 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6336 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6337 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6338 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6339 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
bd1034f0 6340 tmp_stats[i++] =
ffb5df6c
JP
6341 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6342 le32_to_cpu(stats->rmac_ip);
6343 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6344 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
bd1034f0 6345 tmp_stats[i++] =
ffb5df6c
JP
6346 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6347 le32_to_cpu(stats->rmac_drop_ip);
bd1034f0 6348 tmp_stats[i++] =
ffb5df6c
JP
6349 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6350 le32_to_cpu(stats->rmac_icmp);
6351 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
bd1034f0 6352 tmp_stats[i++] =
ffb5df6c
JP
6353 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6354 le32_to_cpu(stats->rmac_udp);
541ae68f 6355 tmp_stats[i++] =
ffb5df6c
JP
6356 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6357 le32_to_cpu(stats->rmac_err_drp_udp);
6358 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6359 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6360 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6361 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6362 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6363 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6364 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6365 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6366 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6367 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6368 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6369 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6370 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6371 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6372 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6373 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6374 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
541ae68f 6375 tmp_stats[i++] =
ffb5df6c
JP
6376 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6377 le32_to_cpu(stats->rmac_pause_cnt);
6378 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6379 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
541ae68f 6380 tmp_stats[i++] =
ffb5df6c
JP
6381 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6382 le32_to_cpu(stats->rmac_accepted_ip);
6383 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6384 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6385 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6386 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6387 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6388 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6389 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6390 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6391 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6392 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6393 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6394 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6395 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6396 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6397 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6398 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6399 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6400 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6401 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
fa1f0cb3
SS
6402
6403 /* Enhanced statistics exist only for Hercules */
d44570e4 6404 if (sp->device_type == XFRAME_II_DEVICE) {
fa1f0cb3 6405 tmp_stats[i++] =
ffb5df6c 6406 le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
fa1f0cb3 6407 tmp_stats[i++] =
ffb5df6c 6408 le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
fa1f0cb3 6409 tmp_stats[i++] =
ffb5df6c
JP
6410 le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6411 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6412 tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6413 tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6414 tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6415 tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6416 tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6417 tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6418 tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6419 tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6420 tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6421 tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6422 tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6423 tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
fa1f0cb3
SS
6424 }
6425
7ba013ac 6426 tmp_stats[i++] = 0;
ffb5df6c
JP
6427 tmp_stats[i++] = swstats->single_ecc_errs;
6428 tmp_stats[i++] = swstats->double_ecc_errs;
6429 tmp_stats[i++] = swstats->parity_err_cnt;
6430 tmp_stats[i++] = swstats->serious_err_cnt;
6431 tmp_stats[i++] = swstats->soft_reset_cnt;
6432 tmp_stats[i++] = swstats->fifo_full_cnt;
8116f3cf 6433 for (k = 0; k < MAX_RX_RINGS; k++)
ffb5df6c
JP
6434 tmp_stats[i++] = swstats->ring_full_cnt[k];
6435 tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6436 tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6437 tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6438 tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6439 tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6440 tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6441 tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6442 tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6443 tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6444 tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6445 tmp_stats[i++] = xstats->warn_laser_output_power_high;
6446 tmp_stats[i++] = xstats->warn_laser_output_power_low;
6447 tmp_stats[i++] = swstats->clubbed_frms_cnt;
6448 tmp_stats[i++] = swstats->sending_both;
6449 tmp_stats[i++] = swstats->outof_sequence_pkts;
6450 tmp_stats[i++] = swstats->flush_max_pkts;
6451 if (swstats->num_aggregations) {
6452 u64 tmp = swstats->sum_avg_pkts_aggregated;
bd1034f0 6453 int count = 0;
6aa20a22 6454 /*
bd1034f0
AR
6455 * Since 64-bit divide does not work on all platforms,
6456 * do repeated subtraction.
6457 */
ffb5df6c
JP
6458 while (tmp >= swstats->num_aggregations) {
6459 tmp -= swstats->num_aggregations;
bd1034f0
AR
6460 count++;
6461 }
6462 tmp_stats[i++] = count;
d44570e4 6463 } else
bd1034f0 6464 tmp_stats[i++] = 0;
ffb5df6c
JP
6465 tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6466 tmp_stats[i++] = swstats->pci_map_fail_cnt;
6467 tmp_stats[i++] = swstats->watchdog_timer_cnt;
6468 tmp_stats[i++] = swstats->mem_allocated;
6469 tmp_stats[i++] = swstats->mem_freed;
6470 tmp_stats[i++] = swstats->link_up_cnt;
6471 tmp_stats[i++] = swstats->link_down_cnt;
6472 tmp_stats[i++] = swstats->link_up_time;
6473 tmp_stats[i++] = swstats->link_down_time;
6474
6475 tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6476 tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6477 tmp_stats[i++] = swstats->tx_parity_err_cnt;
6478 tmp_stats[i++] = swstats->tx_link_loss_cnt;
6479 tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6480
6481 tmp_stats[i++] = swstats->rx_parity_err_cnt;
6482 tmp_stats[i++] = swstats->rx_abort_cnt;
6483 tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6484 tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6485 tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6486 tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6487 tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6488 tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6489 tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6490 tmp_stats[i++] = swstats->tda_err_cnt;
6491 tmp_stats[i++] = swstats->pfc_err_cnt;
6492 tmp_stats[i++] = swstats->pcc_err_cnt;
6493 tmp_stats[i++] = swstats->tti_err_cnt;
6494 tmp_stats[i++] = swstats->tpa_err_cnt;
6495 tmp_stats[i++] = swstats->sm_err_cnt;
6496 tmp_stats[i++] = swstats->lso_err_cnt;
6497 tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6498 tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6499 tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6500 tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6501 tmp_stats[i++] = swstats->rc_err_cnt;
6502 tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6503 tmp_stats[i++] = swstats->rpa_err_cnt;
6504 tmp_stats[i++] = swstats->rda_err_cnt;
6505 tmp_stats[i++] = swstats->rti_err_cnt;
6506 tmp_stats[i++] = swstats->mc_err_cnt;
1da177e4
LT
6507}
6508
ac1f60db 6509static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4 6510{
d44570e4 6511 return XENA_REG_SPACE;
1da177e4
LT
6512}
6513
6514
ac1f60db 6515static int s2io_get_eeprom_len(struct net_device *dev)
1da177e4 6516{
d44570e4 6517 return XENA_EEPROM_SPACE;
1da177e4
LT
6518}
6519
b9f2c044 6520static int s2io_get_sset_count(struct net_device *dev, int sset)
1da177e4 6521{
4cf1653a 6522 struct s2io_nic *sp = netdev_priv(dev);
b9f2c044
JG
6523
6524 switch (sset) {
6525 case ETH_SS_TEST:
6526 return S2IO_TEST_LEN;
6527 case ETH_SS_STATS:
d44570e4 6528 switch (sp->device_type) {
b9f2c044
JG
6529 case XFRAME_I_DEVICE:
6530 return XFRAME_I_STAT_LEN;
6531 case XFRAME_II_DEVICE:
6532 return XFRAME_II_STAT_LEN;
6533 default:
6534 return 0;
6535 }
6536 default:
6537 return -EOPNOTSUPP;
6538 }
1da177e4 6539}
ac1f60db
AB
6540
6541static void s2io_ethtool_get_strings(struct net_device *dev,
d44570e4 6542 u32 stringset, u8 *data)
1da177e4 6543{
fa1f0cb3 6544 int stat_size = 0;
4cf1653a 6545 struct s2io_nic *sp = netdev_priv(dev);
fa1f0cb3 6546
1da177e4
LT
6547 switch (stringset) {
6548 case ETH_SS_TEST:
6549 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6550 break;
6551 case ETH_SS_STATS:
fa1f0cb3 6552 stat_size = sizeof(ethtool_xena_stats_keys);
d44570e4
JP
6553 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6554 if (sp->device_type == XFRAME_II_DEVICE) {
fa1f0cb3 6555 memcpy(data + stat_size,
d44570e4
JP
6556 &ethtool_enhanced_stats_keys,
6557 sizeof(ethtool_enhanced_stats_keys));
fa1f0cb3
SS
6558 stat_size += sizeof(ethtool_enhanced_stats_keys);
6559 }
6560
6561 memcpy(data + stat_size, &ethtool_driver_stats_keys,
d44570e4 6562 sizeof(ethtool_driver_stats_keys));
1da177e4
LT
6563 }
6564}
1da177e4 6565
c8f44aff 6566static int s2io_set_features(struct net_device *dev, netdev_features_t features)
958de193
JM
6567{
6568 struct s2io_nic *sp = netdev_priv(dev);
c8f44aff 6569 netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
958de193
JM
6570
6571 if (changed && netif_running(dev)) {
b437a8cc
MM
6572 int rc;
6573
958de193
JM
6574 s2io_stop_all_tx_queue(sp);
6575 s2io_card_down(sp);
b437a8cc 6576 dev->features = features;
958de193
JM
6577 rc = s2io_card_up(sp);
6578 if (rc)
6579 s2io_reset(sp);
6580 else
6581 s2io_start_all_tx_queue(sp);
b437a8cc
MM
6582
6583 return rc ? rc : 1;
958de193
JM
6584 }
6585
b437a8cc 6586 return 0;
958de193
JM
6587}
6588
7282d491 6589static const struct ethtool_ops netdev_ethtool_ops = {
1da177e4
LT
6590 .get_drvinfo = s2io_ethtool_gdrvinfo,
6591 .get_regs_len = s2io_ethtool_get_regs_len,
6592 .get_regs = s2io_ethtool_gregs,
6593 .get_link = ethtool_op_get_link,
6594 .get_eeprom_len = s2io_get_eeprom_len,
6595 .get_eeprom = s2io_ethtool_geeprom,
6596 .set_eeprom = s2io_ethtool_seeprom,
0cec35eb 6597 .get_ringparam = s2io_ethtool_gringparam,
1da177e4
LT
6598 .get_pauseparam = s2io_ethtool_getpause_data,
6599 .set_pauseparam = s2io_ethtool_setpause_data,
1da177e4
LT
6600 .self_test = s2io_ethtool_test,
6601 .get_strings = s2io_ethtool_get_strings,
034e3450 6602 .set_phys_id = s2io_ethtool_set_led,
b9f2c044
JG
6603 .get_ethtool_stats = s2io_get_ethtool_stats,
6604 .get_sset_count = s2io_get_sset_count,
51f21442
PR
6605 .get_link_ksettings = s2io_ethtool_get_link_ksettings,
6606 .set_link_ksettings = s2io_ethtool_set_link_ksettings,
1da177e4
LT
6607};
6608
/**
 * s2io_ioctl - Entry point for the Ioctl
 * @dev : Device pointer.
 * @ifr : An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd : This is used to distinguish between the different commands that
 * can be passed to the IOCTL functions.
 * Description:
 * Currently there is no special functionality supported in IOCTL, hence
 * this function always returns -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
6625
6626/**
6627 * s2io_change_mtu - entry point to change MTU size for the device.
6628 * @dev : device pointer.
6629 * @new_mtu : the new MTU size for the device.
6630 * Description: A driver entry point to change MTU size for the device.
6631 * Before changing the MTU the device must be stopped.
6632 * Return value:
6633 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6634 * file on failure.
6635 */
6636
ac1f60db 6637static int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4 6638{
4cf1653a 6639 struct s2io_nic *sp = netdev_priv(dev);
9f74ffde 6640 int ret = 0;
1da177e4 6641
1da177e4 6642 dev->mtu = new_mtu;
d8892c6e 6643 if (netif_running(dev)) {
3a3d5756 6644 s2io_stop_all_tx_queue(sp);
e6a8fee2 6645 s2io_card_down(sp);
9f74ffde
SH
6646 ret = s2io_card_up(sp);
6647 if (ret) {
d8892c6e 6648 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
b39d66a8 6649 __func__);
9f74ffde 6650 return ret;
d8892c6e 6651 }
3a3d5756 6652 s2io_wake_all_tx_queue(sp);
d8892c6e 6653 } else { /* Device is down */
1ee6dd77 6654 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d8892c6e
K
6655 u64 val64 = new_mtu;
6656
6657 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6658 }
1da177e4 6659
9f74ffde 6660 return ret;
1da177e4
LT
6661}
6662
1da177e4
LT
/**
 * s2io_set_link - Set the Link status
 * @work: work_struct embedded in the device private structure
 *	  (set_link_task)
 * Description: Sets the link status for the adapter
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic,
					    set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	/* rtnl protects against concurrent open/close/reset paths */
	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter the first time link comes up */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/* Some subsystems drive the link LED via GPIO0
				 * instead of the adapter-control LED bit.
				 */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					    nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* read back to post the write */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = true;
			} else {
				DBG_PRINT(ERR_DBG,
					  "%s: Error: device is not Quiescent\n",
					  dev->name);
				s2io_stop_all_tx_queue(nic);
			}
		}
		/* Turn the LED on and report link-up to the stack */
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			/* read back to post the write */
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 & (~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6745
1ee6dd77 6746static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
d44570e4
JP
6747 struct buffAdd *ba,
6748 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6749 u64 *temp2, int size)
5d3213cc
AR
6750{
6751 struct net_device *dev = sp->dev;
491abf25 6752 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
5d3213cc
AR
6753
6754 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6d517a27 6755 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
5d3213cc
AR
6756 /* allocate skb */
6757 if (*skb) {
6758 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6759 /*
6760 * As Rx frame are not going to be processed,
6761 * using same mapped address for the Rxd
6762 * buffer pointer
6763 */
6d517a27 6764 rxdp1->Buffer0_ptr = *temp0;
5d3213cc 6765 } else {
c056b734 6766 *skb = netdev_alloc_skb(dev, size);
5d3213cc 6767 if (!(*skb)) {
9e39f7c5
JP
6768 DBG_PRINT(INFO_DBG,
6769 "%s: Out of memory to allocate %s\n",
6770 dev->name, "1 buf mode SKBs");
ffb5df6c 6771 stats->mem_alloc_fail_cnt++;
5d3213cc
AR
6772 return -ENOMEM ;
6773 }
ffb5df6c 6774 stats->mem_allocated += (*skb)->truesize;
5d3213cc
AR
6775 /* storing the mapped addr in a temp variable
6776 * such it will be used for next rxd whose
6777 * Host Control is NULL
6778 */
6d517a27 6779 rxdp1->Buffer0_ptr = *temp0 =
d44570e4
JP
6780 pci_map_single(sp->pdev, (*skb)->data,
6781 size - NET_IP_ALIGN,
6782 PCI_DMA_FROMDEVICE);
8d8bb39b 6783 if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
491abf25 6784 goto memalloc_failed;
5d3213cc
AR
6785 rxdp->Host_Control = (unsigned long) (*skb);
6786 }
6787 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6d517a27 6788 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
5d3213cc
AR
6789 /* Two buffer Mode */
6790 if (*skb) {
6d517a27
VP
6791 rxdp3->Buffer2_ptr = *temp2;
6792 rxdp3->Buffer0_ptr = *temp0;
6793 rxdp3->Buffer1_ptr = *temp1;
5d3213cc 6794 } else {
c056b734 6795 *skb = netdev_alloc_skb(dev, size);
2ceaac75 6796 if (!(*skb)) {
9e39f7c5
JP
6797 DBG_PRINT(INFO_DBG,
6798 "%s: Out of memory to allocate %s\n",
6799 dev->name,
6800 "2 buf mode SKBs");
ffb5df6c 6801 stats->mem_alloc_fail_cnt++;
2ceaac75
DR
6802 return -ENOMEM;
6803 }
ffb5df6c 6804 stats->mem_allocated += (*skb)->truesize;
6d517a27 6805 rxdp3->Buffer2_ptr = *temp2 =
5d3213cc
AR
6806 pci_map_single(sp->pdev, (*skb)->data,
6807 dev->mtu + 4,
6808 PCI_DMA_FROMDEVICE);
8d8bb39b 6809 if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
491abf25 6810 goto memalloc_failed;
6d517a27 6811 rxdp3->Buffer0_ptr = *temp0 =
d44570e4
JP
6812 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6813 PCI_DMA_FROMDEVICE);
8d8bb39b 6814 if (pci_dma_mapping_error(sp->pdev,
d44570e4
JP
6815 rxdp3->Buffer0_ptr)) {
6816 pci_unmap_single(sp->pdev,
6817 (dma_addr_t)rxdp3->Buffer2_ptr,
6818 dev->mtu + 4,
6819 PCI_DMA_FROMDEVICE);
491abf25
VP
6820 goto memalloc_failed;
6821 }
5d3213cc
AR
6822 rxdp->Host_Control = (unsigned long) (*skb);
6823
6824 /* Buffer-1 will be dummy buffer not used */
6d517a27 6825 rxdp3->Buffer1_ptr = *temp1 =
5d3213cc 6826 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
d44570e4 6827 PCI_DMA_FROMDEVICE);
8d8bb39b 6828 if (pci_dma_mapping_error(sp->pdev,
d44570e4
JP
6829 rxdp3->Buffer1_ptr)) {
6830 pci_unmap_single(sp->pdev,
6831 (dma_addr_t)rxdp3->Buffer0_ptr,
6832 BUF0_LEN, PCI_DMA_FROMDEVICE);
6833 pci_unmap_single(sp->pdev,
6834 (dma_addr_t)rxdp3->Buffer2_ptr,
6835 dev->mtu + 4,
6836 PCI_DMA_FROMDEVICE);
491abf25
VP
6837 goto memalloc_failed;
6838 }
5d3213cc
AR
6839 }
6840 }
6841 return 0;
d44570e4
JP
6842
6843memalloc_failed:
6844 stats->pci_map_fail_cnt++;
6845 stats->mem_freed += (*skb)->truesize;
6846 dev_kfree_skb(*skb);
6847 return -ENOMEM;
5d3213cc 6848}
491abf25 6849
1ee6dd77
RB
6850static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6851 int size)
5d3213cc
AR
6852{
6853 struct net_device *dev = sp->dev;
6854 if (sp->rxd_mode == RXD_MODE_1) {
d44570e4 6855 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
5d3213cc
AR
6856 } else if (sp->rxd_mode == RXD_MODE_3B) {
6857 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6858 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
d44570e4 6859 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
5d3213cc
AR
6860 }
6861}
6862
1ee6dd77 6863static int rxd_owner_bit_reset(struct s2io_nic *sp)
5d3213cc
AR
6864{
6865 int i, j, k, blk_cnt = 0, size;
5d3213cc 6866 struct config_param *config = &sp->config;
ffb5df6c 6867 struct mac_info *mac_control = &sp->mac_control;
5d3213cc 6868 struct net_device *dev = sp->dev;
1ee6dd77 6869 struct RxD_t *rxdp = NULL;
5d3213cc 6870 struct sk_buff *skb = NULL;
1ee6dd77 6871 struct buffAdd *ba = NULL;
5d3213cc
AR
6872 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6873
6874 /* Calculate the size based on ring mode */
6875 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6876 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6877 if (sp->rxd_mode == RXD_MODE_1)
6878 size += NET_IP_ALIGN;
6879 else if (sp->rxd_mode == RXD_MODE_3B)
6880 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
5d3213cc
AR
6881
6882 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
6883 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6884 struct ring_info *ring = &mac_control->rings[i];
6885
d44570e4 6886 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
5d3213cc
AR
6887
6888 for (j = 0; j < blk_cnt; j++) {
6889 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
d44570e4
JP
6890 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6891 if (sp->rxd_mode == RXD_MODE_3B)
13d866a9 6892 ba = &ring->ba[j][k];
d44570e4 6893 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
64699336
JP
6894 &temp0_64,
6895 &temp1_64,
6896 &temp2_64,
d44570e4 6897 size) == -ENOMEM) {
ac1f90d6
SS
6898 return 0;
6899 }
5d3213cc
AR
6900
6901 set_rxd_buffer_size(sp, rxdp, size);
03cc864a 6902 dma_wmb();
5d3213cc
AR
6903 /* flip the Ownership bit to Hardware */
6904 rxdp->Control_1 |= RXD_OWN_XENA;
6905 }
6906 }
6907 }
6908 return 0;
6909
6910}
6911
d44570e4 6912static int s2io_add_isr(struct s2io_nic *sp)
1da177e4 6913{
e6a8fee2 6914 int ret = 0;
c92ca04b 6915 struct net_device *dev = sp->dev;
e6a8fee2 6916 int err = 0;
1da177e4 6917
eaae7f72 6918 if (sp->config.intr_type == MSI_X)
e6a8fee2
AR
6919 ret = s2io_enable_msi_x(sp);
6920 if (ret) {
6921 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
eaae7f72 6922 sp->config.intr_type = INTA;
20346722 6923 }
1da177e4 6924
d44570e4
JP
6925 /*
6926 * Store the values of the MSIX table in
6927 * the struct s2io_nic structure
6928 */
e6a8fee2 6929 store_xmsi_data(sp);
c92ca04b 6930
e6a8fee2 6931 /* After proper initialization of H/W, register ISR */
eaae7f72 6932 if (sp->config.intr_type == MSI_X) {
ac731ab6
SH
6933 int i, msix_rx_cnt = 0;
6934
f61e0a35
SH
6935 for (i = 0; i < sp->num_entries; i++) {
6936 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6937 if (sp->s2io_entries[i].type ==
d44570e4 6938 MSIX_RING_TYPE) {
a8c1d28a
DC
6939 snprintf(sp->desc[i],
6940 sizeof(sp->desc[i]),
6941 "%s:MSI-X-%d-RX",
ac731ab6
SH
6942 dev->name, i);
6943 err = request_irq(sp->entries[i].vector,
d44570e4
JP
6944 s2io_msix_ring_handle,
6945 0,
6946 sp->desc[i],
6947 sp->s2io_entries[i].arg);
ac731ab6 6948 } else if (sp->s2io_entries[i].type ==
d44570e4 6949 MSIX_ALARM_TYPE) {
a8c1d28a
DC
6950 snprintf(sp->desc[i],
6951 sizeof(sp->desc[i]),
6952 "%s:MSI-X-%d-TX",
d44570e4 6953 dev->name, i);
ac731ab6 6954 err = request_irq(sp->entries[i].vector,
d44570e4
JP
6955 s2io_msix_fifo_handle,
6956 0,
6957 sp->desc[i],
6958 sp->s2io_entries[i].arg);
ac731ab6 6959
fb6a825b 6960 }
ac731ab6
SH
6961 /* if either data or addr is zero print it. */
6962 if (!(sp->msix_info[i].addr &&
d44570e4 6963 sp->msix_info[i].data)) {
ac731ab6 6964 DBG_PRINT(ERR_DBG,
d44570e4
JP
6965 "%s @Addr:0x%llx Data:0x%llx\n",
6966 sp->desc[i],
6967 (unsigned long long)
6968 sp->msix_info[i].addr,
6969 (unsigned long long)
6970 ntohl(sp->msix_info[i].data));
ac731ab6 6971 } else
fb6a825b 6972 msix_rx_cnt++;
ac731ab6
SH
6973 if (err) {
6974 remove_msix_isr(sp);
6975
6976 DBG_PRINT(ERR_DBG,
d44570e4
JP
6977 "%s:MSI-X-%d registration "
6978 "failed\n", dev->name, i);
ac731ab6
SH
6979
6980 DBG_PRINT(ERR_DBG,
d44570e4
JP
6981 "%s: Defaulting to INTA\n",
6982 dev->name);
ac731ab6
SH
6983 sp->config.intr_type = INTA;
6984 break;
fb6a825b 6985 }
ac731ab6
SH
6986 sp->s2io_entries[i].in_use =
6987 MSIX_REGISTERED_SUCCESS;
c92ca04b 6988 }
e6a8fee2 6989 }
18b2b7bd 6990 if (!err) {
6cef2b8e 6991 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
9e39f7c5
JP
6992 DBG_PRINT(INFO_DBG,
6993 "MSI-X-TX entries enabled through alarm vector\n");
18b2b7bd 6994 }
e6a8fee2 6995 }
eaae7f72 6996 if (sp->config.intr_type == INTA) {
80777c54 6997 err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
d44570e4 6998 sp->name, dev);
e6a8fee2
AR
6999 if (err) {
7000 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7001 dev->name);
7002 return -1;
7003 }
7004 }
7005 return 0;
7006}
d44570e4
JP
7007
7008static void s2io_rem_isr(struct s2io_nic *sp)
e6a8fee2 7009{
18b2b7bd
SH
7010 if (sp->config.intr_type == MSI_X)
7011 remove_msix_isr(sp);
7012 else
7013 remove_inta_isr(sp);
e6a8fee2
AR
7014}
7015
d44570e4 7016static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
e6a8fee2
AR
7017{
7018 int cnt = 0;
1ee6dd77 7019 struct XENA_dev_config __iomem *bar0 = sp->bar0;
e6a8fee2 7020 register u64 val64 = 0;
5f490c96
SH
7021 struct config_param *config;
7022 config = &sp->config;
e6a8fee2 7023
9f74ffde
SH
7024 if (!is_s2io_card_up(sp))
7025 return;
7026
e6a8fee2
AR
7027 del_timer_sync(&sp->alarm_timer);
7028 /* If s2io_set_link task is executing, wait till it completes. */
d44570e4 7029 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
e6a8fee2 7030 msleep(50);
92b84437 7031 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
e6a8fee2 7032
5f490c96 7033 /* Disable napi */
f61e0a35
SH
7034 if (sp->config.napi) {
7035 int off = 0;
7036 if (config->intr_type == MSI_X) {
7037 for (; off < sp->config.rx_ring_num; off++)
7038 napi_disable(&sp->mac_control.rings[off].napi);
d44570e4 7039 }
f61e0a35
SH
7040 else
7041 napi_disable(&sp->napi);
7042 }
5f490c96 7043
e6a8fee2 7044 /* disable Tx and Rx traffic on the NIC */
d796fdb7
LV
7045 if (do_io)
7046 stop_nic(sp);
e6a8fee2
AR
7047
7048 s2io_rem_isr(sp);
1da177e4 7049
01e16faa
SH
7050 /* stop the tx queue, indicate link down */
7051 s2io_link(sp, LINK_DOWN);
7052
1da177e4 7053 /* Check if the device is Quiescent and then Reset the NIC */
d44570e4 7054 while (do_io) {
5d3213cc
AR
7055 /* As per the HW requirement we need to replenish the
7056 * receive buffer to avoid the ring bump. Since there is
7057 * no intention of processing the Rx frame at this pointwe are
70f23fd6 7058 * just setting the ownership bit of rxd in Each Rx
5d3213cc
AR
7059 * ring to HW and set the appropriate buffer size
7060 * based on the ring mode
7061 */
7062 rxd_owner_bit_reset(sp);
7063
1da177e4 7064 val64 = readq(&bar0->adapter_status);
19a60522 7065 if (verify_xena_quiescence(sp)) {
d44570e4
JP
7066 if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7067 break;
1da177e4
LT
7068 }
7069
7070 msleep(50);
7071 cnt++;
7072 if (cnt == 10) {
9e39f7c5
JP
7073 DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7074 "adapter status reads 0x%llx\n",
d44570e4 7075 (unsigned long long)val64);
1da177e4
LT
7076 break;
7077 }
d796fdb7
LV
7078 }
7079 if (do_io)
7080 s2io_reset(sp);
1da177e4 7081
7ba013ac 7082 /* Free all Tx buffers */
1da177e4 7083 free_tx_buffers(sp);
7ba013ac
K
7084
7085 /* Free all Rx buffers */
1da177e4
LT
7086 free_rx_buffers(sp);
7087
92b84437 7088 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
1da177e4
LT
7089}
7090
d44570e4 7091static void s2io_card_down(struct s2io_nic *sp)
d796fdb7
LV
7092{
7093 do_s2io_card_down(sp, 1);
7094}
7095
d44570e4 7096static int s2io_card_up(struct s2io_nic *sp)
1da177e4 7097{
cc6e7c44 7098 int i, ret = 0;
1da177e4 7099 struct config_param *config;
ffb5df6c 7100 struct mac_info *mac_control;
64699336 7101 struct net_device *dev = sp->dev;
e6a8fee2 7102 u16 interruptible;
1da177e4
LT
7103
7104 /* Initialize the H/W I/O registers */
9f74ffde
SH
7105 ret = init_nic(sp);
7106 if (ret != 0) {
1da177e4
LT
7107 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7108 dev->name);
9f74ffde
SH
7109 if (ret != -EIO)
7110 s2io_reset(sp);
7111 return ret;
1da177e4
LT
7112 }
7113
20346722
K
7114 /*
7115 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
7116 * Rx ring and initializing buffers into 30 Rx blocks
7117 */
1da177e4 7118 config = &sp->config;
ffb5df6c 7119 mac_control = &sp->mac_control;
1da177e4
LT
7120
7121 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7122 struct ring_info *ring = &mac_control->rings[i];
7123
7124 ring->mtu = dev->mtu;
f0c54ace 7125 ring->lro = !!(dev->features & NETIF_F_LRO);
13d866a9 7126 ret = fill_rx_buffers(sp, ring, 1);
0425b46a 7127 if (ret) {
1da177e4
LT
7128 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7129 dev->name);
7130 s2io_reset(sp);
7131 free_rx_buffers(sp);
7132 return -ENOMEM;
7133 }
7134 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
13d866a9 7135 ring->rx_bufs_left);
1da177e4 7136 }
5f490c96
SH
7137
7138 /* Initialise napi */
f61e0a35 7139 if (config->napi) {
f61e0a35
SH
7140 if (config->intr_type == MSI_X) {
7141 for (i = 0; i < sp->config.rx_ring_num; i++)
7142 napi_enable(&sp->mac_control.rings[i].napi);
7143 } else {
7144 napi_enable(&sp->napi);
7145 }
7146 }
5f490c96 7147
19a60522
SS
7148 /* Maintain the state prior to the open */
7149 if (sp->promisc_flg)
7150 sp->promisc_flg = 0;
7151 if (sp->m_cast_flg) {
7152 sp->m_cast_flg = 0;
d44570e4 7153 sp->all_multi_pos = 0;
19a60522 7154 }
1da177e4
LT
7155
7156 /* Setting its receive mode */
7157 s2io_set_multicast(dev);
7158
f0c54ace 7159 if (dev->features & NETIF_F_LRO) {
b41477f3 7160 /* Initialize max aggregatable pkts per session based on MTU */
7d3d0439 7161 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
d44570e4 7162 /* Check if we can use (if specified) user provided value */
7d3d0439
RA
7163 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7164 sp->lro_max_aggr_per_sess = lro_max_pkts;
7165 }
7166
1da177e4
LT
7167 /* Enable Rx Traffic and interrupts on the NIC */
7168 if (start_nic(sp)) {
7169 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
1da177e4 7170 s2io_reset(sp);
e6a8fee2
AR
7171 free_rx_buffers(sp);
7172 return -ENODEV;
7173 }
7174
7175 /* Add interrupt service routine */
7176 if (s2io_add_isr(sp) != 0) {
eaae7f72 7177 if (sp->config.intr_type == MSI_X)
e6a8fee2
AR
7178 s2io_rem_isr(sp);
7179 s2io_reset(sp);
1da177e4
LT
7180 free_rx_buffers(sp);
7181 return -ENODEV;
7182 }
7183
e84a2ac9
KC
7184 timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
7185 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
25fff88e 7186
01e16faa
SH
7187 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7188
e6a8fee2 7189 /* Enable select interrupts */
9caab458 7190 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
01e16faa
SH
7191 if (sp->config.intr_type != INTA) {
7192 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7193 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7194 } else {
e6a8fee2 7195 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 7196 interruptible |= TX_PIC_INTR;
e6a8fee2
AR
7197 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7198 }
7199
1da177e4
LT
7200 return 0;
7201}
7202
20346722 7203/**
1da177e4
LT
7204 * s2io_restart_nic - Resets the NIC.
7205 * @data : long pointer to the device private structure
7206 * Description:
7207 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 7208 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
7209 * the run time of the watch dog routine which is run holding a
7210 * spin lock.
7211 */
7212
c4028958 7213static void s2io_restart_nic(struct work_struct *work)
1da177e4 7214{
1ee6dd77 7215 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
c4028958 7216 struct net_device *dev = sp->dev;
1da177e4 7217
22747d6b
FR
7218 rtnl_lock();
7219
7220 if (!netif_running(dev))
7221 goto out_unlock;
7222
e6a8fee2 7223 s2io_card_down(sp);
1da177e4 7224 if (s2io_card_up(sp)) {
d44570e4 7225 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
1da177e4 7226 }
3a3d5756 7227 s2io_wake_all_tx_queue(sp);
d44570e4 7228 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
22747d6b
FR
7229out_unlock:
7230 rtnl_unlock();
1da177e4
LT
7231}
7232
20346722
K
7233/**
7234 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
7235 * @dev : Pointer to net device structure
7236 * Description:
7237 * This function is triggered if the Tx Queue is stopped
7238 * for a pre-defined amount of time when the Interface is still up.
7239 * If the Interface is jammed in such a situation, the hardware is
7240 * reset (by s2io_close) and restarted again (by s2io_open) to
7241 * overcome any problem that might have been caused in the hardware.
7242 * Return value:
7243 * void
7244 */
7245
7246static void s2io_tx_watchdog(struct net_device *dev)
7247{
4cf1653a 7248 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c 7249 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7250
7251 if (netif_carrier_ok(dev)) {
ffb5df6c 7252 swstats->watchdog_timer_cnt++;
1da177e4 7253 schedule_work(&sp->rst_timer_task);
ffb5df6c 7254 swstats->soft_reset_cnt++;
1da177e4
LT
7255 }
7256}
7257
/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @sp: private member of the device structure,pointer to s2io_nic structure.
 * @skb : the socket buffer pointer.
 * @len : length of the packet
 * @cksum : FCS checksum of the frame.
 * @ring_no : the ring from which this RxD was extracted.
 * Description:
 * This function is called by the Rx interrupt serivce routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It mainly checks if the checksum is OK, if so adds it to the
 * SKBs cksum variable, increments the Rx packet count and passes the SKB
 * to the upper layer. If the checksum is wrong, it increments the Rx
 * packet error count, frees the SKB and returns error.
 * Return value:
 * SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = ring_data->dev;
	/* The skb pointer was stashed in the descriptor at fill time. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long)rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *uninitialized_var(lro);
	u8 err_mask;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1)
			swstats->parity_err_cnt++;

		/* Map the hardware transfer code to its software counter. */
		err_mask = err >> 48;
		switch (err_mask) {
		case 1:
			swstats->rx_parity_err_cnt++;
			break;

		case 2:
			swstats->rx_abort_cnt++;
			break;

		case 3:
			swstats->rx_parity_abort_cnt++;
			break;

		case 4:
			swstats->rx_rda_fail_cnt++;
			break;

		case 5:
			swstats->rx_unkn_prot_cnt++;
			break;

		case 6:
			swstats->rx_fcs_err_cnt++;
			break;

		case 7:
			swstats->rx_buf_size_err_cnt++;
			break;

		case 8:
			swstats->rx_rxd_corrupt_cnt++;
			break;

		case 15:
			swstats->rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				  dev->name, err_mask);
			dev->stats.rx_crc_errors++;
			swstats->mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* 1-buffer mode: the whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		skb_put(skb, len);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/*
		 * 2-buffer mode: copy the buffer-0 portion (ba_0) in front
		 * of the buffer-2 data already present in the skb.
		 */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Only trust hardware checksums for non-fragmented TCP/UDP frames
	 * (or any TCP/UDP frame when LRO is off) with RXCSUM enabled. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
	    ((!ring_data->lro) ||
	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
	    (dev->features & NETIF_F_RXCSUM)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len = 0;
				u8 *tcp;
				int ret = 0;

				/* Try to aggregate this segment into an
				 * existing LRO session. */
				ret = s2io_club_tcp_session(ring_data,
							    skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro, skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro, skb, tcp_len);
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					swstats->flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len = lro->frags_len;
					swstats->sending_both++;
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not L2 aggregatable */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					/* Unknown return code is a driver
					 * logic error — crash loudly. */
					DBG_PRINT(ERR_DBG,
						  "%s: Samadhana!!\n",
						  __func__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb_checksum_none_assert(skb);
		}
	} else
		skb_checksum_none_assert(skb);

	swstats->mem_freed += skb->truesize;
send_up:
	skb_record_rx_queue(skb, ring_no);
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7445
7446/**
7447 * s2io_link - stops/starts the Tx queue.
7448 * @sp : private member of the device structure, which is a pointer to the
7449 * s2io_nic structure.
7450 * @link : inidicates whether link is UP/DOWN.
7451 * Description:
7452 * This function stops/starts the Tx queue depending on whether the link
20346722
K
7453 * status of the NIC is is down or up. This is called by the Alarm
7454 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
7455 * Return value:
7456 * void.
7457 */
7458
d44570e4 7459static void s2io_link(struct s2io_nic *sp, int link)
1da177e4 7460{
64699336 7461 struct net_device *dev = sp->dev;
ffb5df6c 7462 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7463
7464 if (link != sp->last_link_state) {
b7c5678f 7465 init_tti(sp, link);
1da177e4
LT
7466 if (link == LINK_DOWN) {
7467 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
3a3d5756 7468 s2io_stop_all_tx_queue(sp);
1da177e4 7469 netif_carrier_off(dev);
ffb5df6c
JP
7470 if (swstats->link_up_cnt)
7471 swstats->link_up_time =
7472 jiffies - sp->start_time;
7473 swstats->link_down_cnt++;
1da177e4
LT
7474 } else {
7475 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
ffb5df6c
JP
7476 if (swstats->link_down_cnt)
7477 swstats->link_down_time =
d44570e4 7478 jiffies - sp->start_time;
ffb5df6c 7479 swstats->link_up_cnt++;
1da177e4 7480 netif_carrier_on(dev);
3a3d5756 7481 s2io_wake_all_tx_queue(sp);
1da177e4
LT
7482 }
7483 }
7484 sp->last_link_state = link;
491976b2 7485 sp->start_time = jiffies;
1da177e4
LT
7486}
7487
20346722
K
7488/**
7489 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7490 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
7491 * s2io_nic structure.
7492 * Description:
7493 * This function initializes a few of the PCI and PCI-X configuration registers
7494 * with recommended values.
7495 * Return value:
7496 * void
7497 */
7498
d44570e4 7499static void s2io_init_pci(struct s2io_nic *sp)
1da177e4 7500{
20346722 7501 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
7502
7503 /* Enable Data Parity Error Recovery in PCI-X command register. */
7504 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7505 &(pcix_cmd));
1da177e4 7506 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7507 (pcix_cmd | 1));
1da177e4 7508 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7509 &(pcix_cmd));
1da177e4
LT
7510
7511 /* Set the PErr Response bit in PCI command register. */
7512 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7513 pci_write_config_word(sp->pdev, PCI_COMMAND,
7514 (pci_cmd | PCI_COMMAND_PARITY));
7515 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
7516}
7517
3a3d5756 7518static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
d44570e4 7519 u8 *dev_multiq)
9dc737a7 7520{
1853e2e1
JM
7521 int i;
7522
d44570e4 7523 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
9e39f7c5 7524 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
d44570e4 7525 "(%d) not supported\n", tx_fifo_num);
6cfc482b
SH
7526
7527 if (tx_fifo_num < 1)
7528 tx_fifo_num = 1;
7529 else
7530 tx_fifo_num = MAX_TX_FIFOS;
7531
9e39f7c5 7532 DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
9dc737a7 7533 }
2fda096d 7534
6cfc482b 7535 if (multiq)
3a3d5756 7536 *dev_multiq = multiq;
6cfc482b
SH
7537
7538 if (tx_steering_type && (1 == tx_fifo_num)) {
7539 if (tx_steering_type != TX_DEFAULT_STEERING)
7540 DBG_PRINT(ERR_DBG,
9e39f7c5 7541 "Tx steering is not supported with "
d44570e4 7542 "one fifo. Disabling Tx steering.\n");
6cfc482b
SH
7543 tx_steering_type = NO_STEERING;
7544 }
7545
7546 if ((tx_steering_type < NO_STEERING) ||
d44570e4
JP
7547 (tx_steering_type > TX_DEFAULT_STEERING)) {
7548 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
7549 "Requested transmit steering not supported\n");
7550 DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
6cfc482b 7551 tx_steering_type = NO_STEERING;
3a3d5756
SH
7552 }
7553
0425b46a 7554 if (rx_ring_num > MAX_RX_RINGS) {
d44570e4 7555 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
7556 "Requested number of rx rings not supported\n");
7557 DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
d44570e4 7558 MAX_RX_RINGS);
0425b46a 7559 rx_ring_num = MAX_RX_RINGS;
9dc737a7 7560 }
0425b46a 7561
eccb8628 7562 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
9e39f7c5 7563 DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
9dc737a7
AR
7564 "Defaulting to INTA\n");
7565 *dev_intr_type = INTA;
7566 }
596c5c97 7567
9dc737a7 7568 if ((*dev_intr_type == MSI_X) &&
d44570e4
JP
7569 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7570 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
9e39f7c5 7571 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
d44570e4 7572 "Defaulting to INTA\n");
9dc737a7
AR
7573 *dev_intr_type = INTA;
7574 }
fb6a825b 7575
6d517a27 7576 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
9e39f7c5
JP
7577 DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7578 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
6d517a27 7579 rx_ring_mode = 1;
9dc737a7 7580 }
1853e2e1
JM
7581
7582 for (i = 0; i < MAX_RX_RINGS; i++)
7583 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7584 DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7585 "supported\nDefaulting to %d\n",
7586 MAX_RX_BLOCKS_PER_RING);
7587 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7588 }
7589
9dc737a7
AR
7590 return SUCCESS;
7591}
7592
9fc93a41
SS
7593/**
7594 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7595 * or Traffic class respectively.
b7c5678f 7596 * @nic: device private variable
9fc93a41
SS
7597 * Description: The function configures the receive steering to
7598 * desired receive ring.
7599 * Return Value: SUCCESS on success and
7600 * '-1' on failure (endian settings incorrect).
7601 */
7602static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7603{
7604 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7605 register u64 val64 = 0;
7606
7607 if (ds_codepoint > 63)
7608 return FAILURE;
7609
7610 val64 = RTS_DS_MEM_DATA(ring);
7611 writeq(val64, &bar0->rts_ds_mem_data);
7612
7613 val64 = RTS_DS_MEM_CTRL_WE |
7614 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7615 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7616
7617 writeq(val64, &bar0->rts_ds_mem_ctrl);
7618
7619 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
d44570e4
JP
7620 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7621 S2IO_BIT_RESET);
9fc93a41
SS
7622}
7623
/* Standard net_device callbacks implemented by this driver. */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open = s2io_open,
	.ndo_stop = s2io_close,
	.ndo_get_stats = s2io_get_stats,
	.ndo_start_xmit = s2io_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = s2io_set_multicast,
	.ndo_do_ioctl = s2io_ioctl,
	.ndo_set_mac_address = s2io_set_mac_addr,
	.ndo_change_mtu = s2io_change_mtu,
	.ndo_set_features = s2io_set_features,
	.ndo_tx_timeout = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Used by netconsole/netpoll to poll Rx with interrupts disabled. */
	.ndo_poll_controller = s2io_netpoll,
#endif
};
7640
1da177e4 7641/**
20346722 7642 * s2io_init_nic - Initialization of the adapter .
1da177e4
LT
7643 * @pdev : structure containing the PCI related information of the device.
7644 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7645 * Description:
7646 * The function initializes an adapter identified by the pci_dec structure.
20346722
K
7647 * All OS related initialization including memory and device structure and
7648 * initlaization of the device private variable is done. Also the swapper
7649 * control register is initialized to enable read and write into the I/O
1da177e4
LT
7650 * registers of the device.
7651 * Return value:
7652 * returns 0 on success and negative on failure.
7653 */
7654
3a036ce5 7655static int
1da177e4
LT
7656s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7657{
1ee6dd77 7658 struct s2io_nic *sp;
1da177e4 7659 struct net_device *dev;
1da177e4 7660 int i, j, ret;
f957bcf0 7661 int dma_flag = false;
1da177e4
LT
7662 u32 mac_up, mac_down;
7663 u64 val64 = 0, tmp64 = 0;
1ee6dd77 7664 struct XENA_dev_config __iomem *bar0 = NULL;
1da177e4 7665 u16 subid;
1da177e4 7666 struct config_param *config;
ffb5df6c 7667 struct mac_info *mac_control;
541ae68f 7668 int mode;
cc6e7c44 7669 u8 dev_intr_type = intr_type;
3a3d5756 7670 u8 dev_multiq = 0;
1da177e4 7671
3a3d5756
SH
7672 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7673 if (ret)
9dc737a7 7674 return ret;
1da177e4 7675
d44570e4
JP
7676 ret = pci_enable_device(pdev);
7677 if (ret) {
1da177e4 7678 DBG_PRINT(ERR_DBG,
9e39f7c5 7679 "%s: pci_enable_device failed\n", __func__);
1da177e4
LT
7680 return ret;
7681 }
7682
6a35528a 7683 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
9e39f7c5 7684 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
f957bcf0 7685 dma_flag = true;
d44570e4 7686 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1da177e4 7687 DBG_PRINT(ERR_DBG,
d44570e4
JP
7688 "Unable to obtain 64bit DMA "
7689 "for consistent allocations\n");
1da177e4
LT
7690 pci_disable_device(pdev);
7691 return -ENOMEM;
7692 }
284901a9 7693 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
9e39f7c5 7694 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
1da177e4
LT
7695 } else {
7696 pci_disable_device(pdev);
7697 return -ENOMEM;
7698 }
d44570e4
JP
7699 ret = pci_request_regions(pdev, s2io_driver_name);
7700 if (ret) {
9e39f7c5 7701 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
d44570e4 7702 __func__, ret);
eccb8628
VP
7703 pci_disable_device(pdev);
7704 return -ENODEV;
1da177e4 7705 }
3a3d5756 7706 if (dev_multiq)
6cfc482b 7707 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
3a3d5756 7708 else
b19fa1fa 7709 dev = alloc_etherdev(sizeof(struct s2io_nic));
1da177e4 7710 if (dev == NULL) {
1da177e4
LT
7711 pci_disable_device(pdev);
7712 pci_release_regions(pdev);
7713 return -ENODEV;
7714 }
7715
7716 pci_set_master(pdev);
7717 pci_set_drvdata(pdev, dev);
1da177e4
LT
7718 SET_NETDEV_DEV(dev, &pdev->dev);
7719
7720 /* Private member variable initialized to s2io NIC structure */
4cf1653a 7721 sp = netdev_priv(dev);
1da177e4
LT
7722 sp->dev = dev;
7723 sp->pdev = pdev;
1da177e4 7724 sp->high_dma_flag = dma_flag;
f957bcf0 7725 sp->device_enabled_once = false;
da6971d8
AR
7726 if (rx_ring_mode == 1)
7727 sp->rxd_mode = RXD_MODE_1;
7728 if (rx_ring_mode == 2)
7729 sp->rxd_mode = RXD_MODE_3B;
da6971d8 7730
eaae7f72 7731 sp->config.intr_type = dev_intr_type;
1da177e4 7732
541ae68f 7733 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
d44570e4 7734 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
541ae68f
K
7735 sp->device_type = XFRAME_II_DEVICE;
7736 else
7737 sp->device_type = XFRAME_I_DEVICE;
7738
6aa20a22 7739
1da177e4
LT
7740 /* Initialize some PCI/PCI-X fields of the NIC. */
7741 s2io_init_pci(sp);
7742
20346722 7743 /*
1da177e4 7744 * Setting the device configuration parameters.
20346722
K
7745 * Most of these parameters can be specified by the user during
7746 * module insertion as they are module loadable parameters. If
7747 * these parameters are not not specified during load time, they
1da177e4
LT
7748 * are initialized with default values.
7749 */
1da177e4 7750 config = &sp->config;
ffb5df6c 7751 mac_control = &sp->mac_control;
1da177e4 7752
596c5c97 7753 config->napi = napi;
6cfc482b 7754 config->tx_steering_type = tx_steering_type;
596c5c97 7755
1da177e4 7756 /* Tx side parameters. */
6cfc482b
SH
7757 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7758 config->tx_fifo_num = MAX_TX_FIFOS;
7759 else
7760 config->tx_fifo_num = tx_fifo_num;
7761
7762 /* Initialize the fifos used for tx steering */
7763 if (config->tx_fifo_num < 5) {
d44570e4
JP
7764 if (config->tx_fifo_num == 1)
7765 sp->total_tcp_fifos = 1;
7766 else
7767 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7768 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7769 sp->total_udp_fifos = 1;
7770 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
6cfc482b
SH
7771 } else {
7772 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
d44570e4 7773 FIFO_OTHER_MAX_NUM);
6cfc482b
SH
7774 sp->udp_fifo_idx = sp->total_tcp_fifos;
7775 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7776 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7777 }
7778
3a3d5756 7779 config->multiq = dev_multiq;
6cfc482b 7780 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7781 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7782
7783 tx_cfg->fifo_len = tx_fifo_len[i];
7784 tx_cfg->fifo_priority = i;
1da177e4
LT
7785 }
7786
20346722
K
7787 /* mapping the QoS priority to the configured fifos */
7788 for (i = 0; i < MAX_TX_FIFOS; i++)
3a3d5756 7789 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
20346722 7790
6cfc482b
SH
7791 /* map the hashing selector table to the configured fifos */
7792 for (i = 0; i < config->tx_fifo_num; i++)
7793 sp->fifo_selector[i] = fifo_selector[i];
7794
7795
1da177e4
LT
7796 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7797 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7798 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7799
7800 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7801 if (tx_cfg->fifo_len < 65) {
1da177e4
LT
7802 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7803 break;
7804 }
7805 }
fed5eccd
AR
7806 /* + 2 because one Txd for skb->data and one Txd for UFO */
7807 config->max_txds = MAX_SKB_FRAGS + 2;
1da177e4
LT
7808
7809 /* Rx side parameters. */
1da177e4 7810 config->rx_ring_num = rx_ring_num;
0425b46a 7811 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7812 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7813 struct ring_info *ring = &mac_control->rings[i];
7814
7815 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7816 rx_cfg->ring_priority = i;
7817 ring->rx_bufs_left = 0;
7818 ring->rxd_mode = sp->rxd_mode;
7819 ring->rxd_count = rxd_count[sp->rxd_mode];
7820 ring->pdev = sp->pdev;
7821 ring->dev = sp->dev;
1da177e4
LT
7822 }
7823
7824 for (i = 0; i < rx_ring_num; i++) {
13d866a9
JP
7825 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7826
7827 rx_cfg->ring_org = RING_ORG_BUFF1;
7828 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
1da177e4
LT
7829 }
7830
7831 /* Setting Mac Control parameters */
7832 mac_control->rmac_pause_time = rmac_pause_time;
7833 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7834 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7835
7836
1da177e4
LT
7837 /* initialize the shared memory used by the NIC and the host */
7838 if (init_shared_mem(sp)) {
d44570e4 7839 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
1da177e4
LT
7840 ret = -ENOMEM;
7841 goto mem_alloc_failed;
7842 }
7843
275f165f 7844 sp->bar0 = pci_ioremap_bar(pdev, 0);
1da177e4 7845 if (!sp->bar0) {
19a60522 7846 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
1da177e4
LT
7847 dev->name);
7848 ret = -ENOMEM;
7849 goto bar0_remap_failed;
7850 }
7851
275f165f 7852 sp->bar1 = pci_ioremap_bar(pdev, 2);
1da177e4 7853 if (!sp->bar1) {
19a60522 7854 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
1da177e4
LT
7855 dev->name);
7856 ret = -ENOMEM;
7857 goto bar1_remap_failed;
7858 }
7859
1da177e4
LT
7860 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7861 for (j = 0; j < MAX_TX_FIFOS; j++) {
43d620c8 7862 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
1da177e4
LT
7863 }
7864
7865 /* Driver entry points */
04025095 7866 dev->netdev_ops = &s2io_netdev_ops;
7ad24ea4 7867 dev->ethtool_ops = &netdev_ethtool_ops;
b437a8cc
MM
7868 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7869 NETIF_F_TSO | NETIF_F_TSO6 |
7870 NETIF_F_RXCSUM | NETIF_F_LRO;
7871 dev->features |= dev->hw_features |
f646968f 7872 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
f957bcf0 7873 if (sp->high_dma_flag == true)
1da177e4 7874 dev->features |= NETIF_F_HIGHDMA;
1da177e4 7875 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
c4028958
DH
7876 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7877 INIT_WORK(&sp->set_link_task, s2io_set_link);
1da177e4 7878
e960fc5c 7879 pci_save_state(sp->pdev);
1da177e4
LT
7880
7881 /* Setting swapper control on the NIC, for proper reset operation */
7882 if (s2io_set_swapper(sp)) {
9e39f7c5 7883 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
1da177e4
LT
7884 dev->name);
7885 ret = -EAGAIN;
7886 goto set_swap_failed;
7887 }
7888
541ae68f
K
7889 /* Verify if the Herc works on the slot its placed into */
7890 if (sp->device_type & XFRAME_II_DEVICE) {
7891 mode = s2io_verify_pci_mode(sp);
7892 if (mode < 0) {
9e39f7c5
JP
7893 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7894 __func__);
541ae68f
K
7895 ret = -EBADSLT;
7896 goto set_swap_failed;
7897 }
7898 }
7899
f61e0a35
SH
7900 if (sp->config.intr_type == MSI_X) {
7901 sp->num_entries = config->rx_ring_num + 1;
7902 ret = s2io_enable_msi_x(sp);
7903
7904 if (!ret) {
7905 ret = s2io_test_msi(sp);
7906 /* rollback MSI-X, will re-enable during add_isr() */
7907 remove_msix_isr(sp);
7908 }
7909 if (ret) {
7910
7911 DBG_PRINT(ERR_DBG,
9e39f7c5 7912 "MSI-X requested but failed to enable\n");
f61e0a35
SH
7913 sp->config.intr_type = INTA;
7914 }
7915 }
7916
7917 if (config->intr_type == MSI_X) {
13d866a9
JP
7918 for (i = 0; i < config->rx_ring_num ; i++) {
7919 struct ring_info *ring = &mac_control->rings[i];
7920
7921 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7922 }
f61e0a35
SH
7923 } else {
7924 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7925 }
7926
541ae68f
K
7927 /* Not needed for Herc */
7928 if (sp->device_type & XFRAME_I_DEVICE) {
7929 /*
7930 * Fix for all "FFs" MAC address problems observed on
7931 * Alpha platforms
7932 */
7933 fix_mac_address(sp);
7934 s2io_reset(sp);
7935 }
1da177e4
LT
7936
7937 /*
1da177e4
LT
7938 * MAC address initialization.
7939 * For now only one mac address will be read and used.
7940 */
7941 bar0 = sp->bar0;
7942 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
d44570e4 7943 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
1da177e4 7944 writeq(val64, &bar0->rmac_addr_cmd_mem);
c92ca04b 7945 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
7946 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7947 S2IO_BIT_RESET);
1da177e4 7948 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4 7949 mac_down = (u32)tmp64;
1da177e4
LT
7950 mac_up = (u32) (tmp64 >> 32);
7951
1da177e4
LT
7952 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7953 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7954 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7955 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7956 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7957 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7958
1da177e4
LT
7959 /* Set the factory defined MAC address initially */
7960 dev->addr_len = ETH_ALEN;
7961 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7962
faa4f796
SH
7963 /* initialize number of multicast & unicast MAC entries variables */
7964 if (sp->device_type == XFRAME_I_DEVICE) {
7965 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7966 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7967 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7968 } else if (sp->device_type == XFRAME_II_DEVICE) {
7969 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7970 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7971 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7972 }
7973
18c310fb
JW
7974 /* MTU range: 46 - 9600 */
7975 dev->min_mtu = MIN_MTU;
7976 dev->max_mtu = S2IO_JUMBO_SIZE;
7977
faa4f796
SH
7978 /* store mac addresses from CAM to s2io_nic structure */
7979 do_s2io_store_unicast_mc(sp);
7980
f61e0a35
SH
7981 /* Configure MSIX vector for number of rings configured plus one */
7982 if ((sp->device_type == XFRAME_II_DEVICE) &&
d44570e4 7983 (config->intr_type == MSI_X))
f61e0a35
SH
7984 sp->num_entries = config->rx_ring_num + 1;
7985
d44570e4 7986 /* Store the values of the MSIX table in the s2io_nic structure */
c77dd43e 7987 store_xmsi_data(sp);
b41477f3
AR
7988 /* reset Nic and bring it to known state */
7989 s2io_reset(sp);
7990
1da177e4 7991 /*
99993af6 7992 * Initialize link state flags
541ae68f 7993 * and the card state parameter
1da177e4 7994 */
92b84437 7995 sp->state = 0;
1da177e4 7996
1da177e4 7997 /* Initialize spinlocks */
13d866a9
JP
7998 for (i = 0; i < sp->config.tx_fifo_num; i++) {
7999 struct fifo_info *fifo = &mac_control->fifos[i];
8000
8001 spin_lock_init(&fifo->tx_lock);
8002 }
db874e65 8003
20346722
K
8004 /*
8005 * SXE-002: Configure link and activity LED to init state
8006 * on driver load.
1da177e4
LT
8007 */
8008 subid = sp->pdev->subsystem_device;
8009 if ((subid & 0xFF) >= 0x07) {
8010 val64 = readq(&bar0->gpio_control);
8011 val64 |= 0x0000800000000000ULL;
8012 writeq(val64, &bar0->gpio_control);
8013 val64 = 0x0411040400000000ULL;
d44570e4 8014 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
8015 val64 = readq(&bar0->gpio_control);
8016 }
8017
8018 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8019
8020 if (register_netdev(dev)) {
8021 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8022 ret = -ENODEV;
8023 goto register_failed;
8024 }
9dc737a7 8025 s2io_vpd_read(sp);
926bd900 8026 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
d44570e4 8027 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
44c10138 8028 sp->product_name, pdev->revision);
b41477f3
AR
8029 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8030 s2io_driver_version);
9e39f7c5
JP
8031 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8032 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
9dc737a7 8033 if (sp->device_type & XFRAME_II_DEVICE) {
0b1f7ebe 8034 mode = s2io_print_pci_mode(sp);
541ae68f 8035 if (mode < 0) {
541ae68f 8036 ret = -EBADSLT;
9dc737a7 8037 unregister_netdev(dev);
541ae68f
K
8038 goto set_swap_failed;
8039 }
541ae68f 8040 }
d44570e4
JP
8041 switch (sp->rxd_mode) {
8042 case RXD_MODE_1:
8043 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8044 dev->name);
8045 break;
8046 case RXD_MODE_3B:
8047 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8048 dev->name);
8049 break;
9dc737a7 8050 }
db874e65 8051
f61e0a35
SH
8052 switch (sp->config.napi) {
8053 case 0:
8054 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8055 break;
8056 case 1:
db874e65 8057 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
f61e0a35
SH
8058 break;
8059 }
3a3d5756
SH
8060
8061 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
d44570e4 8062 sp->config.tx_fifo_num);
3a3d5756 8063
0425b46a
SH
8064 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8065 sp->config.rx_ring_num);
8066
d44570e4
JP
8067 switch (sp->config.intr_type) {
8068 case INTA:
8069 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8070 break;
8071 case MSI_X:
8072 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8073 break;
9dc737a7 8074 }
3a3d5756 8075 if (sp->config.multiq) {
13d866a9
JP
8076 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8077 struct fifo_info *fifo = &mac_control->fifos[i];
8078
8079 fifo->multiq = config->multiq;
8080 }
3a3d5756 8081 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
d44570e4 8082 dev->name);
3a3d5756
SH
8083 } else
8084 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
d44570e4 8085 dev->name);
3a3d5756 8086
6cfc482b
SH
8087 switch (sp->config.tx_steering_type) {
8088 case NO_STEERING:
d44570e4
JP
8089 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8090 dev->name);
8091 break;
6cfc482b 8092 case TX_PRIORITY_STEERING:
d44570e4
JP
8093 DBG_PRINT(ERR_DBG,
8094 "%s: Priority steering enabled for transmit\n",
8095 dev->name);
6cfc482b
SH
8096 break;
8097 case TX_DEFAULT_STEERING:
d44570e4
JP
8098 DBG_PRINT(ERR_DBG,
8099 "%s: Default steering enabled for transmit\n",
8100 dev->name);
6cfc482b
SH
8101 }
8102
f0c54ace
AW
8103 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8104 dev->name);
7ba013ac 8105 /* Initialize device name */
a8c1d28a
DC
8106 snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8107 sp->product_name);
7ba013ac 8108
cd0fce03
BL
8109 if (vlan_tag_strip)
8110 sp->vlan_strip_flag = 1;
8111 else
8112 sp->vlan_strip_flag = 0;
8113
20346722
K
8114 /*
8115 * Make Link state as off at this point, when the Link change
8116 * interrupt comes the state will be automatically changed to
1da177e4
LT
8117 * the right state.
8118 */
8119 netif_carrier_off(dev);
1da177e4
LT
8120
8121 return 0;
8122
d44570e4
JP
8123register_failed:
8124set_swap_failed:
1da177e4 8125 iounmap(sp->bar1);
d44570e4 8126bar1_remap_failed:
1da177e4 8127 iounmap(sp->bar0);
d44570e4
JP
8128bar0_remap_failed:
8129mem_alloc_failed:
1da177e4
LT
8130 free_shared_mem(sp);
8131 pci_disable_device(pdev);
eccb8628 8132 pci_release_regions(pdev);
1da177e4
LT
8133 free_netdev(dev);
8134
8135 return ret;
8136}
8137
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the Pci subsystem to release a
 * PCI device and free up all resource held up by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	/* drvdata may be NULL if probe never completed for this device */
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = netdev_priv(dev);

	/* Stop deferred work before tearing the device down so the
	 * reset/link-state handlers cannot run against freed state. */
	cancel_work_sync(&sp->rst_timer_task);
	cancel_work_sync(&sp->set_link_task);

	/* Detach from the network stack first; no new I/O after this. */
	unregister_netdev(dev);

	/* Release DMA memory and BAR mappings, then the PCI resources.
	 * Order matters: the device is disabled only after everything
	 * referencing it has been freed. */
	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8171
/* Register the PCI driver; expands to module init/exit boilerplate. */
module_pci_driver(s2io_driver);
7d3d0439 8173
6aa20a22 8174static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
d44570e4
JP
8175 struct tcphdr **tcp, struct RxD_t *rxdp,
8176 struct s2io_nic *sp)
7d3d0439
RA
8177{
8178 int ip_off;
8179 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8180
8181 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
d44570e4
JP
8182 DBG_PRINT(INIT_DBG,
8183 "%s: Non-TCP frames not supported for LRO\n",
b39d66a8 8184 __func__);
7d3d0439
RA
8185 return -1;
8186 }
8187
cdb5bf02 8188 /* Checking for DIX type or DIX type with VLAN */
d44570e4 8189 if ((l2_type == 0) || (l2_type == 4)) {
cdb5bf02
SH
8190 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8191 /*
8192 * If vlan stripping is disabled and the frame is VLAN tagged,
8193 * shift the offset by the VLAN header size bytes.
8194 */
cd0fce03 8195 if ((!sp->vlan_strip_flag) &&
d44570e4 8196 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
cdb5bf02
SH
8197 ip_off += HEADER_VLAN_SIZE;
8198 } else {
7d3d0439 8199 /* LLC, SNAP etc are considered non-mergeable */
cdb5bf02 8200 return -1;
7d3d0439
RA
8201 }
8202
64699336 8203 *ip = (struct iphdr *)(buffer + ip_off);
7d3d0439
RA
8204 ip_len = (u8)((*ip)->ihl);
8205 ip_len <<= 2;
8206 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8207
8208 return 0;
8209}
8210
1ee6dd77 8211static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8212 struct tcphdr *tcp)
8213{
d44570e4
JP
8214 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8215 if ((lro->iph->saddr != ip->saddr) ||
8216 (lro->iph->daddr != ip->daddr) ||
8217 (lro->tcph->source != tcp->source) ||
8218 (lro->tcph->dest != tcp->dest))
7d3d0439
RA
8219 return -1;
8220 return 0;
8221}
8222
8223static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8224{
d44570e4 8225 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
7d3d0439
RA
8226}
8227
1ee6dd77 8228static void initiate_new_session(struct lro *lro, u8 *l2h,
d44570e4
JP
8229 struct iphdr *ip, struct tcphdr *tcp,
8230 u32 tcp_pyld_len, u16 vlan_tag)
7d3d0439 8231{
d44570e4 8232 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8233 lro->l2h = l2h;
8234 lro->iph = ip;
8235 lro->tcph = tcp;
8236 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
c8855953 8237 lro->tcp_ack = tcp->ack_seq;
7d3d0439
RA
8238 lro->sg_num = 1;
8239 lro->total_len = ntohs(ip->tot_len);
8240 lro->frags_len = 0;
cdb5bf02 8241 lro->vlan_tag = vlan_tag;
6aa20a22 8242 /*
d44570e4
JP
8243 * Check if we saw TCP timestamp.
8244 * Other consistency checks have already been done.
8245 */
7d3d0439 8246 if (tcp->doff == 8) {
c8855953
SR
8247 __be32 *ptr;
8248 ptr = (__be32 *)(tcp+1);
7d3d0439 8249 lro->saw_ts = 1;
c8855953 8250 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8251 lro->cur_tsecr = *(ptr+2);
8252 }
8253 lro->in_use = 1;
8254}
8255
1ee6dd77 8256static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7d3d0439
RA
8257{
8258 struct iphdr *ip = lro->iph;
8259 struct tcphdr *tcp = lro->tcph;
ffb5df6c
JP
8260 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8261
d44570e4 8262 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8263
8264 /* Update L3 header */
9a18dd15 8265 csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
7d3d0439 8266 ip->tot_len = htons(lro->total_len);
7d3d0439
RA
8267
8268 /* Update L4 header */
8269 tcp->ack_seq = lro->tcp_ack;
8270 tcp->window = lro->window;
8271
8272 /* Update tsecr field if this session has timestamps enabled */
8273 if (lro->saw_ts) {
c8855953 8274 __be32 *ptr = (__be32 *)(tcp + 1);
7d3d0439
RA
8275 *(ptr+2) = lro->cur_tsecr;
8276 }
8277
8278 /* Update counters required for calculation of
8279 * average no. of packets aggregated.
8280 */
ffb5df6c
JP
8281 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8282 swstats->num_aggregations++;
7d3d0439
RA
8283}
8284
1ee6dd77 8285static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
d44570e4 8286 struct tcphdr *tcp, u32 l4_pyld)
7d3d0439 8287{
d44570e4 8288 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8289 lro->total_len += l4_pyld;
8290 lro->frags_len += l4_pyld;
8291 lro->tcp_next_seq += l4_pyld;
8292 lro->sg_num++;
8293
8294 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8295 lro->tcp_ack = tcp->ack_seq;
8296 lro->window = tcp->window;
6aa20a22 8297
7d3d0439 8298 if (lro->saw_ts) {
c8855953 8299 __be32 *ptr;
7d3d0439 8300 /* Update tsecr and tsval from this packet */
c8855953
SR
8301 ptr = (__be32 *)(tcp+1);
8302 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8303 lro->cur_tsecr = *(ptr + 2);
8304 }
8305}
8306
1ee6dd77 8307static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7d3d0439
RA
8308 struct tcphdr *tcp, u32 tcp_pyld_len)
8309{
7d3d0439
RA
8310 u8 *ptr;
8311
d44570e4 8312 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
79dc1901 8313
7d3d0439
RA
8314 if (!tcp_pyld_len) {
8315 /* Runt frame or a pure ack */
8316 return -1;
8317 }
8318
8319 if (ip->ihl != 5) /* IP has options */
8320 return -1;
8321
75c30b13
AR
8322 /* If we see CE codepoint in IP header, packet is not mergeable */
8323 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8324 return -1;
8325
8326 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
d44570e4
JP
8327 if (tcp->urg || tcp->psh || tcp->rst ||
8328 tcp->syn || tcp->fin ||
8329 tcp->ece || tcp->cwr || !tcp->ack) {
7d3d0439
RA
8330 /*
8331 * Currently recognize only the ack control word and
8332 * any other control field being set would result in
8333 * flushing the LRO session
8334 */
8335 return -1;
8336 }
8337
6aa20a22 8338 /*
7d3d0439
RA
8339 * Allow only one TCP timestamp option. Don't aggregate if
8340 * any other options are detected.
8341 */
8342 if (tcp->doff != 5 && tcp->doff != 8)
8343 return -1;
8344
8345 if (tcp->doff == 8) {
6aa20a22 8346 ptr = (u8 *)(tcp + 1);
7d3d0439
RA
8347 while (*ptr == TCPOPT_NOP)
8348 ptr++;
8349 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8350 return -1;
8351
8352 /* Ensure timestamp value increases monotonically */
8353 if (l_lro)
c8855953 8354 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
7d3d0439
RA
8355 return -1;
8356
8357 /* timestamp echo reply should be non-zero */
c8855953 8358 if (*((__be32 *)(ptr+6)) == 0)
7d3d0439
RA
8359 return -1;
8360 }
8361
8362 return 0;
8363}
8364
/*
 * Classify a received TCP segment against the ring's LRO session table
 * and perform the resulting action.  Return codes (consumed by the rx
 * path):
 *   0 - no session available, segment passed up unmodified
 *   1 - segment aggregated into an existing session
 *   2 - out-of-sequence or unmergeable: flush session (and segment)
 *   3 - new session begun with this segment
 *   4 - aggregated and session reached max size: flush it
 *   5 - segment not L3/L4 aggregatable, send up as-is
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Locate L3/L4 headers; bail out if the L2 framing disqualifies LRO */
	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this 4-tuple */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* Sequence gap: flush the session rather than merge */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: claim the first free session slot */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	/* Carry out the action decided above */
	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		/* Session full after this segment: finalize and flush */
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8462
1ee6dd77 8463static void clear_lro_session(struct lro *lro)
7d3d0439 8464{
1ee6dd77 8465 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
8466
8467 memset(lro, 0, lro_struct_size);
8468}
8469
cdb5bf02 8470static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
7d3d0439
RA
8471{
8472 struct net_device *dev = skb->dev;
4cf1653a 8473 struct s2io_nic *sp = netdev_priv(dev);
7d3d0439
RA
8474
8475 skb->protocol = eth_type_trans(skb, dev);
b85da2c0 8476 if (vlan_tag && sp->vlan_strip_flag)
86a9bad3 8477 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
b85da2c0
JP
8478 if (sp->config.napi)
8479 netif_receive_skb(skb);
8480 else
8481 netif_rx(skb);
7d3d0439
RA
8482}
8483
1ee6dd77 8484static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
d44570e4 8485 struct sk_buff *skb, u32 tcp_len)
7d3d0439 8486{
75c30b13 8487 struct sk_buff *first = lro->parent;
ffb5df6c 8488 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7d3d0439
RA
8489
8490 first->len += tcp_len;
8491 first->data_len = lro->frags_len;
8492 skb_pull(skb, (skb->len - tcp_len));
75c30b13
AR
8493 if (skb_shinfo(first)->frag_list)
8494 lro->last_frag->next = skb;
7d3d0439
RA
8495 else
8496 skb_shinfo(first)->frag_list = skb;
372cc597 8497 first->truesize += skb->truesize;
75c30b13 8498 lro->last_frag = skb;
ffb5df6c 8499 swstats->clubbed_frms_cnt++;
7d3d0439 8500}
d796fdb7
LV
8501
8502/**
8503 * s2io_io_error_detected - called when PCI error is detected
8504 * @pdev: Pointer to PCI device
8453d43f 8505 * @state: The current pci connection state
d796fdb7
LV
8506 *
8507 * This function is called after a PCI bus error affecting
8508 * this device has been detected.
8509 */
8510static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
d44570e4 8511 pci_channel_state_t state)
d796fdb7
LV
8512{
8513 struct net_device *netdev = pci_get_drvdata(pdev);
4cf1653a 8514 struct s2io_nic *sp = netdev_priv(netdev);
d796fdb7
LV
8515
8516 netif_device_detach(netdev);
8517
1e3c8bd6
DN
8518 if (state == pci_channel_io_perm_failure)
8519 return PCI_ERS_RESULT_DISCONNECT;
8520
d796fdb7
LV
8521 if (netif_running(netdev)) {
8522 /* Bring down the card, while avoiding PCI I/O */
8523 do_s2io_card_down(sp, 0);
d796fdb7
LV
8524 }
8525 pci_disable_device(pdev);
8526
8527 return PCI_ERS_RESULT_NEED_RESET;
8528}
8529
8530/**
8531 * s2io_io_slot_reset - called after the pci bus has been reset.
8532 * @pdev: Pointer to PCI device
8533 *
8534 * Restart the card from scratch, as if from a cold-boot.
8535 * At this point, the card has exprienced a hard reset,
8536 * followed by fixups by BIOS, and has its config space
8537 * set up identically to what it was at cold boot.
8538 */
8539static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8540{
8541 struct net_device *netdev = pci_get_drvdata(pdev);
4cf1653a 8542 struct s2io_nic *sp = netdev_priv(netdev);
d796fdb7
LV
8543
8544 if (pci_enable_device(pdev)) {
6cef2b8e 8545 pr_err("Cannot re-enable PCI device after reset.\n");
d796fdb7
LV
8546 return PCI_ERS_RESULT_DISCONNECT;
8547 }
8548
8549 pci_set_master(pdev);
8550 s2io_reset(sp);
8551
8552 return PCI_ERS_RESULT_RECOVERED;
8553}
8554
/**
 * s2io_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that its OK to resume normal operation.
 */
static void s2io_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (netif_running(netdev)) {
		/* Re-initialize the card; give up if it won't come up. */
		if (s2io_card_up(sp)) {
			pr_err("Can't bring device back up after reset.\n");
			return;
		}

		/* Restore the MAC address; on failure roll the card
		 * back down since it is up but unusable. */
		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
			s2io_card_down(sp);
			pr_err("Can't restore mac addr after reset.\n");
			return;
		}
	}

	/* Recovery complete: reattach and restart all transmit queues. */
	netif_device_attach(netdev);
	netif_tx_wake_all_queues(netdev);
}