]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/net/ethernet/neterion/s2io.c
UBUNTU: Ubuntu-5.3.0-29.31
[mirror_ubuntu-eoan-kernel.git] / drivers / net / ethernet / neterion / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
926bd900 3 * Copyright(c) 2002-2010 Exar Corp.
d44570e4 4 *
1da177e4
LT
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722
K
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
18 * questions regaring the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code part that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4 27 * The module loadable parameters that are supported by the driver and a brief
a2a20aef 28 * explanation of all the variables.
9dc737a7 29 *
20346722
K
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
6d517a27 35 * values are 1, 2.
1da177e4 36 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
8abc4d5b 40 * 2(MSI_X). Default value is '2(MSI_X)'
9dc737a7
AR
41 * lro_max_pkts: This parameter defines maximum number of packets can be
42 * aggregated as a single large packet
926930b2
SS
43 * napi: This parameter used to enable/disable NAPI (polling Rx)
44 * Possible values '1' for enable and '0' for disable. Default is '1'
926930b2
SS
45 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
46 * Possible values '1' for enable , '0' for disable.
47 * Default is '2' - which means disable in promisc mode
48 * and enable in non-promiscuous mode.
3a3d5756
SH
49 * multiq: This parameter used to enable/disable MULTIQUEUE support.
50 * Possible values '1' for enable and '0' for disable. Default is '0'
1da177e4
LT
51 ************************************************************************/
52
6cef2b8e
JP
53#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54
1da177e4
LT
55#include <linux/module.h>
56#include <linux/types.h>
57#include <linux/errno.h>
58#include <linux/ioport.h>
59#include <linux/pci.h>
1e7f0bd8 60#include <linux/dma-mapping.h>
1da177e4
LT
61#include <linux/kernel.h>
62#include <linux/netdevice.h>
63#include <linux/etherdevice.h>
40239396 64#include <linux/mdio.h>
1da177e4
LT
65#include <linux/skbuff.h>
66#include <linux/init.h>
67#include <linux/delay.h>
68#include <linux/stddef.h>
69#include <linux/ioctl.h>
70#include <linux/timex.h>
1da177e4 71#include <linux/ethtool.h>
1da177e4 72#include <linux/workqueue.h>
be3a6b02 73#include <linux/if_vlan.h>
7d3d0439
RA
74#include <linux/ip.h>
75#include <linux/tcp.h>
d44570e4
JP
76#include <linux/uaccess.h>
77#include <linux/io.h>
2208e9a7 78#include <linux/io-64-nonatomic-lo-hi.h>
5a0e3ad6 79#include <linux/slab.h>
70c71606 80#include <linux/prefetch.h>
7d3d0439 81#include <net/tcp.h>
9a18dd15 82#include <net/checksum.h>
1da177e4 83
fe931395 84#include <asm/div64.h>
330ce0de 85#include <asm/irq.h>
1da177e4
LT
86
87/* local include */
88#include "s2io.h"
89#include "s2io-regs.h"
90
11410b62 91#define DRV_VERSION "2.0.26.28"
6c1792f4 92
1da177e4 93/* S2io Driver name & version. */
c0dbf37e
JM
94static const char s2io_driver_name[] = "Neterion";
95static const char s2io_driver_version[] = DRV_VERSION;
1da177e4 96
c0dbf37e
JM
97static const int rxd_size[2] = {32, 48};
98static const int rxd_count[2] = {127, 85};
da6971d8 99
1ee6dd77 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd
K
101{
102 int ret;
103
104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
d44570e4 105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
5e25b9dd
K
106
107 return ret;
108}
109
20346722 110/*
1da177e4
LT
111 * Cards with following subsystem_id have a link state indication
112 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
113 * macro below identifies these cards given the subsystem_id.
114 */
d44570e4
JP
115#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
116 (dev_type == XFRAME_I_DEVICE) ? \
117 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
118 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
1da177e4
LT
119
120#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
1da177e4 122
d44570e4 123static inline int is_s2io_card_up(const struct s2io_nic *sp)
92b84437
SS
124{
125 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126}
127
1da177e4 128/* Ethtool related variables and Macros. */
6fce365d 129static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
1da177e4
LT
130 "Register test\t(offline)",
131 "Eeprom test\t(offline)",
132 "Link test\t(online)",
133 "RLDRAM test\t(offline)",
134 "BIST Test\t(offline)"
135};
136
6fce365d 137static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
1da177e4
LT
138 {"tmac_frms"},
139 {"tmac_data_octets"},
140 {"tmac_drop_frms"},
141 {"tmac_mcst_frms"},
142 {"tmac_bcst_frms"},
143 {"tmac_pause_ctrl_frms"},
bd1034f0
AR
144 {"tmac_ttl_octets"},
145 {"tmac_ucst_frms"},
146 {"tmac_nucst_frms"},
1da177e4 147 {"tmac_any_err_frms"},
bd1034f0 148 {"tmac_ttl_less_fb_octets"},
1da177e4
LT
149 {"tmac_vld_ip_octets"},
150 {"tmac_vld_ip"},
151 {"tmac_drop_ip"},
152 {"tmac_icmp"},
153 {"tmac_rst_tcp"},
154 {"tmac_tcp"},
155 {"tmac_udp"},
156 {"rmac_vld_frms"},
157 {"rmac_data_octets"},
158 {"rmac_fcs_err_frms"},
159 {"rmac_drop_frms"},
160 {"rmac_vld_mcst_frms"},
161 {"rmac_vld_bcst_frms"},
162 {"rmac_in_rng_len_err_frms"},
bd1034f0 163 {"rmac_out_rng_len_err_frms"},
1da177e4
LT
164 {"rmac_long_frms"},
165 {"rmac_pause_ctrl_frms"},
bd1034f0
AR
166 {"rmac_unsup_ctrl_frms"},
167 {"rmac_ttl_octets"},
168 {"rmac_accepted_ucst_frms"},
169 {"rmac_accepted_nucst_frms"},
1da177e4 170 {"rmac_discarded_frms"},
bd1034f0
AR
171 {"rmac_drop_events"},
172 {"rmac_ttl_less_fb_octets"},
173 {"rmac_ttl_frms"},
1da177e4
LT
174 {"rmac_usized_frms"},
175 {"rmac_osized_frms"},
176 {"rmac_frag_frms"},
177 {"rmac_jabber_frms"},
bd1034f0
AR
178 {"rmac_ttl_64_frms"},
179 {"rmac_ttl_65_127_frms"},
180 {"rmac_ttl_128_255_frms"},
181 {"rmac_ttl_256_511_frms"},
182 {"rmac_ttl_512_1023_frms"},
183 {"rmac_ttl_1024_1518_frms"},
1da177e4
LT
184 {"rmac_ip"},
185 {"rmac_ip_octets"},
186 {"rmac_hdr_err_ip"},
187 {"rmac_drop_ip"},
188 {"rmac_icmp"},
189 {"rmac_tcp"},
190 {"rmac_udp"},
191 {"rmac_err_drp_udp"},
bd1034f0
AR
192 {"rmac_xgmii_err_sym"},
193 {"rmac_frms_q0"},
194 {"rmac_frms_q1"},
195 {"rmac_frms_q2"},
196 {"rmac_frms_q3"},
197 {"rmac_frms_q4"},
198 {"rmac_frms_q5"},
199 {"rmac_frms_q6"},
200 {"rmac_frms_q7"},
201 {"rmac_full_q0"},
202 {"rmac_full_q1"},
203 {"rmac_full_q2"},
204 {"rmac_full_q3"},
205 {"rmac_full_q4"},
206 {"rmac_full_q5"},
207 {"rmac_full_q6"},
208 {"rmac_full_q7"},
1da177e4 209 {"rmac_pause_cnt"},
bd1034f0
AR
210 {"rmac_xgmii_data_err_cnt"},
211 {"rmac_xgmii_ctrl_err_cnt"},
1da177e4
LT
212 {"rmac_accepted_ip"},
213 {"rmac_err_tcp"},
bd1034f0
AR
214 {"rd_req_cnt"},
215 {"new_rd_req_cnt"},
216 {"new_rd_req_rtry_cnt"},
217 {"rd_rtry_cnt"},
218 {"wr_rtry_rd_ack_cnt"},
219 {"wr_req_cnt"},
220 {"new_wr_req_cnt"},
221 {"new_wr_req_rtry_cnt"},
222 {"wr_rtry_cnt"},
223 {"wr_disc_cnt"},
224 {"rd_rtry_wr_ack_cnt"},
225 {"txp_wr_cnt"},
226 {"txd_rd_cnt"},
227 {"txd_wr_cnt"},
228 {"rxd_rd_cnt"},
229 {"rxd_wr_cnt"},
230 {"txf_rd_cnt"},
fa1f0cb3
SS
231 {"rxf_wr_cnt"}
232};
233
6fce365d 234static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
bd1034f0
AR
235 {"rmac_ttl_1519_4095_frms"},
236 {"rmac_ttl_4096_8191_frms"},
237 {"rmac_ttl_8192_max_frms"},
238 {"rmac_ttl_gt_max_frms"},
239 {"rmac_osized_alt_frms"},
240 {"rmac_jabber_alt_frms"},
241 {"rmac_gt_max_alt_frms"},
242 {"rmac_vlan_frms"},
243 {"rmac_len_discard"},
244 {"rmac_fcs_discard"},
245 {"rmac_pf_discard"},
246 {"rmac_da_discard"},
247 {"rmac_red_discard"},
248 {"rmac_rts_discard"},
249 {"rmac_ingm_full_discard"},
fa1f0cb3
SS
250 {"link_fault_cnt"}
251};
252
6fce365d 253static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
7ba013ac
K
254 {"\n DRIVER STATISTICS"},
255 {"single_bit_ecc_errs"},
256 {"double_bit_ecc_errs"},
bd1034f0
AR
257 {"parity_err_cnt"},
258 {"serious_err_cnt"},
259 {"soft_reset_cnt"},
260 {"fifo_full_cnt"},
8116f3cf
SS
261 {"ring_0_full_cnt"},
262 {"ring_1_full_cnt"},
263 {"ring_2_full_cnt"},
264 {"ring_3_full_cnt"},
265 {"ring_4_full_cnt"},
266 {"ring_5_full_cnt"},
267 {"ring_6_full_cnt"},
268 {"ring_7_full_cnt"},
43b7c451
SH
269 {"alarm_transceiver_temp_high"},
270 {"alarm_transceiver_temp_low"},
271 {"alarm_laser_bias_current_high"},
272 {"alarm_laser_bias_current_low"},
273 {"alarm_laser_output_power_high"},
274 {"alarm_laser_output_power_low"},
275 {"warn_transceiver_temp_high"},
276 {"warn_transceiver_temp_low"},
277 {"warn_laser_bias_current_high"},
278 {"warn_laser_bias_current_low"},
279 {"warn_laser_output_power_high"},
280 {"warn_laser_output_power_low"},
281 {"lro_aggregated_pkts"},
282 {"lro_flush_both_count"},
283 {"lro_out_of_sequence_pkts"},
284 {"lro_flush_due_to_max_pkts"},
285 {"lro_avg_aggr_pkts"},
286 {"mem_alloc_fail_cnt"},
287 {"pci_map_fail_cnt"},
288 {"watchdog_timer_cnt"},
289 {"mem_allocated"},
290 {"mem_freed"},
291 {"link_up_cnt"},
292 {"link_down_cnt"},
293 {"link_up_time"},
294 {"link_down_time"},
295 {"tx_tcode_buf_abort_cnt"},
296 {"tx_tcode_desc_abort_cnt"},
297 {"tx_tcode_parity_err_cnt"},
298 {"tx_tcode_link_loss_cnt"},
299 {"tx_tcode_list_proc_err_cnt"},
300 {"rx_tcode_parity_err_cnt"},
301 {"rx_tcode_abort_cnt"},
302 {"rx_tcode_parity_abort_cnt"},
303 {"rx_tcode_rda_fail_cnt"},
304 {"rx_tcode_unkn_prot_cnt"},
305 {"rx_tcode_fcs_err_cnt"},
306 {"rx_tcode_buf_size_err_cnt"},
307 {"rx_tcode_rxd_corrupt_cnt"},
308 {"rx_tcode_unkn_err_cnt"},
8116f3cf
SS
309 {"tda_err_cnt"},
310 {"pfc_err_cnt"},
311 {"pcc_err_cnt"},
312 {"tti_err_cnt"},
313 {"tpa_err_cnt"},
314 {"sm_err_cnt"},
315 {"lso_err_cnt"},
316 {"mac_tmac_err_cnt"},
317 {"mac_rmac_err_cnt"},
318 {"xgxs_txgxs_err_cnt"},
319 {"xgxs_rxgxs_err_cnt"},
320 {"rc_err_cnt"},
321 {"prc_pcix_err_cnt"},
322 {"rpa_err_cnt"},
323 {"rda_err_cnt"},
324 {"rti_err_cnt"},
325 {"mc_err_cnt"}
1da177e4
LT
326};
327
4c3616cd
AMR
328#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
329#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
330#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
fa1f0cb3 331
d44570e4
JP
332#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
fa1f0cb3 334
d44570e4
JP
335#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
1da177e4 337
4c3616cd 338#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
d44570e4 339#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
1da177e4 340
2fd37688
SS
341/* copy mac addr to def_mac_addr array */
342static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
343{
344 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
345 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
346 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
347 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
348 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
349 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
350}
04025095 351
20346722 352/*
1da177e4
LT
353 * Constants to be programmed into the Xena's registers, to configure
354 * the XAUI.
355 */
356
1da177e4 357#define END_SIGN 0x0
f71e1309 358static const u64 herc_act_dtx_cfg[] = {
541ae68f 359 /* Set address */
e960fc5c 360 0x8000051536750000ULL, 0x80000515367500E0ULL,
541ae68f 361 /* Write data */
e960fc5c 362 0x8000051536750004ULL, 0x80000515367500E4ULL,
541ae68f
K
363 /* Set address */
364 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
365 /* Write data */
366 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
367 /* Set address */
e960fc5c 368 0x801205150D440000ULL, 0x801205150D4400E0ULL,
369 /* Write data */
370 0x801205150D440004ULL, 0x801205150D4400E4ULL,
371 /* Set address */
541ae68f
K
372 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
373 /* Write data */
374 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
375 /* Done */
376 END_SIGN
377};
378
f71e1309 379static const u64 xena_dtx_cfg[] = {
c92ca04b 380 /* Set address */
1da177e4 381 0x8000051500000000ULL, 0x80000515000000E0ULL,
c92ca04b
AR
382 /* Write data */
383 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
384 /* Set address */
385 0x8001051500000000ULL, 0x80010515000000E0ULL,
386 /* Write data */
387 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
388 /* Set address */
1da177e4 389 0x8002051500000000ULL, 0x80020515000000E0ULL,
c92ca04b
AR
390 /* Write data */
391 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1da177e4
LT
392 END_SIGN
393};
394
20346722 395/*
1da177e4
LT
396 * Constants for Fixing the MacAddress problem seen mostly on
397 * Alpha machines.
398 */
f71e1309 399static const u64 fix_mac[] = {
1da177e4
LT
400 0x0060000000000000ULL, 0x0060600000000000ULL,
401 0x0040600000000000ULL, 0x0000600000000000ULL,
402 0x0020600000000000ULL, 0x0060600000000000ULL,
403 0x0020600000000000ULL, 0x0060600000000000ULL,
404 0x0020600000000000ULL, 0x0060600000000000ULL,
405 0x0020600000000000ULL, 0x0060600000000000ULL,
406 0x0020600000000000ULL, 0x0060600000000000ULL,
407 0x0020600000000000ULL, 0x0060600000000000ULL,
408 0x0020600000000000ULL, 0x0060600000000000ULL,
409 0x0020600000000000ULL, 0x0060600000000000ULL,
410 0x0020600000000000ULL, 0x0060600000000000ULL,
411 0x0020600000000000ULL, 0x0060600000000000ULL,
412 0x0020600000000000ULL, 0x0000600000000000ULL,
413 0x0040600000000000ULL, 0x0060600000000000ULL,
414 END_SIGN
415};
416
b41477f3
AR
417MODULE_LICENSE("GPL");
418MODULE_VERSION(DRV_VERSION);
419
420
1da177e4 421/* Module Loadable parameters. */
6cfc482b 422S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
b41477f3 423S2IO_PARM_INT(rx_ring_num, 1);
3a3d5756 424S2IO_PARM_INT(multiq, 0);
b41477f3
AR
425S2IO_PARM_INT(rx_ring_mode, 1);
426S2IO_PARM_INT(use_continuous_tx_intrs, 1);
427S2IO_PARM_INT(rmac_pause_time, 0x100);
428S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
429S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
430S2IO_PARM_INT(shared_splits, 0);
431S2IO_PARM_INT(tmac_util_period, 5);
432S2IO_PARM_INT(rmac_util_period, 5);
b41477f3 433S2IO_PARM_INT(l3l4hdr_size, 128);
6cfc482b
SH
434/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
435S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
303bcb4b 436/* Frequency of Rx desc syncs expressed as power of 2 */
b41477f3 437S2IO_PARM_INT(rxsync_frequency, 3);
eccb8628 438/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
8abc4d5b 439S2IO_PARM_INT(intr_type, 2);
7d3d0439 440/* Large receive offload feature */
43b7c451 441
7d3d0439
RA
442/* Max pkts to be aggregated by LRO at one time. If not specified,
443 * aggregation happens until we hit max IP pkt size(64K)
444 */
b41477f3 445S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
b41477f3 446S2IO_PARM_INT(indicate_max_pkts, 0);
db874e65
SS
447
448S2IO_PARM_INT(napi, 1);
926930b2 449S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
b41477f3
AR
450
451static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
d44570e4 452{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
b41477f3 453static unsigned int rx_ring_sz[MAX_RX_RINGS] =
d44570e4 454{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
b41477f3 455static unsigned int rts_frm_len[MAX_RX_RINGS] =
d44570e4 456{[0 ...(MAX_RX_RINGS - 1)] = 0 };
b41477f3
AR
457
458module_param_array(tx_fifo_len, uint, NULL, 0);
459module_param_array(rx_ring_sz, uint, NULL, 0);
460module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 461
20346722 462/*
1da177e4 463 * S2IO device table.
20346722 464 * This table lists all the devices that this driver supports.
1da177e4 465 */
9baa3c34 466static const struct pci_device_id s2io_tbl[] = {
1da177e4
LT
467 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
468 PCI_ANY_ID, PCI_ANY_ID},
469 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
470 PCI_ANY_ID, PCI_ANY_ID},
471 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
d44570e4
JP
472 PCI_ANY_ID, PCI_ANY_ID},
473 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
474 PCI_ANY_ID, PCI_ANY_ID},
1da177e4
LT
475 {0,}
476};
477
478MODULE_DEVICE_TABLE(pci, s2io_tbl);
479
3646f0e5 480static const struct pci_error_handlers s2io_err_handler = {
d796fdb7
LV
481 .error_detected = s2io_io_error_detected,
482 .slot_reset = s2io_io_slot_reset,
483 .resume = s2io_io_resume,
484};
485
1da177e4 486static struct pci_driver s2io_driver = {
d44570e4
JP
487 .name = "S2IO",
488 .id_table = s2io_tbl,
489 .probe = s2io_init_nic,
3a036ce5 490 .remove = s2io_rem_nic,
d44570e4 491 .err_handler = &s2io_err_handler,
1da177e4
LT
492};
493
494/* A simplifier macro used both by init and free shared_mem Fns(). */
f8a1988f 495#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
1da177e4 496
3a3d5756
SH
497/* netqueue manipulation helper functions */
498static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
499{
fd2ea0a7
DM
500 if (!sp->config.multiq) {
501 int i;
502
3a3d5756
SH
503 for (i = 0; i < sp->config.tx_fifo_num; i++)
504 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
3a3d5756 505 }
fd2ea0a7 506 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
507}
508
509static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
510{
fd2ea0a7 511 if (!sp->config.multiq)
3a3d5756
SH
512 sp->mac_control.fifos[fifo_no].queue_state =
513 FIFO_QUEUE_STOP;
fd2ea0a7
DM
514
515 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
516}
517
518static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
519{
fd2ea0a7
DM
520 if (!sp->config.multiq) {
521 int i;
522
3a3d5756
SH
523 for (i = 0; i < sp->config.tx_fifo_num; i++)
524 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 525 }
fd2ea0a7 526 netif_tx_start_all_queues(sp->dev);
3a3d5756
SH
527}
528
3a3d5756
SH
529static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
530{
fd2ea0a7
DM
531 if (!sp->config.multiq) {
532 int i;
533
3a3d5756
SH
534 for (i = 0; i < sp->config.tx_fifo_num; i++)
535 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 536 }
fd2ea0a7 537 netif_tx_wake_all_queues(sp->dev);
3a3d5756
SH
538}
539
540static inline void s2io_wake_tx_queue(
541 struct fifo_info *fifo, int cnt, u8 multiq)
542{
543
3a3d5756
SH
544 if (multiq) {
545 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
546 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
b19fa1fa 547 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
548 if (netif_queue_stopped(fifo->dev)) {
549 fifo->queue_state = FIFO_QUEUE_START;
550 netif_wake_queue(fifo->dev);
551 }
552 }
553}
554
1da177e4
LT
555/**
556 * init_shared_mem - Allocation and Initialization of Memory
557 * @nic: Device private variable.
20346722
K
558 * Description: The function allocates all the memory areas shared
559 * between the NIC and the driver. This includes Tx descriptors,
1da177e4
LT
560 * Rx descriptors and the statistics block.
561 */
562
static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	/* Running total of bytes allocated, folded into sw_stat at the end. */
	unsigned long long mem_allocated = 0;

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	/* How many TxD lists fit into one page; lists never straddle pages. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			/* Caller invokes free_shared_mem() on failure. */
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			/* Carve the page into per-descriptor-list slots. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		/* Each block holds rxd_count RxDs plus one link descriptor. */
		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;

			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			/* Record per-RxD virtual/DMA addresses inside block. */
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			/* Circular list: last block links back to the first. */
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					/* Over-allocate, then round the usable
					 * pointer up to an ALIGN_SIZE boundary;
					 * ba_0_org/ba_1_org keep the address
					 * needed later by kfree().
					 */
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		pci_alloc_consistent(nic->pdev, size,
				     &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
860
20346722
K
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	swstats = &stats->sw_stat;

	/* TxDL list geometry: how many descriptor lists fit in one page */
	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Free per-FIFO TxDL pages and the list_info bookkeeping arrays */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			/* No list_info means init never got this far;
			 * nothing else can have been allocated either.
			 */
			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			/* A NULL virt addr marks the end of allocated pages */
			if (!fli->list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    fli->list_virt_addr,
					    fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	/* Free Rx descriptor blocks and their rxd bookkeeping arrays */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* *_org pointers are the unaligned
					 * originals returned by kmalloc
					 */
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	/* Free the per-FIFO UFO in-band buffers, if any were allocated */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	/* Finally release the statistics block.
	 * NOTE(review): swstats lives inside this block, so the counter
	 * update must happen before the free — keep this ordering.
	 */
	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1002
541ae68f
K
1003/**
1004 * s2io_verify_pci_mode -
1005 */
1006
1ee6dd77 1007static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 1008{
1ee6dd77 1009 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1010 register u64 val64 = 0;
1011 int mode;
1012
1013 val64 = readq(&bar0->pci_mode);
1014 mode = (u8)GET_PCI_MODE(val64);
1015
d44570e4 1016 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f
K
1017 return -1; /* Unknown PCI mode */
1018 return mode;
1019}
1020
c92ca04b
AR
1021#define NEC_VENID 0x1033
1022#define NEC_DEVID 0x0125
1023static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1024{
1025 struct pci_dev *tdev = NULL;
008d845c 1026 for_each_pci_dev(tdev) {
26d36b64 1027 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
7ad62dbc 1028 if (tdev->bus == s2io_pdev->bus->parent) {
26d36b64 1029 pci_dev_put(tdev);
c92ca04b 1030 return 1;
7ad62dbc 1031 }
c92ca04b
AR
1032 }
1033 }
1034 return 0;
1035}
541ae68f 1036
7b32a312 1037static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
541ae68f
K
1038/**
1039 * s2io_print_pci_mode -
1040 */
1ee6dd77 1041static int s2io_print_pci_mode(struct s2io_nic *nic)
541ae68f 1042{
1ee6dd77 1043 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1044 register u64 val64 = 0;
1045 int mode;
1046 struct config_param *config = &nic->config;
9e39f7c5 1047 const char *pcimode;
541ae68f
K
1048
1049 val64 = readq(&bar0->pci_mode);
1050 mode = (u8)GET_PCI_MODE(val64);
1051
d44570e4 1052 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f
K
1053 return -1; /* Unknown PCI mode */
1054
c92ca04b
AR
1055 config->bus_speed = bus_speed[mode];
1056
1057 if (s2io_on_nec_bridge(nic->pdev)) {
1058 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
d44570e4 1059 nic->dev->name);
c92ca04b
AR
1060 return mode;
1061 }
1062
d44570e4
JP
1063 switch (mode) {
1064 case PCI_MODE_PCI_33:
9e39f7c5 1065 pcimode = "33MHz PCI bus";
d44570e4
JP
1066 break;
1067 case PCI_MODE_PCI_66:
9e39f7c5 1068 pcimode = "66MHz PCI bus";
d44570e4
JP
1069 break;
1070 case PCI_MODE_PCIX_M1_66:
9e39f7c5 1071 pcimode = "66MHz PCIX(M1) bus";
d44570e4
JP
1072 break;
1073 case PCI_MODE_PCIX_M1_100:
9e39f7c5 1074 pcimode = "100MHz PCIX(M1) bus";
d44570e4
JP
1075 break;
1076 case PCI_MODE_PCIX_M1_133:
9e39f7c5 1077 pcimode = "133MHz PCIX(M1) bus";
d44570e4
JP
1078 break;
1079 case PCI_MODE_PCIX_M2_66:
9e39f7c5 1080 pcimode = "133MHz PCIX(M2) bus";
d44570e4
JP
1081 break;
1082 case PCI_MODE_PCIX_M2_100:
9e39f7c5 1083 pcimode = "200MHz PCIX(M2) bus";
d44570e4
JP
1084 break;
1085 case PCI_MODE_PCIX_M2_133:
9e39f7c5 1086 pcimode = "266MHz PCIX(M2) bus";
d44570e4
JP
1087 break;
1088 default:
9e39f7c5
JP
1089 pcimode = "unsupported bus!";
1090 mode = -1;
541ae68f
K
1091 }
1092
9e39f7c5
JP
1093 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1094 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1095
541ae68f
K
1096 return mode;
1097}
1098
b7c5678f
RV
/**
 * init_tti - Initialization transmit traffic interrupt scheme
 * @nic: device private variable
 * @link: link status (UP/DOWN) used to enable/disable continuous
 * transmit interrupts
 * Description: The function configures transmit traffic interrupts
 * Return Value:  SUCCESS on success and
 * '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	/* Program one TTI entry per configured Tx FIFO via the indirect
	 * tti_data1/data2 + command-memory interface.
	 */
	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Scale the timer with the detected bus speed */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on FIFO 0, only when the
		 * module parameter allows it and the link is up.
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		/* Utilization-frequency counts; MSI-X gets one profile,
		 * INTA gets a lower profile except for the UDP-steered
		 * FIFO range which is tuned separately.
		 */
		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit the staged data for entry i and wait for the
		 * strobe bit to clear (command completion).
		 */
		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1175
20346722
K
/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value:  SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* to set the swapper controle on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/* Poll up to 50 x 10ms for RIC_RUNNING to clear */
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/* Enable Receiving broadcasts.
	 * NOTE(review): mac_cfg appears to be key-protected — the cfg key
	 * is rewritten before each 32-bit half-write; pattern repeats
	 * below for pad/FCS stripping.  Confirm against the Xena spec.
	 */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/* Push the device-specific DTX configuration sequence, terminated
	 * by END_SIGN, into the dtx_control register.
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	/* Two FIFOs are packed per 64-bit partition register (j selects
	 * the 32-bit half); for an odd FIFO count the last index is
	 * bumped so the final partial register still gets written.
	 */
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
		TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL |
		TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.  Queue 0 additionally absorbs the remainder
	 * so the full mem_size is always distributed.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority.
	 * Each byte of the five registers names the FIFO that gets the
	 * next scheduling slot.
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that user not
		 * specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG,
				  "%s: rts_ds_steer failed on codepoint %d\n",
				  dev->name, i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* Initialize TTI */
	if (SUCCESS != init_tti(nic, nic->last_link_state))
		return -ENODEV;

	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate Apprx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125)/4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		RTI_DATA1_MEM_RX_URNG_B(0x10) |
		RTI_DATA1_MEM_RX_URNG_C(0x30) |
		RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
	if (nic->config.intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
			  RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
			  RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	/* Commit one RTI entry per ring through the command memory */
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE |
			RTI_CMD_MEM_STROBE_NEW_CMD |
			RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete, if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (true) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;

			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
					  dev->name);
				return -ENODEV;
			}
			time++;
			msleep(50);
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q0q3)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q4q7)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	/* CX4 boards get a larger average inter-packet gap */
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
a371a07d
K
1815#define LINK_UP_DOWN_INTERRUPT 1
1816#define MAC_RMAC_ERR_TIMER 2
1817
1ee6dd77 1818static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d
K
1819{
1820 if (nic->device_type == XFRAME_II_DEVICE)
1821 return LINK_UP_DOWN_INTERRUPT;
1822 else
1823 return MAC_RMAC_ERR_TIMER;
1824}
8116f3cf 1825
9caab458
SS
1826/**
1827 * do_s2io_write_bits - update alarm bits in alarm register
1828 * @value: alarm bits
1829 * @flag: interrupt status
1830 * @addr: address value
1831 * Description: update alarm bits in alarm register
1832 * Return Value:
1833 * NONE.
1834 */
1835static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1836{
1837 u64 temp64;
1838
1839 temp64 = readq(addr);
1840
d44570e4
JP
1841 if (flag == ENABLE_INTRS)
1842 temp64 &= ~((u64)value);
9caab458 1843 else
d44570e4 1844 temp64 |= ((u64)value);
9caab458
SS
1845 writeq(temp64, addr);
1846}
1da177e4 1847
43b7c451 1848static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
9caab458
SS
1849{
1850 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1851 register u64 gen_int_mask = 0;
01e16faa 1852 u64 interruptible;
9caab458 1853
01e16faa 1854 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
9caab458 1855 if (mask & TX_DMA_INTR) {
9caab458
SS
1856 gen_int_mask |= TXDMA_INT_M;
1857
1858 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
d44570e4
JP
1859 TXDMA_PCC_INT | TXDMA_TTI_INT |
1860 TXDMA_LSO_INT | TXDMA_TPA_INT |
1861 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
9caab458
SS
1862
1863 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
d44570e4
JP
1864 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1865 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1866 &bar0->pfc_err_mask);
9caab458
SS
1867
1868 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
d44570e4
JP
1869 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1870 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
9caab458
SS
1871
1872 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
d44570e4
JP
1873 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1874 PCC_N_SERR | PCC_6_COF_OV_ERR |
1875 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1876 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1877 PCC_TXB_ECC_SG_ERR,
1878 flag, &bar0->pcc_err_mask);
9caab458
SS
1879
1880 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
d44570e4 1881 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
9caab458
SS
1882
1883 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
d44570e4
JP
1884 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1885 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1886 flag, &bar0->lso_err_mask);
9caab458
SS
1887
1888 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
d44570e4 1889 flag, &bar0->tpa_err_mask);
9caab458
SS
1890
1891 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
9caab458
SS
1892 }
1893
1894 if (mask & TX_MAC_INTR) {
1895 gen_int_mask |= TXMAC_INT_M;
1896 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
d44570e4 1897 &bar0->mac_int_mask);
9caab458 1898 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
d44570e4
JP
1899 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1900 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1901 flag, &bar0->mac_tmac_err_mask);
9caab458
SS
1902 }
1903
1904 if (mask & TX_XGXS_INTR) {
1905 gen_int_mask |= TXXGXS_INT_M;
1906 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
d44570e4 1907 &bar0->xgxs_int_mask);
9caab458 1908 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
d44570e4
JP
1909 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1910 flag, &bar0->xgxs_txgxs_err_mask);
9caab458
SS
1911 }
1912
1913 if (mask & RX_DMA_INTR) {
1914 gen_int_mask |= RXDMA_INT_M;
1915 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
d44570e4
JP
1916 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1917 flag, &bar0->rxdma_int_mask);
9caab458 1918 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
d44570e4
JP
1919 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1920 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1921 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
9caab458 1922 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
d44570e4
JP
1923 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1924 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1925 &bar0->prc_pcix_err_mask);
9caab458 1926 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
d44570e4
JP
1927 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1928 &bar0->rpa_err_mask);
9caab458 1929 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
d44570e4
JP
1930 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1931 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1932 RDA_FRM_ECC_SG_ERR |
1933 RDA_MISC_ERR|RDA_PCIX_ERR,
1934 flag, &bar0->rda_err_mask);
9caab458 1935 do_s2io_write_bits(RTI_SM_ERR_ALARM |
d44570e4
JP
1936 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1937 flag, &bar0->rti_err_mask);
9caab458
SS
1938 }
1939
1940 if (mask & RX_MAC_INTR) {
1941 gen_int_mask |= RXMAC_INT_M;
1942 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
d44570e4
JP
1943 &bar0->mac_int_mask);
1944 interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1945 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1946 RMAC_DOUBLE_ECC_ERR);
01e16faa
SH
1947 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1948 interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1949 do_s2io_write_bits(interruptible,
d44570e4 1950 flag, &bar0->mac_rmac_err_mask);
9caab458
SS
1951 }
1952
d44570e4 1953 if (mask & RX_XGXS_INTR) {
9caab458
SS
1954 gen_int_mask |= RXXGXS_INT_M;
1955 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
d44570e4 1956 &bar0->xgxs_int_mask);
9caab458 1957 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
d44570e4 1958 &bar0->xgxs_rxgxs_err_mask);
9caab458
SS
1959 }
1960
1961 if (mask & MC_INTR) {
1962 gen_int_mask |= MC_INT_M;
d44570e4
JP
1963 do_s2io_write_bits(MC_INT_MASK_MC_INT,
1964 flag, &bar0->mc_int_mask);
9caab458 1965 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
d44570e4
JP
1966 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1967 &bar0->mc_err_mask);
9caab458
SS
1968 }
1969 nic->general_int_mask = gen_int_mask;
1970
1971 /* Remove this line when alarm interrupts are enabled */
1972 nic->general_int_mask = 0;
1973}
d44570e4 1974
20346722
K
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the mask saved by en_dis_err_alarms() so alarm
	 * bits already selected there are preserved. */
	intr_mask = nic->general_int_mask;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/* Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				/* Link faults come in via GPIO interrupt:
				 * unmask only GPIO link-up. */
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* Finally apply the accumulated top-level mask: a cleared bit in
	 * general_int_mask enables that block; disabling masks everything. */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the mask as the hardware now sees it */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2064
19a60522
SS
2065/**
2066 * verify_pcc_quiescent- Checks for PCC quiescent state
2067 * Return: 1 If PCC is quiescence
2068 * 0 If PCC is not quiescence
2069 */
1ee6dd77 2070static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
20346722 2071{
19a60522 2072 int ret = 0, herc;
1ee6dd77 2073 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522 2074 u64 val64 = readq(&bar0->adapter_status);
8a4bdbaa 2075
19a60522 2076 herc = (sp->device_type == XFRAME_II_DEVICE);
20346722 2077
f957bcf0 2078 if (flag == false) {
44c10138 2079 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
19a60522 2080 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2081 ret = 1;
19a60522
SS
2082 } else {
2083 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2084 ret = 1;
20346722
K
2085 }
2086 } else {
44c10138 2087 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
5e25b9dd 2088 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
19a60522 2089 ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2090 ret = 1;
5e25b9dd
K
2091 } else {
2092 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
19a60522 2093 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2094 ret = 1;
20346722
K
2095 }
2096 }
2097
2098 return ret;
2099}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * @sp: device private structure
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether adapter enable bit was written or not the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 If xena is quiescence
 * 0 If Xena is not quiescence
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	/* One snapshot of adapter_status; every readiness bit below is
	 * tested against this single read. */
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE &&
	    mode != PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
		return 0;
	}
	/* RC_PRC_QUIESCENT is a multi-bit field: require all bits set */
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
		return 0;
	}
	return 1;
}
2168
2169/**
2170 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2171 * @sp: Pointer to device specifc structure
20346722 2172 * Description :
1da177e4
LT
2173 * New procedure to clear mac address reading problems on Alpha platforms
2174 *
2175 */
2176
d44570e4 2177static void fix_mac_address(struct s2io_nic *sp)
1da177e4 2178{
1ee6dd77 2179 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
2180 int i = 0;
2181
2182 while (fix_mac[i] != END_SIGN) {
2183 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 2184 udelay(10);
d83d282b 2185 (void) readq(&bar0->gpio_control);
1da177e4
LT
2186 }
2187}
2188
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called,all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* PRC Initialization and configuration */
	/* Point each receive ring's PRC at its first descriptor block and
	 * enable it, selecting the ring mode and RxD backoff interval. */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Module parameter: optionally disable hardware VLAN tag stripping */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): this clears ADAPTER_ECC_EN despite the comment —
	 * presumably the bit is active-low or misnamed; confirm against
	 * the Xframe register spec before changing. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
fed5eccd
AR
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo the descriptor list belongs to
 * @txdlp: first TxD of the descriptor list
 * @get_off: descriptor offset (currently unused in the body)
 *
 * Unmaps every DMA buffer referenced by the descriptor list, zeroes the
 * descriptors, and returns the skb they carried (NULL if none).
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* A leading UFO in-band descriptor maps a single u64; unmap it
	 * and step past it before looking for the skb. */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
				 sizeof(u64), PCI_DMA_TODEVICE);
		txds++;
	}

	/* Host_Control of the (first real) descriptor holds the skb */
	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear part, then one descriptor per page fragment */
	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev,
				       (dma_addr_t)txds->Buffer_Pointer,
				       skb_frag_size(frag), PCI_DMA_TODEVICE);
		}
	}
	/* Clear the whole descriptor list for reuse */
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
1da177e4 2346
20346722
K
/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct TxD *txdp;
	int i, j;
	int cnt = 0;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Walk every fifo under its tx_lock, reclaiming the skb (if any)
	 * behind each descriptor list via s2io_txdl_getskb(). */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		struct fifo_info *fifo = &mac_control->fifos[i];
		unsigned long flags;

		spin_lock_irqsave(&fifo->tx_lock, flags);
		for (j = 0; j < tx_cfg->fifo_len; j++) {
			txdp = fifo->list_info[j].list_virt_addr;
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			if (skb) {
				swstats->mem_freed += skb->truesize;
				dev_kfree_skb(skb);
				cnt++;
			}
		}
		DBG_PRINT(INTR_DBG,
			  "%s: forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		/* Reset producer/consumer offsets for this fifo */
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_put_info.offset = 0;
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
	}
}
2390
20346722
K
2391/**
2392 * stop_nic - To stop the nic
1da177e4 2393 * @nic ; device private variable.
20346722
K
2394 * Description:
2395 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2396 * function does. This function is called to stop the device.
2397 * Return Value:
2398 * void.
2399 */
2400
2401static void stop_nic(struct s2io_nic *nic)
2402{
1ee6dd77 2403 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4 2404 register u64 val64 = 0;
5d3213cc 2405 u16 interruptible;
1da177e4
LT
2406
2407 /* Disable all interrupts */
9caab458 2408 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
e960fc5c 2409 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 2410 interruptible |= TX_PIC_INTR;
1da177e4
LT
2411 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2412
5d3213cc
AR
2413 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2414 val64 = readq(&bar0->adapter_control);
2415 val64 &= ~(ADAPTER_CNTL_EN);
2416 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2417}
2418
20346722
K
/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic: device private variable
 * @ring: per ring structure
 * @from_card_up: If this is true, we will map the buffer to get
 * the dma address for buf0 and buf1 to give it to the card.
 * Else we will sync the already mapped buffer to give it to the card.
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * address of these buffers into the RxD buffer pointers, so that the NIC
 * can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. Five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 Header,
 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 * is split into 3 fragments. As of now only single buffer mode is
 * supported.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of descriptors we still have to replenish */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* Put pointer has caught up with get pointer and the
		 * descriptor is still in use: the ring is full. */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* End of block reached: advance (and wrap) the block index */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor already owned by the adapter (and, in 3B mode,
		 * already carrying a buffer): nothing more to fill. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = netdev_alloc_skb(nic->dev, size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Hand the adapter whatever was filled so far */
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				pci_map_single(ring->pdev, skb->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(nic->pdev,
						  rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE boundary */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0: map on first fill, just sync on refill */
			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					pci_map_single(ring->pdev, ba->ba_0,
						       BUF0_LEN,
						       PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
							       (dma_addr_t)rxdp3->Buffer0_ptr,
							       BUF0_LEN,
							       PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			/* NOTE: always true inside this branch — kept from
			 * the original structure. */
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
								    skb->data,
								    ring->mtu + 4,
								    PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
							       ba->ba_1,
							       BUF1_LEN,
							       PCI_DMA_FROMDEVICE);

					if (pci_dma_mapping_error(nic->pdev,
								  rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping
						 * before bailing out */
						pci_unmap_single(ring->pdev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Hand ownership to the NIC immediately except every
		 * 2^rxsync_frequency-th descriptor, which is deferred and
		 * flipped in batches (see first_rxdp below). */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		dma_wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2650
da6971d8
AR
/*
 * free_rxd_blk - free all skbs and DMA mappings of one Rx descriptor block
 * @sp: device private structure
 * @ring_no: index of the receive ring
 * @blk: index of the descriptor block within that ring
 *
 * For every descriptor in the block that still holds an skb, unmaps the
 * buffer(s) according to the descriptor mode, zeroes the descriptor,
 * frees the skb and decrements the ring's rx_bufs_left count.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
			rx_blocks[blk].rxds[j].virt_addr;
		/* Host_Control carries the skb pointer; empty slot if NULL */
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (!skb)
			continue;
		if (sp->rxd_mode == RXD_MODE_1) {
			/* Single-buffer mode: one mapping of full frame size */
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			/* Two-buffer mode: unmap all three buffer pointers */
			rxdp3 = (struct RxD3 *)rxdp;
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer0_ptr,
					 BUF0_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer1_ptr,
					 BUF1_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 dev->mtu + 4,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
2699
1da177e4 2700/**
20346722 2701 * free_rx_buffers - Frees all Rx buffers
1da177e4 2702 * @sp: device private variable.
20346722 2703 * Description:
1da177e4
LT
2704 * This function will free all Rx buffers allocated by host.
2705 * Return Value:
2706 * NONE.
2707 */
2708
2709static void free_rx_buffers(struct s2io_nic *sp)
2710{
2711 struct net_device *dev = sp->dev;
da6971d8 2712 int i, blk = 0, buf_cnt = 0;
ffb5df6c
JP
2713 struct config_param *config = &sp->config;
2714 struct mac_info *mac_control = &sp->mac_control;
1da177e4
LT
2715
2716 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
2717 struct ring_info *ring = &mac_control->rings[i];
2718
da6971d8 2719 for (blk = 0; blk < rx_ring_sz[i]; blk++)
d44570e4 2720 free_rxd_blk(sp, i, blk);
1da177e4 2721
13d866a9
JP
2722 ring->rx_curr_put_info.block_index = 0;
2723 ring->rx_curr_get_info.block_index = 0;
2724 ring->rx_curr_put_info.offset = 0;
2725 ring->rx_curr_get_info.offset = 0;
2726 ring->rx_bufs_left = 0;
9e39f7c5 2727 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
1da177e4
LT
2728 dev->name, buf_cnt, i);
2729 }
2730}
2731
8d8bb39b 2732static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
f61e0a35 2733{
8d8bb39b 2734 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
9e39f7c5
JP
2735 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2736 ring->dev->name);
f61e0a35
SH
2737 }
2738 return 0;
2739}
2740
1da177e4
LT
2741/**
2742 * s2io_poll - Rx interrupt handler for NAPI support
bea3348e 2743 * @napi : pointer to the napi structure.
20346722 2744 * @budget : The number of packets that were budgeted to be processed
1da177e4
LT
2745 * during one pass through the 'Poll" function.
2746 * Description:
2747 * Comes into picture only if NAPI support has been incorporated. It does
2748 * the same thing that rx_intr_handler does, but not in a interrupt context
2749 * also It will process only a given number of packets.
2750 * Return value:
2751 * 0 on success and 1 if there are No Rx packets to be processed.
2752 */
2753
static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	/* Nothing to do if the adapter is not (or no longer) up */
	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	s2io_chk_rx_buffers(nic, ring);

	/*
	 * Consumed less than the budget: the ring is drained, so complete
	 * NAPI and unmask this ring's MSI-X Rx vector again.
	 */
	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);
		/*Re Enable MSI-Rx Vector*/
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		/* per-ring mask byte; ring 0 uses a different unmask value */
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		/* read back to flush the posted write to the device */
		val8 = readb(addr);
	}
	return pkts_processed;
}
d44570e4 2782
f61e0a35
SH
static int s2io_poll_inta(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	int pkts_processed = 0;
	int ring_pkts_processed, i;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Nothing to do if the adapter is not (or no longer) up */
	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	/*
	 * INTA mode has a single NAPI context for all rings, so the budget
	 * is shared: each ring consumes part of it until it runs out.
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];
		ring_pkts_processed = rx_intr_handler(ring, budget);
		s2io_chk_rx_buffers(nic, ring);
		pkts_processed += ring_pkts_processed;
		budget -= ring_pkts_processed;
		if (budget <= 0)
			break;
	}
	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);
		/* Re enable the Rx interrupts for the ring */
		writeq(0, &bar0->rx_traffic_mask);
		/* read back to flush the posted write to the device */
		readl(&bar0->rx_traffic_mask);
	}
	return pkts_processed;
}
20346722 2813
b41477f3 2814#ifdef CONFIG_NET_POLL_CONTROLLER
612eff0e 2815/**
b41477f3 2816 * s2io_netpoll - netpoll event handler entry point
612eff0e
BH
2817 * @dev : pointer to the device structure.
2818 * Description:
b41477f3
AR
2819 * This function will be called by upper layer to check for events on the
2820 * interface in situations where interrupts are disabled. It is used for
2821 * specific in-kernel networking tasks, such as remote consoles and kernel
2822 * debugging over the network (example netdump in RedHat).
612eff0e 2823 */
612eff0e
BH
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	const int irq = nic->pdev->irq;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Device lost to an EEH/PCI error: nothing sane can be done here */
	if (pci_channel_offline(nic->pdev))
		return;

	/* run the Tx/Rx handlers with the device IRQ masked off */
	disable_irq(irq);

	/* acknowledge all pending Rx and Tx traffic interrupts */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* budget 0: in non-NAPI context, process without a limit */
		rx_intr_handler(ring, 0);
	}

	/* replenish the Rx descriptors consumed above */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(irq);
}
2868#endif
2869
20346722 2870/**
1da177e4 2871 * rx_intr_handler - Rx interrupt handler
f61e0a35
SH
2872 * @ring_info: per ring structure.
2873 * @budget: budget for napi processing.
20346722
K
2874 * Description:
2875 * If the interrupt is because of a received frame or if the
1da177e4 2876 * receive ring contains fresh as yet un-processed frames,this function is
20346722
K
2877 * called. It picks out the RxD at which place the last Rx processing had
2878 * stopped and sends the skb to the OSM's Rx handler and then increments
1da177e4
LT
2879 * the offset.
2880 * Return Value:
f61e0a35 2881 * No. of napi packets processed.
1da177e4 2882 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	if (budget <= 0)
		return napi_pkts;

	/* snapshot get/put positions; only the get side is advanced here */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* process descriptors the hardware has handed back to the host */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			/* should never happen for a descriptor marked done */
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
				  ring_data->dev->name);
			return 0;
		}
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
					 rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* Buffer0 (header) is only synced; Buffer2 (payload)
			 * is fully unmapped before being passed up.
			 */
			rxdp3 = (struct RxD3 *)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev,
						    (dma_addr_t)rxdp3->Buffer0_ptr,
						    BUF0_LEN,
						    PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		/* hand the frame to the upper-layer receive path */
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		/* wrap to the next block when this one is exhausted */
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* NAPI budget accounting only applies in NAPI mode */
		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
20346722
K
2979
2980/**
1da177e4
LT
2981 * tx_intr_handler - Transmit interrupt handler
2982 * @nic : device private variable
20346722
K
2983 * Description:
2984 * If an interrupt was raised to indicate DMA complete of the
2985 * Tx packet, this function is called. It identifies the last TxD
2986 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
2987 * DMA'ed into the NICs internal memory.
2988 * Return Value:
2989 * NONE
2990 */
2991
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/*
	 * trylock: if another context already holds the fifo lock it will
	 * reclaim these descriptors itself; do not spin in interrupt context.
	 */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
	/* walk descriptors the NIC has released back to the host */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				swstats->parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch (err_mask) {
			case 2:
				swstats->tx_buf_abort_cnt++;
				break;

			case 3:
				swstats->tx_desc_abort_cnt++;
				break;

			case 7:
				swstats->tx_parity_err_cnt++;
				break;

			case 10:
				swstats->tx_link_loss_cnt++;
				break;

			case 15:
				swstats->tx_list_proc_err_cnt++;
				break;
			}
		}

		/* unmap the DMA buffers and retrieve the transmitted skb */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
				  __func__);
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		swstats->mem_freed += skb->truesize;
		dev_consume_skb_irq(skb);

		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset = get_info.offset;
	}

	/* restart the queue if we freed enough descriptors */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3070
bd1034f0
AR
3071/**
3072 * s2io_mdio_write - Function to write in to MDIO registers
3073 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3074 * @addr : address value
3075 * @value : data value
3076 * @dev : pointer to net_device structure
3077 * Description:
3078 * This function is used to write values to the MDIO registers
3079 * NONE
3080 */
d44570e4
JP
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
			    struct net_device *dev)
{
	u64 val64;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/*
	 * Each MDIO operation is a two-step sequence on the mdio_control
	 * register: program the operation, then set START_TRANS(0xE) and
	 * wait 100us for the serial transaction to complete.
	 */

	/* address transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_MDIO_DATA(value) |
		MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* follow up with a read transaction on the same register;
	 * presumably this lets the write settle — result is discarded.
	 */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}
3117
/**
 * s2io_mdio_read - Function to read from the MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to read values from the MDIO registers
 * NONE
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
			 | MDIO_MMD_DEV_ADDR(mmd_type)
			 | MDIO_MMS_PRT_ADDR(0x0));
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs: data lives in bits 16-31 of mdio_control */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
d44570e4 3159
bd1034f0
AR
3160/**
3161 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
fbfecd37 3162 * @counter : counter value to be updated
bd1034f0
AR
3163 * @flag : flag to indicate the status
3164 * @type : counter type
3165 * Description:
3166 * This function is to check the status of the xpak counters value
3167 * NONE
3168 */
3169
d44570e4
JP
static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
				  u16 flag, u16 type)
{
	u64 mask = 0x3;
	u64 val64;
	int i;
	/*
	 * Each XPAK alarm occupies a 2-bit saturating counter field inside
	 * *regs_stat; 'index' selects which 2-bit field this call operates on.
	 */
	for (i = 0; i < index; i++)
		mask = mask << 0x2;

	if (flag > 0) {
		/* alarm currently asserted: bump the cumulative counter */
		*counter = *counter + 1;
		val64 = *regs_stat & mask;
		val64 = val64 >> (index * 0x2);
		val64 = val64 + 1;
		/* three consecutive asserted samples -> warn the operator */
		if (val64 == 3) {
			switch (type) {
			case 1:
				DBG_PRINT(ERR_DBG,
					  "Take Xframe NIC out of service.\n");
				DBG_PRINT(ERR_DBG,
"Excessive temperatures may result in premature transceiver failure.\n");
				break;
			case 2:
				DBG_PRINT(ERR_DBG,
					  "Take Xframe NIC out of service.\n");
				DBG_PRINT(ERR_DBG,
"Excessive bias currents may indicate imminent laser diode failure.\n");
				break;
			case 3:
				DBG_PRINT(ERR_DBG,
					  "Take Xframe NIC out of service.\n");
				DBG_PRINT(ERR_DBG,
"Excessive laser output power may saturate far-end receiver.\n");
				break;
			default:
				DBG_PRINT(ERR_DBG,
					  "Incorrect XPAK Alarm type\n");
			}
			/* reset the 2-bit field after the alert fires */
			val64 = 0x0;
		}
		/* write the updated 2-bit field back into *regs_stat */
		val64 = val64 << (index * 0x2);
		*regs_stat = (*regs_stat & (~mask)) | (val64);

	} else {
		/* alarm not asserted: clear this field's consecutive count */
		*regs_stat = *regs_stat & (~mask);
	}
}
3217
/**
 * s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function is to update the status of the xpak counters value
 * NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Check the communication with the MDIO slave */
	addr = MDIO_CTRL1;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
	/* all-ones/all-zeroes reads indicate an absent or hung slave */
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG,
			  "ERR: MDIO slave access failed - Returned %llx\n",
			  (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of control reg 1 */
	if (val64 != MDIO_CTRL1_SPEED10G) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
			  "Returned: %llx- Expected: 0x%x\n",
			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	/* NOTE(review): writes val16 (still 0) — presumably a trigger write
	 * that latches the DOM page; confirm against XPAK MSA docs.
	 */
	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* temperature-high alarm (bit 7): tracked via 2-bit counter field 0 */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
			      &xstats->xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		xstats->alarm_transceiver_temp_low++;

	/* laser bias current high alarm (bit 3): counter field 2 */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
			      &xstats->xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		xstats->alarm_laser_bias_current_low++;

	/* laser output power high alarm (bit 1): counter field 4 */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
			      &xstats->xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		xstats->alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* warnings are simple cumulative counters, no consecutive tracking */
	if (CHECKBIT(val64, 0x7))
		xstats->warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		xstats->warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		xstats->warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		xstats->warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		xstats->warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		xstats->warn_laser_output_power_low++;
}
3316
20346722 3317/**
1da177e4 3318 * wait_for_cmd_complete - waits for a command to complete.
20346722 3319 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3320 * s2io_nic structure.
20346722
K
3321 * Description: Function that waits for a command to Write into RMAC
3322 * ADDR DATA registers to be completed and returns either success or
3323 * error depending on whether the command was complete or not.
1da177e4
LT
3324 * Return value:
3325 * SUCCESS on success and FAILURE on failure.
3326 */
3327
9fc93a41 3328static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
d44570e4 3329 int bit_state)
1da177e4 3330{
9fc93a41 3331 int ret = FAILURE, cnt = 0, delay = 1;
1da177e4
LT
3332 u64 val64;
3333
9fc93a41
SS
3334 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3335 return FAILURE;
3336
3337 do {
c92ca04b 3338 val64 = readq(addr);
9fc93a41
SS
3339 if (bit_state == S2IO_BIT_RESET) {
3340 if (!(val64 & busy_bit)) {
3341 ret = SUCCESS;
3342 break;
3343 }
3344 } else {
2d146eb1 3345 if (val64 & busy_bit) {
9fc93a41
SS
3346 ret = SUCCESS;
3347 break;
3348 }
1da177e4 3349 }
c92ca04b 3350
d44570e4 3351 if (in_interrupt())
9fc93a41 3352 mdelay(delay);
c92ca04b 3353 else
9fc93a41 3354 msleep(delay);
c92ca04b 3355
9fc93a41
SS
3356 if (++cnt >= 10)
3357 delay = 50;
3358 } while (cnt < 20);
1da177e4
LT
3359 return ret;
3360}
49ce9c2c 3361/**
19a60522
SS
3362 * check_pci_device_id - Checks if the device id is supported
3363 * @id : device id
3364 * Description: Function to check if the pci device id is supported by driver.
3365 * Return value: Actual device id if supported else PCI_ANY_ID
3366 */
3367static u16 check_pci_device_id(u16 id)
3368{
3369 switch (id) {
3370 case PCI_DEVICE_ID_HERC_WIN:
3371 case PCI_DEVICE_ID_HERC_UNI:
3372 return XFRAME_II_DEVICE;
3373 case PCI_DEVICE_ID_S2IO_UNI:
3374 case PCI_DEVICE_ID_S2IO_WIN:
3375 return XFRAME_I_DEVICE;
3376 default:
3377 return PCI_ANY_ID;
3378 }
3379}
1da177e4 3380
20346722
K
3381/**
3382 * s2io_reset - Resets the card.
1da177e4
LT
3383 * @sp : private member of the device structure.
3384 * Description: Function to Reset the card. This function then also
20346722 3385 * restores the previously saved PCI configuration space registers as
1da177e4
LT
3386 * the card reset also resets the configuration space.
3387 * Return value:
3388 * void.
3389 */
3390
static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* issue the global software reset */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 boards need extra settle time after reset */
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	/* retry restoring PCI config space until the device id reads back */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	/* restore the saved PCI-X command register */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	/* wipe the whole stats block, then put back the counters that
	 * must survive a reset
	 */
	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3503
3504/**
20346722
K
3505 * s2io_set_swapper - to set the swapper controle on the card
3506 * @sp : private member of the device structure,
1da177e4 3507 * pointer to the s2io_nic structure.
20346722 3508 * Description: Function to set the swapper control on the card
1da177e4
LT
3509 * correctly depending on the 'endianness' of the system.
3510 * Return value:
3511 * SUCCESS on success and FAILURE on failure.
3512 */
3513
static int s2io_set_swapper(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		/* Candidate read-path swapper settings, tried in order:
		 * byte-flip and/or bit-flip enabled, then both disabled. */
		static const u64 value[] = {
			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
			0x8100008181000081ULL,	/* FE=1, SE=0 */
			0x4200004242000042ULL,	/* FE=0, SE=1 */
			0		/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			/* No setting produced the expected feedback pattern */
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
				  "feedback read %llx\n",
				  dev->name, (unsigned long long)val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Verify the write path by writing a known pattern to the
	 * xmsi_address register and reading it back. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		int i = 0;
		/* Candidate write-path settings, OR-ed with the working
		 * read-path setting found above. */
		static const u64 value[] = {
			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
			0x0081810000818100ULL,	/* FE=1, SE=0 */
			0x0042420000424200ULL,	/* FE=0, SE=1 */
			0		/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG,
				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the verified FE/SE bits; rebuild the rest below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG,
			  "%s: Endian settings are wrong, feedback read %llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	return SUCCESS;
}
3645
1ee6dd77 3646static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3647{
1ee6dd77 3648 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3649 u64 val64;
3650 int ret = 0, cnt = 0;
3651
3652 do {
3653 val64 = readq(&bar0->xmsi_access);
b7b5a128 3654 if (!(val64 & s2BIT(15)))
cc6e7c44
RA
3655 break;
3656 mdelay(1);
3657 cnt++;
d44570e4 3658 } while (cnt < 5);
cc6e7c44
RA
3659 if (cnt == 5) {
3660 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3661 ret = 1;
3662 }
3663
3664 return ret;
3665}
3666
1ee6dd77 3667static void restore_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3668{
1ee6dd77 3669 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44 3670 u64 val64;
f61e0a35
SH
3671 int i, msix_index;
3672
f61e0a35
SH
3673 if (nic->device_type == XFRAME_I_DEVICE)
3674 return;
cc6e7c44 3675
d44570e4
JP
3676 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3677 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
cc6e7c44
RA
3678 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3679 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
f61e0a35 3680 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
cc6e7c44 3681 writeq(val64, &bar0->xmsi_access);
68c38507 3682 if (wait_for_msix_trans(nic, msix_index))
9e39f7c5
JP
3683 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3684 __func__, msix_index);
cc6e7c44
RA
3685 }
3686}
3687
1ee6dd77 3688static void store_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3689{
1ee6dd77 3690 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44 3691 u64 val64, addr, data;
f61e0a35
SH
3692 int i, msix_index;
3693
3694 if (nic->device_type == XFRAME_I_DEVICE)
3695 return;
cc6e7c44
RA
3696
3697 /* Store and display */
d44570e4
JP
3698 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3699 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
f61e0a35 3700 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
cc6e7c44 3701 writeq(val64, &bar0->xmsi_access);
f61e0a35 3702 if (wait_for_msix_trans(nic, msix_index)) {
9e39f7c5
JP
3703 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3704 __func__, msix_index);
cc6e7c44
RA
3705 continue;
3706 }
3707 addr = readq(&bar0->xmsi_address);
3708 data = readq(&bar0->xmsi_data);
3709 if (addr && data) {
3710 nic->msix_info[i].addr = addr;
3711 nic->msix_info[i].data = data;
3712 }
3713 }
3714}
3715
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Vector table handed to pci_enable_msix_range(). */
	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Driver-private per-vector state (type, handler arg, in_use). */
	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		kfree(nic->entries);
		swstats->mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Vector 0 is the alarm/TX vector; its handler gets the fifos. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining vectors use the device's sparse numbering: 1, 9, 17, ... */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Map each RX ring to its MSI-X vector via the rx_mat register
	 * and bind the ring as the handler argument for that vector. */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat);	/* read back to flush the posted write */

	ret = pci_enable_msix_range(nic->pdev, nic->entries,
				    nic->num_entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret < 0) {
		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
		kfree(nic->entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct msix_entry);
		kfree(nic->s2io_entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct s2io_msix_entry);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3799
/* Handle software interrupt used during MSI(X) test */
static irqreturn_t s2io_test_intr(int irq, void *dev_id)
{
	struct s2io_nic *sp = dev_id;

	/* Record that the test interrupt fired and wake s2io_test_msi(). */
	sp->msi_detected = 1;
	wake_up(&sp->msi_wait);

	return IRQ_HANDLED;
}
3810
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Temporarily attach the test handler to MSI-X vector 1. */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/* Arm a one-shot scheduled interrupt routed to MSI vector 1. */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to HZ/10 (100 ms at HZ=1000) to arrive. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n",
			  sp->dev->name, pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the original scheduled-interrupt control value. */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
18b2b7bd
SH
3853
3854static void remove_msix_isr(struct s2io_nic *sp)
3855{
3856 int i;
3857 u16 msi_control;
3858
f61e0a35 3859 for (i = 0; i < sp->num_entries; i++) {
d44570e4 3860 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
18b2b7bd
SH
3861 int vector = sp->entries[i].vector;
3862 void *arg = sp->s2io_entries[i].arg;
3863 free_irq(vector, arg);
3864 }
3865 }
3866
3867 kfree(sp->entries);
3868 kfree(sp->s2io_entries);
3869 sp->entries = NULL;
3870 sp->s2io_entries = NULL;
3871
3872 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3873 msi_control &= 0xFFFE; /* Disable MSI */
3874 pci_write_config_word(sp->pdev, 0x42, msi_control);
3875
3876 pci_disable_msix(sp->pdev);
3877}
3878
/* Release the legacy INTA interrupt line registered for this device. */
static void remove_inta_isr(struct s2io_nic *sp)
{
	free_irq(sp->pdev->irq, sp->dev);
}
3883
1da177e4
LT
3884/* ********************************************************* *
3885 * Functions defined below concern the OS part of the driver *
3886 * ********************************************************* */
3887
20346722 3888/**
1da177e4
LT
3889 * s2io_open - open entry point of the driver
3890 * @dev : pointer to the device structure.
3891 * Description:
3892 * This function is the open entry point of the driver. It mainly calls a
3893 * function to allocate Rx buffers and inserts them into the buffer
20346722 3894 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3895 * Return value:
3896 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3897 * file on failure.
3898 */
3899
static int s2io_open(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		goto hw_init_failed;
	}

	/* Program the station MAC address; on failure undo card-up. */
	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		s2io_card_down(sp);
		err = -ENODEV;
		goto hw_init_failed;
	}
	s2io_start_all_tx_queue(sp);
	return 0;

hw_init_failed:
	/* Release any MSI-X tables left allocated by the failed bring-up. */
	if (sp->config.intr_type == MSI_X) {
		if (sp->entries) {
			kfree(sp->entries);
			swstats->mem_freed += sp->num_entries *
				sizeof(struct msix_entry);
		}
		if (sp->s2io_entries) {
			kfree(sp->s2io_entries);
			swstats->mem_freed += sp->num_entries *
				sizeof(struct s2io_msix_entry);
		}
	}
	return err;
}
3945
3946/**
3947 * s2io_close -close entry point of the driver
3948 * @dev : device pointer.
3949 * Description:
3950 * This is the stop entry point of the driver. It needs to undo exactly
3951 * whatever was done by the open entry point,thus it's usually referred to
3952 * as the close function.Among other things this function mainly stops the
3953 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3954 * Return value:
3955 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3956 * file on failure.
3957 */
3958
ac1f60db 3959static int s2io_close(struct net_device *dev)
1da177e4 3960{
4cf1653a 3961 struct s2io_nic *sp = netdev_priv(dev);
faa4f796
SH
3962 struct config_param *config = &sp->config;
3963 u64 tmp64;
3964 int offset;
cc6e7c44 3965
9f74ffde 3966 /* Return if the device is already closed *
d44570e4
JP
3967 * Can happen when s2io_card_up failed in change_mtu *
3968 */
9f74ffde
SH
3969 if (!is_s2io_card_up(sp))
3970 return 0;
3971
3a3d5756 3972 s2io_stop_all_tx_queue(sp);
faa4f796
SH
3973 /* delete all populated mac entries */
3974 for (offset = 1; offset < config->max_mc_addr; offset++) {
3975 tmp64 = do_s2io_read_unicast_mc(sp, offset);
3976 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3977 do_s2io_delete_unicast_mc(sp, tmp64);
3978 }
3979
e6a8fee2 3980 s2io_card_down(sp);
cc6e7c44 3981
1da177e4
LT
3982 return 0;
3983}
3984
3985/**
3986 * s2io_xmit - Tx entry point of te driver
3987 * @skb : the socket buffer containing the Tx data.
3988 * @dev : device pointer.
3989 * Description :
3990 * This function is the Tx entry point of the driver. S2IO NIC supports
3991 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
25985edc 3992 * NOTE: when device can't queue the pkt,just the trans_start variable will
1da177e4
LT
3993 * not be upadted.
3994 * Return value:
3995 * 0 on success & 1 on failure.
3996 */
3997
static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length buffers outright. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Drop packets while the card is down (e.g. during reset). */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	queue = 0;
	if (skb_vlan_tag_present(skb))
		vlan_tag = skb_vlan_tag_get(skb);
	/* Select a TX FIFO: default steering hashes TCP/UDP ports into
	 * the protocol's FIFO group; priority steering uses skb->priority. */
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			if (!ip_is_fragment(ip)) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						       ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
			[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	spin_lock_irqsave(&fifo->tx_lock, flags);

	/* If the chosen queue is stopped, ask the stack to retry later. */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16)fifo->tx_curr_put_info.offset;
	get_off = (u16)fifo->tx_curr_get_info.offset;
	txdp = fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	/* Fill in offload flags (LSO, checksum) for this descriptor. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
				    TXD_TX_CKO_TCP_EN |
				    TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* DMA-map the linear part of the skb into buffer 0. */
	frg_len = skb_headlen(skb);
	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
					      frg_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long)skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!skb_frag_size(frag))
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
							     frag, 0,
							     skb_frag_size(frag),
							     DMA_TO_DEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	/* Hand the descriptor list to the device: write its physical
	 * address, then the control word that kicks off the transmit. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Advance the put pointer, wrapping at the end of the ring. */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		swstats->fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	swstats->mem_allocated += skb->truesize;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return NETDEV_TX_OK;

pci_map_failed:
	/* DMA mapping failed: count it, stop the queue, drop the skb. */
	swstats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return NETDEV_TX_OK;
}
4184
25fff88e 4185static void
e84a2ac9 4186s2io_alarm_handle(struct timer_list *t)
25fff88e 4187{
e84a2ac9 4188 struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
8116f3cf 4189 struct net_device *dev = sp->dev;
25fff88e 4190
8116f3cf 4191 s2io_handle_errors(dev);
25fff88e
K
4192 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4193}
4194
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/* Mask this ring's MSI-X vector in xmsi_mask_reg (one
		 * byte per ring, highest byte = ring 0), then let NAPI
		 * do the RX processing. */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		val8 = readb(addr);	/* read back to flush the write */
		napi_schedule(&ring->napi);
	} else {
		/* Non-NAPI: process RX in the interrupt and refill buffers. */
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(sp, ring);
	}

	return IRQ_HANDLED;
}
4221
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask all interrupts while servicing this one. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		/* Reap completed transmits on every TX FIFO. */
		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* Restore the interrupt mask and flush the write. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
ac731ab6 4258
/* Handle TXPIC interrupts: GPIO-signalled link up/down transitions.
 * Adjusts the GPIO interrupt masks so only the opposite transition
 * is unmasked, and drives the adapter enable/LED bits accordingly. */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	val64 = readq(&bar0->gpio_int_mask);
}
4318
8116f3cf
SS
4319/**
4320 * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4321 * @value: alarm bits
4322 * @addr: address value
4323 * @cnt: counter variable
4324 * Description: Check for alarm and increment the counter
4325 * Return Value:
4326 * 1 - if alarm bit set
4327 * 0 - if alarm bit is not set
4328 */
d44570e4
JP
4329static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4330 unsigned long long *cnt)
8116f3cf
SS
4331{
4332 u64 val64;
4333 val64 = readq(addr);
d44570e4 4334 if (val64 & value) {
8116f3cf
SS
4335 writeq(val64, addr);
4336 (*cnt)++;
4337 return 1;
4338 }
4339 return 0;
4340
4341}
4342
4343/**
4344 * s2io_handle_errors - Xframe error indication handler
4345 * @nic: device private variable
4346 * Description: Handle alarms such as loss of link, single or
4347 * double ECC errors, critical and serious errors.
4348 * Return Value:
4349 * NONE
4350 */
d44570e4 4351static void s2io_handle_errors(void *dev_id)
8116f3cf 4352{
d44570e4 4353 struct net_device *dev = (struct net_device *)dev_id;
4cf1653a 4354 struct s2io_nic *sp = netdev_priv(dev);
8116f3cf 4355 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d44570e4 4356 u64 temp64 = 0, val64 = 0;
8116f3cf
SS
4357 int i = 0;
4358
4359 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4360 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4361
92b84437 4362 if (!is_s2io_card_up(sp))
8116f3cf
SS
4363 return;
4364
4365 if (pci_channel_offline(sp->pdev))
4366 return;
4367
4368 memset(&sw_stat->ring_full_cnt, 0,
d44570e4 4369 sizeof(sw_stat->ring_full_cnt));
8116f3cf
SS
4370
4371 /* Handling the XPAK counters update */
d44570e4 4372 if (stats->xpak_timer_count < 72000) {
8116f3cf
SS
4373 /* waiting for an hour */
4374 stats->xpak_timer_count++;
4375 } else {
4376 s2io_updt_xpak_counter(dev);
4377 /* reset the count to zero */
4378 stats->xpak_timer_count = 0;
4379 }
4380
4381 /* Handling link status change error Intr */
4382 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4383 val64 = readq(&bar0->mac_rmac_err_reg);
4384 writeq(val64, &bar0->mac_rmac_err_reg);
4385 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4386 schedule_work(&sp->set_link_task);
4387 }
4388
4389 /* In case of a serious error, the device will be Reset. */
4390 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
d44570e4 4391 &sw_stat->serious_err_cnt))
8116f3cf
SS
4392 goto reset;
4393
4394 /* Check for data parity error */
4395 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
d44570e4 4396 &sw_stat->parity_err_cnt))
8116f3cf
SS
4397 goto reset;
4398
4399 /* Check for ring full counter */
4400 if (sp->device_type == XFRAME_II_DEVICE) {
4401 val64 = readq(&bar0->ring_bump_counter1);
d44570e4
JP
4402 for (i = 0; i < 4; i++) {
4403 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
8116f3cf
SS
4404 temp64 >>= 64 - ((i+1)*16);
4405 sw_stat->ring_full_cnt[i] += temp64;
4406 }
4407
4408 val64 = readq(&bar0->ring_bump_counter2);
d44570e4
JP
4409 for (i = 0; i < 4; i++) {
4410 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
8116f3cf 4411 temp64 >>= 64 - ((i+1)*16);
d44570e4 4412 sw_stat->ring_full_cnt[i+4] += temp64;
8116f3cf
SS
4413 }
4414 }
4415
4416 val64 = readq(&bar0->txdma_int_status);
4417 /*check for pfc_err*/
4418 if (val64 & TXDMA_PFC_INT) {
d44570e4
JP
4419 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4420 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4421 PFC_PCIX_ERR,
4422 &bar0->pfc_err_reg,
4423 &sw_stat->pfc_err_cnt))
8116f3cf 4424 goto reset;
d44570e4
JP
4425 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4426 &bar0->pfc_err_reg,
4427 &sw_stat->pfc_err_cnt);
8116f3cf
SS
4428 }
4429
4430 /*check for tda_err*/
4431 if (val64 & TXDMA_TDA_INT) {
d44570e4
JP
4432 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4433 TDA_SM0_ERR_ALARM |
4434 TDA_SM1_ERR_ALARM,
4435 &bar0->tda_err_reg,
4436 &sw_stat->tda_err_cnt))
8116f3cf
SS
4437 goto reset;
4438 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
d44570e4
JP
4439 &bar0->tda_err_reg,
4440 &sw_stat->tda_err_cnt);
8116f3cf
SS
4441 }
4442 /*check for pcc_err*/
4443 if (val64 & TXDMA_PCC_INT) {
d44570e4
JP
4444 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4445 PCC_N_SERR | PCC_6_COF_OV_ERR |
4446 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4447 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4448 PCC_TXB_ECC_DB_ERR,
4449 &bar0->pcc_err_reg,
4450 &sw_stat->pcc_err_cnt))
8116f3cf
SS
4451 goto reset;
4452 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
d44570e4
JP
4453 &bar0->pcc_err_reg,
4454 &sw_stat->pcc_err_cnt);
8116f3cf
SS
4455 }
4456
4457 /*check for tti_err*/
4458 if (val64 & TXDMA_TTI_INT) {
d44570e4
JP
4459 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4460 &bar0->tti_err_reg,
4461 &sw_stat->tti_err_cnt))
8116f3cf
SS
4462 goto reset;
4463 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
d44570e4
JP
4464 &bar0->tti_err_reg,
4465 &sw_stat->tti_err_cnt);
8116f3cf
SS
4466 }
4467
4468 /*check for lso_err*/
4469 if (val64 & TXDMA_LSO_INT) {
d44570e4
JP
4470 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4471 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4472 &bar0->lso_err_reg,
4473 &sw_stat->lso_err_cnt))
8116f3cf
SS
4474 goto reset;
4475 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
d44570e4
JP
4476 &bar0->lso_err_reg,
4477 &sw_stat->lso_err_cnt);
8116f3cf
SS
4478 }
4479
4480 /*check for tpa_err*/
4481 if (val64 & TXDMA_TPA_INT) {
d44570e4
JP
4482 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4483 &bar0->tpa_err_reg,
4484 &sw_stat->tpa_err_cnt))
8116f3cf 4485 goto reset;
d44570e4
JP
4486 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4487 &bar0->tpa_err_reg,
4488 &sw_stat->tpa_err_cnt);
8116f3cf
SS
4489 }
4490
4491 /*check for sm_err*/
4492 if (val64 & TXDMA_SM_INT) {
d44570e4
JP
4493 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4494 &bar0->sm_err_reg,
4495 &sw_stat->sm_err_cnt))
8116f3cf
SS
4496 goto reset;
4497 }
4498
4499 val64 = readq(&bar0->mac_int_status);
4500 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4501 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
d44570e4
JP
4502 &bar0->mac_tmac_err_reg,
4503 &sw_stat->mac_tmac_err_cnt))
8116f3cf 4504 goto reset;
d44570e4
JP
4505 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4506 TMAC_DESC_ECC_SG_ERR |
4507 TMAC_DESC_ECC_DB_ERR,
4508 &bar0->mac_tmac_err_reg,
4509 &sw_stat->mac_tmac_err_cnt);
8116f3cf
SS
4510 }
4511
4512 val64 = readq(&bar0->xgxs_int_status);
4513 if (val64 & XGXS_INT_STATUS_TXGXS) {
4514 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
d44570e4
JP
4515 &bar0->xgxs_txgxs_err_reg,
4516 &sw_stat->xgxs_txgxs_err_cnt))
8116f3cf
SS
4517 goto reset;
4518 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
d44570e4
JP
4519 &bar0->xgxs_txgxs_err_reg,
4520 &sw_stat->xgxs_txgxs_err_cnt);
8116f3cf
SS
4521 }
4522
4523 val64 = readq(&bar0->rxdma_int_status);
4524 if (val64 & RXDMA_INT_RC_INT_M) {
d44570e4
JP
4525 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4526 RC_FTC_ECC_DB_ERR |
4527 RC_PRCn_SM_ERR_ALARM |
4528 RC_FTC_SM_ERR_ALARM,
4529 &bar0->rc_err_reg,
4530 &sw_stat->rc_err_cnt))
8116f3cf 4531 goto reset;
d44570e4
JP
4532 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4533 RC_FTC_ECC_SG_ERR |
4534 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4535 &sw_stat->rc_err_cnt);
4536 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4537 PRC_PCI_AB_WR_Rn |
4538 PRC_PCI_AB_F_WR_Rn,
4539 &bar0->prc_pcix_err_reg,
4540 &sw_stat->prc_pcix_err_cnt))
8116f3cf 4541 goto reset;
d44570e4
JP
4542 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4543 PRC_PCI_DP_WR_Rn |
4544 PRC_PCI_DP_F_WR_Rn,
4545 &bar0->prc_pcix_err_reg,
4546 &sw_stat->prc_pcix_err_cnt);
8116f3cf
SS
4547 }
4548
4549 if (val64 & RXDMA_INT_RPA_INT_M) {
4550 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
d44570e4
JP
4551 &bar0->rpa_err_reg,
4552 &sw_stat->rpa_err_cnt))
8116f3cf
SS
4553 goto reset;
4554 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
d44570e4
JP
4555 &bar0->rpa_err_reg,
4556 &sw_stat->rpa_err_cnt);
8116f3cf
SS
4557 }
4558
4559 if (val64 & RXDMA_INT_RDA_INT_M) {
d44570e4
JP
4560 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4561 RDA_FRM_ECC_DB_N_AERR |
4562 RDA_SM1_ERR_ALARM |
4563 RDA_SM0_ERR_ALARM |
4564 RDA_RXD_ECC_DB_SERR,
4565 &bar0->rda_err_reg,
4566 &sw_stat->rda_err_cnt))
8116f3cf 4567 goto reset;
d44570e4
JP
4568 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4569 RDA_FRM_ECC_SG_ERR |
4570 RDA_MISC_ERR |
4571 RDA_PCIX_ERR,
4572 &bar0->rda_err_reg,
4573 &sw_stat->rda_err_cnt);
8116f3cf
SS
4574 }
4575
4576 if (val64 & RXDMA_INT_RTI_INT_M) {
d44570e4
JP
4577 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4578 &bar0->rti_err_reg,
4579 &sw_stat->rti_err_cnt))
8116f3cf
SS
4580 goto reset;
4581 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
d44570e4
JP
4582 &bar0->rti_err_reg,
4583 &sw_stat->rti_err_cnt);
8116f3cf
SS
4584 }
4585
4586 val64 = readq(&bar0->mac_int_status);
4587 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4588 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
d44570e4
JP
4589 &bar0->mac_rmac_err_reg,
4590 &sw_stat->mac_rmac_err_cnt))
8116f3cf 4591 goto reset;
d44570e4
JP
4592 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4593 RMAC_SINGLE_ECC_ERR |
4594 RMAC_DOUBLE_ECC_ERR,
4595 &bar0->mac_rmac_err_reg,
4596 &sw_stat->mac_rmac_err_cnt);
8116f3cf
SS
4597 }
4598
4599 val64 = readq(&bar0->xgxs_int_status);
4600 if (val64 & XGXS_INT_STATUS_RXGXS) {
4601 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
d44570e4
JP
4602 &bar0->xgxs_rxgxs_err_reg,
4603 &sw_stat->xgxs_rxgxs_err_cnt))
8116f3cf
SS
4604 goto reset;
4605 }
4606
4607 val64 = readq(&bar0->mc_int_status);
d44570e4
JP
4608 if (val64 & MC_INT_STATUS_MC_INT) {
4609 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4610 &bar0->mc_err_reg,
4611 &sw_stat->mc_err_cnt))
8116f3cf
SS
4612 goto reset;
4613
4614 /* Handling Ecc errors */
4615 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4616 writeq(val64, &bar0->mc_err_reg);
4617 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4618 sw_stat->double_ecc_errs++;
4619 if (sp->device_type != XFRAME_II_DEVICE) {
4620 /*
4621 * Reset XframeI only if critical error
4622 */
4623 if (val64 &
d44570e4
JP
4624 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4625 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4626 goto reset;
4627 }
8116f3cf
SS
4628 } else
4629 sw_stat->single_ecc_errs++;
4630 }
4631 }
4632 return;
4633
4634reset:
3a3d5756 4635 s2io_stop_all_tx_queue(sp);
8116f3cf
SS
4636 schedule_work(&sp->rst_timer_task);
4637 sw_stat->soft_reset_cnt++;
8116f3cf
SS
4638}
4639
/**
 * s2io_isr - ISR handler of the device .
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the dev structure of the NIC.
 * Description: This function is the ISR handler of the device. It
 * identifies the reason for the interrupt and calls the relevant
 * service routines. As a contingency measure, this ISR allocates the
 * recv buffers, if their numbers are below the panic value which is
 * presently set to 25% of the original number of rcv buffers allocated.
 * Return value:
 *  IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *  IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	/* Card may share an IRQ line; ignore interrupts while it is down */
	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	config = &sp->config;
	mac_control = &sp->mac_control;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	/* All-ones usually means the device is gone (e.g. surprise removal) */
	if (unlikely(reason == S2IO_MINUS_ONE))
		return IRQ_HANDLED;	/* Nothing much can be done. Get out */

	if (reason &
	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
		/* Mask all interrupts while servicing this one */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* Defer Rx work to the NAPI poll routine;
				 * keep Rx interrupts masked until poll
				 * completes. */
				napi_schedule(&sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			/* Non-NAPI mode: process every Rx ring inline */
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				rx_intr_handler(ring, 0);
			}
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		/* Reap completed Tx descriptors on every fifo */
		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				s2io_chk_rx_buffers(sp, ring);
			}
		}
		/* Restore the saved interrupt mask; the read back ensures
		 * the posted write has reached the device. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	} else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4748
7ba013ac
K
4749/**
4750 * s2io_updt_stats -
4751 */
1ee6dd77 4752static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4753{
1ee6dd77 4754 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac
K
4755 u64 val64;
4756 int cnt = 0;
4757
92b84437 4758 if (is_s2io_card_up(sp)) {
7ba013ac
K
4759 /* Apprx 30us on a 133 MHz bus */
4760 val64 = SET_UPDT_CLICKS(10) |
4761 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4762 writeq(val64, &bar0->stat_cfg);
4763 do {
4764 udelay(100);
4765 val64 = readq(&bar0->stat_cfg);
b7b5a128 4766 if (!(val64 & s2BIT(0)))
7ba013ac
K
4767 break;
4768 cnt++;
4769 if (cnt == 5)
4770 break; /* Updt failed */
d44570e4 4771 } while (1);
8a4bdbaa 4772 }
7ba013ac
K
4773}
4774
1da177e4 4775/**
20346722 4776 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4777 * @dev : pointer to the device structure.
4778 * Description:
20346722 4779 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4780 * structure and returns a pointer to the same.
4781 * Return value:
4782 * pointer to the updated net_device_stats structure.
4783 */
ac1f60db 4784static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4785{
4cf1653a 4786 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c
JP
4787 struct mac_info *mac_control = &sp->mac_control;
4788 struct stat_block *stats = mac_control->stats_info;
4a490432 4789 u64 delta;
1da177e4 4790
7ba013ac
K
4791 /* Configure Stats for immediate updt */
4792 s2io_updt_stats(sp);
4793
4a490432
JM
4794 /* A device reset will cause the on-adapter statistics to be zero'ed.
4795 * This can be done while running by changing the MTU. To prevent the
4796 * system from having the stats zero'ed, the driver keeps a copy of the
4797 * last update to the system (which is also zero'ed on reset). This
4798 * enables the driver to accurately know the delta between the last
4799 * update and the current update.
4800 */
4801 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4802 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4803 sp->stats.rx_packets += delta;
4804 dev->stats.rx_packets += delta;
4805
4806 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4807 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4808 sp->stats.tx_packets += delta;
4809 dev->stats.tx_packets += delta;
4810
4811 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4812 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4813 sp->stats.rx_bytes += delta;
4814 dev->stats.rx_bytes += delta;
4815
4816 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4817 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4818 sp->stats.tx_bytes += delta;
4819 dev->stats.tx_bytes += delta;
4820
4821 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4822 sp->stats.rx_errors += delta;
4823 dev->stats.rx_errors += delta;
4824
4825 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4826 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4827 sp->stats.tx_errors += delta;
4828 dev->stats.tx_errors += delta;
4829
4830 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4831 sp->stats.rx_dropped += delta;
4832 dev->stats.rx_dropped += delta;
4833
4834 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4835 sp->stats.tx_dropped += delta;
4836 dev->stats.tx_dropped += delta;
4837
4838 /* The adapter MAC interprets pause frames as multicast packets, but
4839 * does not pass them up. This erroneously increases the multicast
4840 * packet count and needs to be deducted when the multicast frame count
4841 * is queried.
4842 */
4843 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4844 le32_to_cpu(stats->rmac_vld_mcst_frms);
4845 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4846 delta -= sp->stats.multicast;
4847 sp->stats.multicast += delta;
4848 dev->stats.multicast += delta;
1da177e4 4849
4a490432
JM
4850 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4851 le32_to_cpu(stats->rmac_usized_frms)) +
4852 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4853 sp->stats.rx_length_errors += delta;
4854 dev->stats.rx_length_errors += delta;
13d866a9 4855
4a490432
JM
4856 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4857 sp->stats.rx_crc_errors += delta;
4858 dev->stats.rx_crc_errors += delta;
0425b46a 4859
d44570e4 4860 return &dev->stats;
1da177e4
LT
4861}
4862
/**
 * s2io_set_multicast - entry point for multicast address enable/disable.
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine if multicast addresses must be enabled, if promiscuous mode
 * is to be disabled etc.  CAM (receive filter) entries are programmed
 * through the rmac_addr_* register interface and each command is waited
 * upon via wait_for_cmd_complete().
 * Return value:
 *  void.
 */

static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct netdev_hw_addr *ha;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
		0xfeffffffffffULL;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
	void __iomem *add;
	struct config_param *config = &sp->config;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/* Enable all Multicast addresses: program the catch-all
		 * multicast match in the last CAM entry. */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = config->max_mc_addr - 1;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/* Disable all Multicast addresses.
		 * NOTE(review): this condition is asymmetric with the
		 * promiscuous pair below (no negation of IFF_ALLMULTI) —
		 * looks like it toggles all-multi on repeated calls while
		 * the flag stays set; confirm against intended semantics. */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/* Put the NIC into promiscuous mode.  mac_cfg is written in
		 * two 32-bit halves, each unlocked by a key write first. */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* In promiscuous mode, stop stripping VLAN tags unless the
		 * module parameter forces stripping on. */
		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/* Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* Re-enable VLAN tag stripping unless disabled by module
		 * parameter. */
		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
	}

	/* Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
		/* Multicast entries share the CAM after the unicast range;
		 * refuse the list if it cannot fit. */
		if (netdev_mc_count(dev) >
		    (config->max_mc_addr - config->max_mac_addr)) {
			DBG_PRINT(ERR_DBG,
				  "%s: No more Rx filters can be added - "
				  "please enable ALL_MULTI instead\n",
				  dev->name);
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = netdev_mc_count(dev);

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(config->mc_start_offset + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			mac_addr = 0;
			/* Pack the 6 address bytes into a u64, first byte
			 * most significant. */
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= ha->addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(i + config->mc_start_offset);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
			i++;
		}
	}
}
5038
faa4f796
SH
5039/* read from CAM unicast & multicast addresses and store it in
5040 * def_mac_addr structure
5041 */
dac499f9 5042static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
faa4f796
SH
5043{
5044 int offset;
5045 u64 mac_addr = 0x0;
5046 struct config_param *config = &sp->config;
5047
5048 /* store unicast & multicast mac addresses */
5049 for (offset = 0; offset < config->max_mc_addr; offset++) {
5050 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5051 /* if read fails disable the entry */
5052 if (mac_addr == FAILURE)
5053 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5054 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5055 }
5056}
5057
5058/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5059static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5060{
5061 int offset;
5062 struct config_param *config = &sp->config;
5063 /* restore unicast mac address */
5064 for (offset = 0; offset < config->max_mac_addr; offset++)
5065 do_s2io_prog_unicast(sp->dev,
d44570e4 5066 sp->def_mac_addr[offset].mac_addr);
faa4f796
SH
5067
5068 /* restore multicast mac address */
5069 for (offset = config->mc_start_offset;
d44570e4 5070 offset < config->max_mc_addr; offset++)
faa4f796
SH
5071 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5072}
5073
5074/* add a multicast MAC address to CAM */
5075static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5076{
5077 int i;
5078 u64 mac_addr = 0;
5079 struct config_param *config = &sp->config;
5080
5081 for (i = 0; i < ETH_ALEN; i++) {
5082 mac_addr <<= 8;
5083 mac_addr |= addr[i];
5084 }
5085 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5086 return SUCCESS;
5087
5088 /* check if the multicast mac already preset in CAM */
5089 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5090 u64 tmp64;
5091 tmp64 = do_s2io_read_unicast_mc(sp, i);
5092 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5093 break;
5094
5095 if (tmp64 == mac_addr)
5096 return SUCCESS;
5097 }
5098 if (i == config->max_mc_addr) {
5099 DBG_PRINT(ERR_DBG,
d44570e4 5100 "CAM full no space left for multicast MAC\n");
faa4f796
SH
5101 return FAILURE;
5102 }
5103 /* Update the internal structure with this new mac address */
5104 do_s2io_copy_mac_addr(sp, i, mac_addr);
5105
d44570e4 5106 return do_s2io_add_mac(sp, mac_addr, i);
faa4f796
SH
5107}
5108
5109/* add MAC address to CAM */
5110static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
2fd37688
SS
5111{
5112 u64 val64;
5113 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5114
5115 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
d44570e4 5116 &bar0->rmac_addr_data0_mem);
2fd37688 5117
d44570e4 5118 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2fd37688
SS
5119 RMAC_ADDR_CMD_MEM_OFFSET(off);
5120 writeq(val64, &bar0->rmac_addr_cmd_mem);
5121
5122 /* Wait till command completes */
5123 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5124 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5125 S2IO_BIT_RESET)) {
faa4f796 5126 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
2fd37688
SS
5127 return FAILURE;
5128 }
5129 return SUCCESS;
5130}
faa4f796
SH
5131/* deletes a specified unicast/multicast mac entry from CAM */
5132static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5133{
5134 int offset;
5135 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5136 struct config_param *config = &sp->config;
5137
5138 for (offset = 1;
d44570e4 5139 offset < config->max_mc_addr; offset++) {
faa4f796
SH
5140 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5141 if (tmp64 == addr) {
5142 /* disable the entry by writing 0xffffffffffffULL */
5143 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5144 return FAILURE;
5145 /* store the new mac list from CAM */
5146 do_s2io_store_unicast_mc(sp);
5147 return SUCCESS;
5148 }
5149 }
5150 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
d44570e4 5151 (unsigned long long)addr);
faa4f796
SH
5152 return FAILURE;
5153}
5154
5155/* read mac entries from CAM */
5156static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5157{
5158 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5159 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5160
5161 /* read mac addr */
d44570e4 5162 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
faa4f796
SH
5163 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5164 writeq(val64, &bar0->rmac_addr_cmd_mem);
5165
5166 /* Wait till command completes */
5167 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5168 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5169 S2IO_BIT_RESET)) {
faa4f796
SH
5170 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5171 return FAILURE;
5172 }
5173 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4
JP
5174
5175 return tmp64 >> 16;
faa4f796 5176}
2fd37688
SS
5177
5178/**
49ce9c2c 5179 * s2io_set_mac_addr - driver entry point
2fd37688 5180 */
faa4f796 5181
2fd37688
SS
5182static int s2io_set_mac_addr(struct net_device *dev, void *p)
5183{
5184 struct sockaddr *addr = p;
5185
5186 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 5187 return -EADDRNOTAVAIL;
2fd37688
SS
5188
5189 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5190
5191 /* store the MAC address in CAM */
d44570e4 5192 return do_s2io_prog_unicast(dev, dev->dev_addr);
2fd37688 5193}
1da177e4 5194/**
2fd37688 5195 * do_s2io_prog_unicast - Programs the Xframe mac address
1da177e4
LT
5196 * @dev : pointer to the device structure.
5197 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 5198 * Description : This procedure will program the Xframe to receive
1da177e4 5199 * frames with new Mac Address
20346722 5200 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
5201 * as defined in errno.h file on failure.
5202 */
faa4f796 5203
2fd37688 5204static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
1da177e4 5205{
4cf1653a 5206 struct s2io_nic *sp = netdev_priv(dev);
2fd37688 5207 register u64 mac_addr = 0, perm_addr = 0;
1da177e4 5208 int i;
faa4f796
SH
5209 u64 tmp64;
5210 struct config_param *config = &sp->config;
1da177e4 5211
20346722 5212 /*
d44570e4
JP
5213 * Set the new MAC address as the new unicast filter and reflect this
5214 * change on the device address registered with the OS. It will be
5215 * at offset 0.
5216 */
1da177e4
LT
5217 for (i = 0; i < ETH_ALEN; i++) {
5218 mac_addr <<= 8;
5219 mac_addr |= addr[i];
2fd37688
SS
5220 perm_addr <<= 8;
5221 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
d8d70caf
SS
5222 }
5223
2fd37688
SS
5224 /* check if the dev_addr is different than perm_addr */
5225 if (mac_addr == perm_addr)
d8d70caf
SS
5226 return SUCCESS;
5227
faa4f796
SH
5228 /* check if the mac already preset in CAM */
5229 for (i = 1; i < config->max_mac_addr; i++) {
5230 tmp64 = do_s2io_read_unicast_mc(sp, i);
5231 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5232 break;
5233
5234 if (tmp64 == mac_addr) {
5235 DBG_PRINT(INFO_DBG,
d44570e4
JP
5236 "MAC addr:0x%llx already present in CAM\n",
5237 (unsigned long long)mac_addr);
faa4f796
SH
5238 return SUCCESS;
5239 }
5240 }
5241 if (i == config->max_mac_addr) {
5242 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5243 return FAILURE;
5244 }
d8d70caf 5245 /* Update the internal structure with this new mac address */
faa4f796 5246 do_s2io_copy_mac_addr(sp, i, mac_addr);
d44570e4
JP
5247
5248 return do_s2io_add_mac(sp, mac_addr, i);
1da177e4
LT
5249}
5250
5251/**
51f21442 5252 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
d07ce242
JP
5253 * @sp : private member of the device structure, which is a pointer to the
5254 * s2io_nic structure.
51f21442 5255 * @cmd: pointer to the structure with parameters given by ethtool to set
1da177e4
LT
5256 * link information.
5257 * Description:
20346722 5258 * The function sets different link parameters provided by the user onto
1da177e4
LT
5259 * the NIC.
5260 * Return value:
5261 * 0 on success.
d44570e4 5262 */
1da177e4 5263
51f21442
PR
5264static int
5265s2io_ethtool_set_link_ksettings(struct net_device *dev,
5266 const struct ethtool_link_ksettings *cmd)
1da177e4 5267{
4cf1653a 5268 struct s2io_nic *sp = netdev_priv(dev);
51f21442
PR
5269 if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5270 (cmd->base.speed != SPEED_10000) ||
5271 (cmd->base.duplex != DUPLEX_FULL))
1da177e4
LT
5272 return -EINVAL;
5273 else {
5274 s2io_close(sp->dev);
5275 s2io_open(sp->dev);
5276 }
5277
5278 return 0;
5279}
5280
5281/**
51f21442 5282 * s2io_ethtol_get_link_ksettings - Return link specific information.
1da177e4
LT
5283 * @sp : private member of the device structure, pointer to the
5284 * s2io_nic structure.
51f21442 5285 * @cmd : pointer to the structure with parameters given by ethtool
1da177e4
LT
5286 * to return link information.
5287 * Description:
5288 * Returns link specific information like speed, duplex etc.. to ethtool.
5289 * Return value :
5290 * return 0 on success.
5291 */
5292
51f21442
PR
5293static int
5294s2io_ethtool_get_link_ksettings(struct net_device *dev,
5295 struct ethtool_link_ksettings *cmd)
1da177e4 5296{
4cf1653a 5297 struct s2io_nic *sp = netdev_priv(dev);
1a7eb72b 5298
51f21442
PR
5299 ethtool_link_ksettings_zero_link_mode(cmd, supported);
5300 ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5301 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5302
5303 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5304 ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5305 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5306
5307 cmd->base.port = PORT_FIBRE;
1da177e4
LT
5308
5309 if (netif_carrier_ok(sp->dev)) {
51f21442
PR
5310 cmd->base.speed = SPEED_10000;
5311 cmd->base.duplex = DUPLEX_FULL;
1da177e4 5312 } else {
51f21442
PR
5313 cmd->base.speed = SPEED_UNKNOWN;
5314 cmd->base.duplex = DUPLEX_UNKNOWN;
1da177e4
LT
5315 }
5316
51f21442 5317 cmd->base.autoneg = AUTONEG_DISABLE;
1da177e4
LT
5318 return 0;
5319}
5320
5321/**
20346722
K
5322 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5323 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5324 * s2io_nic structure.
5325 * @info : pointer to the structure with parameters given by ethtool to
5326 * return driver information.
5327 * Description:
5328 * Returns driver specefic information like name, version etc.. to ethtool.
5329 * Return value:
5330 * void
5331 */
5332
5333static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5334 struct ethtool_drvinfo *info)
5335{
4cf1653a 5336 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 5337
68aad78c
RJ
5338 strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5339 strlcpy(info->version, s2io_driver_version, sizeof(info->version));
68aad78c 5340 strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
5341}
5342
5343/**
5344 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
20346722 5345 * @sp: private member of the device structure, which is a pointer to the
1da177e4 5346 * s2io_nic structure.
20346722 5347 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4 5348 * dumping the registers.
8b1bb92b 5349 * @reg_space: The input argument into which all the registers are dumped.
1da177e4
LT
5350 * Description:
5351 * Dumps the entire register space of xFrame NIC into the user given
5352 * buffer area.
5353 * Return value :
5354 * void .
d44570e4 5355 */
1da177e4
LT
5356
5357static void s2io_ethtool_gregs(struct net_device *dev,
5358 struct ethtool_regs *regs, void *space)
5359{
5360 int i;
5361 u64 reg;
d44570e4 5362 u8 *reg_space = (u8 *)space;
4cf1653a 5363 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5364
5365 regs->len = XENA_REG_SPACE;
5366 regs->version = sp->pdev->subsystem_device;
5367
5368 for (i = 0; i < regs->len; i += 8) {
5369 reg = readq(sp->bar0 + i);
5370 memcpy((reg_space + i), &reg, 8);
5371 }
5372}
5373
034e3450 5374/*
5375 * s2io_set_led - control NIC led
d44570e4 5376 */
034e3450 5377static void s2io_set_led(struct s2io_nic *sp, bool on)
1da177e4 5378{
1ee6dd77 5379 struct XENA_dev_config __iomem *bar0 = sp->bar0;
034e3450 5380 u16 subid = sp->pdev->subsystem_device;
5381 u64 val64;
1da177e4 5382
541ae68f 5383 if ((sp->device_type == XFRAME_II_DEVICE) ||
d44570e4 5384 ((subid & 0xFF) >= 0x07)) {
1da177e4 5385 val64 = readq(&bar0->gpio_control);
034e3450 5386 if (on)
5387 val64 |= GPIO_CTRL_GPIO_0;
5388 else
5389 val64 &= ~GPIO_CTRL_GPIO_0;
5390
1da177e4
LT
5391 writeq(val64, &bar0->gpio_control);
5392 } else {
5393 val64 = readq(&bar0->adapter_control);
034e3450 5394 if (on)
5395 val64 |= ADAPTER_LED_ON;
5396 else
5397 val64 &= ~ADAPTER_LED_ON;
5398
1da177e4
LT
5399 writeq(val64, &bar0->adapter_control);
5400 }
5401
1da177e4
LT
5402}
5403
5404/**
034e3450 5405 * s2io_ethtool_set_led - To physically identify the nic on the system.
5406 * @dev : network device
5407 * @state: led setting
5408 *
1da177e4 5409 * Description: Used to physically identify the NIC on the system.
20346722 5410 * The Link LED will blink for a time specified by the user for
1da177e4 5411 * identification.
20346722 5412 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4 5413 * identification is possible only if it's link is up.
1da177e4
LT
5414 */
5415
static int s2io_ethtool_set_led(struct net_device *dev,
				enum ethtool_phys_id_state state)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid = sp->pdev->subsystem_device;

	/*
	 * Older Xframe I boards (subsystem id byte < 0x07) can only blink
	 * the LED while the adapter/link is enabled; refuse otherwise.
	 */
	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
		u64 val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			pr_err("Adapter Link down, cannot blink LED\n");
			return -EAGAIN;
		}
	}

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save gpio_control so it can be restored when blinking ends */
		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
		return 1; /* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		s2io_set_led(sp, true);
		break;

	case ETHTOOL_ID_OFF:
		s2io_set_led(sp, false);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the saved LED state on boards whose link LED
		 * indication is known to be unreliable. */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
	}

	return 0;
}
5451
0cec35eb 5452static void s2io_ethtool_gringparam(struct net_device *dev,
d44570e4 5453 struct ethtool_ringparam *ering)
0cec35eb 5454{
4cf1653a 5455 struct s2io_nic *sp = netdev_priv(dev);
d44570e4 5456 int i, tx_desc_count = 0, rx_desc_count = 0;
0cec35eb 5457
1853e2e1 5458 if (sp->rxd_mode == RXD_MODE_1) {
0cec35eb 5459 ering->rx_max_pending = MAX_RX_DESC_1;
1853e2e1
JM
5460 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5461 } else {
0cec35eb 5462 ering->rx_max_pending = MAX_RX_DESC_2;
1853e2e1
JM
5463 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5464 }
0cec35eb
SH
5465
5466 ering->tx_max_pending = MAX_TX_DESC;
8a4bdbaa 5467
1853e2e1 5468 for (i = 0; i < sp->config.rx_ring_num; i++)
0cec35eb 5469 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
0cec35eb 5470 ering->rx_pending = rx_desc_count;
0cec35eb 5471 ering->rx_jumbo_pending = rx_desc_count;
1853e2e1
JM
5472
5473 for (i = 0; i < sp->config.tx_fifo_num; i++)
5474 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5475 ering->tx_pending = tx_desc_count;
5476 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
0cec35eb
SH
5477}
5478
1da177e4
LT
5479/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
20346722
K
5481 * @sp : private member of the device structure, which is a pointer to the
5482 * s2io_nic structure.
1da177e4
LT
5483 * @ep : pointer to the structure with pause parameters given by ethtool.
5484 * Description:
5485 * Returns the Pause frame generation and reception capability of the NIC.
5486 * Return value:
5487 * void
5488 */
5489static void s2io_ethtool_getpause_data(struct net_device *dev,
5490 struct ethtool_pauseparam *ep)
5491{
5492 u64 val64;
4cf1653a 5493 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5494 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5495
5496 val64 = readq(&bar0->rmac_pause_cfg);
5497 if (val64 & RMAC_PAUSE_GEN_ENABLE)
f957bcf0 5498 ep->tx_pause = true;
1da177e4 5499 if (val64 & RMAC_PAUSE_RX_ENABLE)
f957bcf0
TK
5500 ep->rx_pause = true;
5501 ep->autoneg = false;
1da177e4
LT
5502}
5503
5504/**
5505 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 5506 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5507 * s2io_nic structure.
5508 * @ep : pointer to the structure with pause parameters given by ethtool.
5509 * Description:
5510 * It can be used to set or reset Pause frame generation or reception
5511 * support of the NIC.
5512 * Return value:
5513 * int, returns 0 on Success
5514 */
5515
5516static int s2io_ethtool_setpause_data(struct net_device *dev,
d44570e4 5517 struct ethtool_pauseparam *ep)
1da177e4
LT
5518{
5519 u64 val64;
4cf1653a 5520 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5521 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5522
5523 val64 = readq(&bar0->rmac_pause_cfg);
5524 if (ep->tx_pause)
5525 val64 |= RMAC_PAUSE_GEN_ENABLE;
5526 else
5527 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5528 if (ep->rx_pause)
5529 val64 |= RMAC_PAUSE_RX_ENABLE;
5530 else
5531 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5532 writeq(val64, &bar0->rmac_pause_cfg);
5533 return 0;
5534}
5535
5536/**
5537 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 5538 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5539 * s2io_nic structure.
5540 * @off : offset at which the data must be written
5541 * @data : Its an output parameter where the data read at the given
20346722 5542 * offset is stored.
1da177e4 5543 * Description:
20346722 5544 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
5545 * read data.
5546 * NOTE: Will allow to read only part of the EEPROM visible through the
5547 * I2C bus.
5548 * Return value:
5549 * -1 on failure and 0 on success.
5550 */
5551
/* I2C device id of the EEPROM behind the Xframe I's I2C controller */
#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/*
	 * Xframe I: issue a 3-byte I2C read and poll (up to 5 * 50 ms)
	 * for the CNTL_END completion bit; the data is returned in the
	 * same i2c_control register.
	 */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/*
	 * Xframe II: the EEPROM hangs off a SPI controller instead.
	 * Program the command, then set SPI_CONTROL_REQ in a second
	 * write to kick off the transfer, and poll for DONE or NACK.
	 * Only the low 24 bits of spi_data are valid (3-byte read).
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;	/* device rejected the request */
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;	/* 3 bytes valid */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5604
5605/**
5606 * write_eeprom - actually writes the relevant part of the data value.
5607 * @sp : private member of the device structure, which is a pointer to the
5608 * s2io_nic structure.
5609 * @off : offset at which the data must be written
5610 * @data : The data that is to be written
20346722 5611 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
5612 * the Eeprom. (max of 3)
5613 * Description:
5614 * Actually writes the relevant part of the data value into the Eeprom
5615 * through the I2C bus.
5616 * Return value:
5617 * 0 on success, -1 on failure.
5618 */
5619
static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/*
	 * Xframe I: program an I2C write of @cnt bytes and poll (up to
	 * 5 * 50 ms) for CNTL_END; NACK from the device means failure.
	 */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/*
	 * Xframe II: write through the SPI controller.  The byte-count
	 * field encodes 8 bytes as 0; spi_data must be loaded before
	 * the command, and SPI_CONTROL_REQ is raised in a second write
	 * to start the transfer.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;	/* device rejected the write */
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/*
 * s2io_vpd_read - read the adapter's Vital Product Data
 * @nic: device private data
 *
 * Reads up to 256 bytes of VPD through the PCI VPD capability at
 * config-space offset @vpd_addr, then extracts the serial number
 * (the "SN" keyword) and product name strings.  On any failure the
 * defaults set up at the top of the function are left in place.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	/* VPD capability lives at a different config offset per chip */
	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/*
	 * Fetch VPD a dword at a time: write the address, clear the
	 * flag byte, then poll (up to 5 * 2 ms) for the 0x80 completion
	 * flag before reading the data dword.
	 */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev, (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter */
		for (cnt = 0; cnt < 252; cnt++) {
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				/* bound the copy by both the destination
				 * size and the remaining VPD buffer */
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* vpd_data[1] is the length of the product-name string at [3] */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5741
1da177e4
LT
5742/**
5743 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
d07ce242
JP
5744 * @sp : private member of the device structure, which is a pointer to the
5745 * s2io_nic structure.
20346722 5746 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5747 * containing all relevant information.
5748 * @data_buf : user defined value to be written into Eeprom.
5749 * Description: Reads the values stored in the Eeprom at given offset
5750 * for a given length. Stores these values int the input argument data
5751 * buffer 'data_buf' and returns these to the caller (ethtool.)
5752 * Return value:
5753 * int 0 on success
5754 */
5755
5756static int s2io_ethtool_geeprom(struct net_device *dev,
d44570e4 5757 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5758{
ad4ebed0 5759 u32 i, valid;
5760 u64 data;
4cf1653a 5761 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5762
5763 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5764
5765 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5766 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5767
5768 for (i = 0; i < eeprom->len; i += 4) {
5769 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5770 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5771 return -EFAULT;
5772 }
5773 valid = INV(data);
5774 memcpy((data_buf + i), &valid, 4);
5775 }
5776 return 0;
5777}
5778
5779/**
5780 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5781 * @sp : private member of the device structure, which is a pointer to the
5782 * s2io_nic structure.
20346722 5783 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5784 * containing all relevant information.
5785 * @data_buf ; user defined value to be written into Eeprom.
5786 * Description:
5787 * Tries to write the user provided value in the Eeprom, at the offset
5788 * given by the user.
5789 * Return value:
5790 * 0 on success, -EFAULT on failure.
5791 */
5792
5793static int s2io_ethtool_seeprom(struct net_device *dev,
5794 struct ethtool_eeprom *eeprom,
d44570e4 5795 u8 *data_buf)
1da177e4
LT
5796{
5797 int len = eeprom->len, cnt = 0;
ad4ebed0 5798 u64 valid = 0, data;
4cf1653a 5799 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5800
5801 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5802 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
5803 "ETHTOOL_WRITE_EEPROM Err: "
5804 "Magic value is wrong, it is 0x%x should be 0x%x\n",
5805 (sp->pdev->vendor | (sp->pdev->device << 16)),
5806 eeprom->magic);
1da177e4
LT
5807 return -EFAULT;
5808 }
5809
5810 while (len) {
d44570e4
JP
5811 data = (u32)data_buf[cnt] & 0x000000FF;
5812 if (data)
5813 valid = (u32)(data << 24);
5814 else
1da177e4
LT
5815 valid = data;
5816
5817 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5818 DBG_PRINT(ERR_DBG,
9e39f7c5
JP
5819 "ETHTOOL_WRITE_EEPROM Err: "
5820 "Cannot write into the specified offset\n");
1da177e4
LT
5821 return -EFAULT;
5822 }
5823 cnt++;
5824 len--;
5825 }
5826
5827 return 0;
5828}
5829
5830/**
20346722
K
5831 * s2io_register_test - reads and writes into all clock domains.
5832 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5833 * s2io_nic structure.
5834 * @data : variable that returns the result of each of the test conducted b
5835 * by the driver.
5836 * Description:
5837 * Read and write into all clock domains. The NIC has 3 clock domains,
5838 * see that registers in all the three regions are accessible.
5839 * Return value:
5840 * 0 on success.
5841 */
5842
static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read tests: four registers with known power-on/constant values,
	 * one from each clock domain, verifying byte-swapper and access. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
	}

	/* rx_queue_cfg reset value differs between chip generations */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
	}

	/* Write tests: write-then-read-back both alternating bit
	 * patterns through the scratch-usable xmsi_data register. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
	}

	/* 0 = pass, 1 = at least one check failed */
	*data = fail;
	return fail;
}
5896
5897/**
20346722 5898 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
5899 * @sp : private member of the device structure, which is a pointer to the
5900 * s2io_nic structure.
5901 * @data:variable that returns the result of each of the test conducted by
5902 * the driver.
5903 * Description:
20346722 5904 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
5905 * register.
5906 * Return value:
5907 * 0 on success.
5908 */
5909
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;	/* write to a protected area succeeded */

	/* Save current values at offsets 0x4F0 and 0x7F0 so they can be
	 * restored after the destructive write tests below. */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0: write a pattern, read it back */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c (expected to fail) */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0: write a pattern, read back */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Negative tests: writes to these protected offsets must
		 * fail on Xframe I; success means the test failed. */

		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	/* 0 = pass, 1 = at least one check failed */
	*data = fail;
	return fail;
}
5997
5998/**
5999 * s2io_bist_test - invokes the MemBist test of the card .
20346722 6000 * @sp : private member of the device structure, which is a pointer to the
1da177e4 6001 * s2io_nic structure.
20346722 6002 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
6003 * the driver.
6004 * Description:
6005 * This invokes the MemBist test of the card. We give around
 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
1da177e4
LT
6008 * Return value:
6009 * 0 on success and -1 on failure.
6010 */
6011
d44570e4 6012static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
1da177e4
LT
6013{
6014 u8 bist = 0;
6015 int cnt = 0, ret = -1;
6016
6017 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6018 bist |= PCI_BIST_START;
6019 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6020
6021 while (cnt < 20) {
6022 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6023 if (!(bist & PCI_BIST_START)) {
6024 *data = (bist & PCI_BIST_CODE_MASK);
6025 ret = 0;
6026 break;
6027 }
6028 msleep(100);
6029 cnt++;
6030 }
6031
6032 return ret;
6033}
6034
6035/**
49ce9c2c 6036 * s2io_link_test - verifies the link state of the nic
20346722 6037 * @sp ; private member of the device structure, which is a pointer to the
1da177e4
LT
6038 * s2io_nic structure.
6039 * @data: variable that returns the result of each of the test conducted by
6040 * the driver.
6041 * Description:
20346722 6042 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
6043 * argument 'data' appropriately.
6044 * Return value:
6045 * 0 on success.
6046 */
6047
d44570e4 6048static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
1da177e4 6049{
1ee6dd77 6050 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
6051 u64 val64;
6052
6053 val64 = readq(&bar0->adapter_status);
d44570e4 6054 if (!(LINK_IS_UP(val64)))
1da177e4 6055 *data = 1;
c92ca04b
AR
6056 else
6057 *data = 0;
1da177e4 6058
b41477f3 6059 return *data;
1da177e4
LT
6060}
6061
6062/**
20346722 6063 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
49ce9c2c 6064 * @sp: private member of the device structure, which is a pointer to the
1da177e4 6065 * s2io_nic structure.
49ce9c2c 6066 * @data: variable that returns the result of each of the test
1da177e4
LT
6067 * conducted by the driver.
6068 * Description:
20346722 6069 * This is one of the offline test that tests the read and write
1da177e4
LT
6070 * access to the RldRam chip on the NIC.
6071 * Return value:
6072 * 0 on success.
6073 */
6074
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so test patterns are stored/read back verbatim */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second inverts the upper 48 bits of each
	 * pattern so every data bit is exercised both ways. */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll (5 * 200 ms) for DONE */
		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		if (cnt == 5)
			break;	/* write phase timed out */

		/* Kick off the read-back/compare phase and poll (5 * 500 ms) */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;	/* read phase timed out */

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	/* 0 = pass, 1 = mismatch or timeout */
	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6157
6158/**
 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
6160 * @sp : private member of the device structure, which is a pointer to the
6161 * s2io_nic structure.
6162 * @ethtest : pointer to a ethtool command specific structure that will be
6163 * returned to the user.
20346722 6164 * @data : variable that returns the result of each of the test
1da177e4
LT
6165 * conducted by the driver.
6166 * Description:
6167 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6168 * the health of the card.
6169 * Return value:
6170 * void
6171 */
6172
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests: the interface is taken down first and
		 * the NIC is reset after each destructive test. */
		if (orig_state)
			s2io_close(sp->dev);

		/* data[0]: register read/write test */
		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		/* data[3]: RLDRAM access test */
		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		/* data[1]: EEPROM read/write test */
		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* data[4]: PCI built-in self test */
		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Restore the interface state we found it in */
		if (orig_state)
			s2io_open(sp->dev);

		/* data[2] (link test) is not run offline */
		data[2] = 0;
	} else {
		/* Online Tests: only the link test can run; the others
		 * are reported as -1 when the interface is down. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6226
6227static void s2io_get_ethtool_stats(struct net_device *dev,
6228 struct ethtool_stats *estats,
d44570e4 6229 u64 *tmp_stats)
1da177e4 6230{
8116f3cf 6231 int i = 0, k;
4cf1653a 6232 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c
JP
6233 struct stat_block *stats = sp->mac_control.stats_info;
6234 struct swStat *swstats = &stats->sw_stat;
6235 struct xpakStat *xstats = &stats->xpak_stat;
1da177e4 6236
7ba013ac 6237 s2io_updt_stats(sp);
541ae68f 6238 tmp_stats[i++] =
ffb5df6c
JP
6239 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
6240 le32_to_cpu(stats->tmac_frms);
541ae68f 6241 tmp_stats[i++] =
ffb5df6c
JP
6242 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6243 le32_to_cpu(stats->tmac_data_octets);
6244 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
541ae68f 6245 tmp_stats[i++] =
ffb5df6c
JP
6246 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6247 le32_to_cpu(stats->tmac_mcst_frms);
541ae68f 6248 tmp_stats[i++] =
ffb5df6c
JP
6249 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6250 le32_to_cpu(stats->tmac_bcst_frms);
6251 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
bd1034f0 6252 tmp_stats[i++] =
ffb5df6c
JP
6253 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6254 le32_to_cpu(stats->tmac_ttl_octets);
bd1034f0 6255 tmp_stats[i++] =
ffb5df6c
JP
6256 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6257 le32_to_cpu(stats->tmac_ucst_frms);
d44570e4 6258 tmp_stats[i++] =
ffb5df6c
JP
6259 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6260 le32_to_cpu(stats->tmac_nucst_frms);
541ae68f 6261 tmp_stats[i++] =
ffb5df6c
JP
6262 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6263 le32_to_cpu(stats->tmac_any_err_frms);
6264 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6265 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
541ae68f 6266 tmp_stats[i++] =
ffb5df6c
JP
6267 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6268 le32_to_cpu(stats->tmac_vld_ip);
541ae68f 6269 tmp_stats[i++] =
ffb5df6c
JP
6270 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6271 le32_to_cpu(stats->tmac_drop_ip);
541ae68f 6272 tmp_stats[i++] =
ffb5df6c
JP
6273 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6274 le32_to_cpu(stats->tmac_icmp);
541ae68f 6275 tmp_stats[i++] =
ffb5df6c
JP
6276 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6277 le32_to_cpu(stats->tmac_rst_tcp);
6278 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6279 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6280 le32_to_cpu(stats->tmac_udp);
541ae68f 6281 tmp_stats[i++] =
ffb5df6c
JP
6282 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6283 le32_to_cpu(stats->rmac_vld_frms);
541ae68f 6284 tmp_stats[i++] =
ffb5df6c
JP
6285 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6286 le32_to_cpu(stats->rmac_data_octets);
6287 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6288 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
541ae68f 6289 tmp_stats[i++] =
ffb5df6c
JP
6290 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6291 le32_to_cpu(stats->rmac_vld_mcst_frms);
541ae68f 6292 tmp_stats[i++] =
ffb5df6c
JP
6293 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6294 le32_to_cpu(stats->rmac_vld_bcst_frms);
6295 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6296 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6297 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6298 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6299 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
d44570e4 6300 tmp_stats[i++] =
ffb5df6c
JP
6301 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6302 le32_to_cpu(stats->rmac_ttl_octets);
bd1034f0 6303 tmp_stats[i++] =
ffb5df6c
JP
6304 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6305 | le32_to_cpu(stats->rmac_accepted_ucst_frms);
d44570e4 6306 tmp_stats[i++] =
ffb5df6c
JP
6307 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6308 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
541ae68f 6309 tmp_stats[i++] =
ffb5df6c
JP
6310 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6311 le32_to_cpu(stats->rmac_discarded_frms);
d44570e4 6312 tmp_stats[i++] =
ffb5df6c
JP
6313 (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6314 << 32 | le32_to_cpu(stats->rmac_drop_events);
6315 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6316 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
541ae68f 6317 tmp_stats[i++] =
ffb5df6c
JP
6318 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6319 le32_to_cpu(stats->rmac_usized_frms);
541ae68f 6320 tmp_stats[i++] =
ffb5df6c
JP
6321 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6322 le32_to_cpu(stats->rmac_osized_frms);
541ae68f 6323 tmp_stats[i++] =
ffb5df6c
JP
6324 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6325 le32_to_cpu(stats->rmac_frag_frms);
541ae68f 6326 tmp_stats[i++] =
ffb5df6c
JP
6327 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6328 le32_to_cpu(stats->rmac_jabber_frms);
6329 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6330 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6331 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6332 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6333 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6334 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
bd1034f0 6335 tmp_stats[i++] =
ffb5df6c
JP
6336 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6337 le32_to_cpu(stats->rmac_ip);
6338 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6339 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
bd1034f0 6340 tmp_stats[i++] =
ffb5df6c
JP
6341 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6342 le32_to_cpu(stats->rmac_drop_ip);
bd1034f0 6343 tmp_stats[i++] =
ffb5df6c
JP
6344 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6345 le32_to_cpu(stats->rmac_icmp);
6346 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
bd1034f0 6347 tmp_stats[i++] =
ffb5df6c
JP
6348 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6349 le32_to_cpu(stats->rmac_udp);
541ae68f 6350 tmp_stats[i++] =
ffb5df6c
JP
6351 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6352 le32_to_cpu(stats->rmac_err_drp_udp);
6353 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6354 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6355 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6356 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6357 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6358 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6359 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6360 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6361 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6362 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6363 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6364 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6365 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6366 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6367 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6368 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6369 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
541ae68f 6370 tmp_stats[i++] =
ffb5df6c
JP
6371 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6372 le32_to_cpu(stats->rmac_pause_cnt);
6373 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6374 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
541ae68f 6375 tmp_stats[i++] =
ffb5df6c
JP
6376 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6377 le32_to_cpu(stats->rmac_accepted_ip);
6378 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6379 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6380 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6381 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6382 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6383 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6384 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6385 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6386 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6387 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6388 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6389 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6390 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6391 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6392 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6393 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6394 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6395 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6396 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
fa1f0cb3
SS
6397
6398 /* Enhanced statistics exist only for Hercules */
d44570e4 6399 if (sp->device_type == XFRAME_II_DEVICE) {
fa1f0cb3 6400 tmp_stats[i++] =
ffb5df6c 6401 le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
fa1f0cb3 6402 tmp_stats[i++] =
ffb5df6c 6403 le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
fa1f0cb3 6404 tmp_stats[i++] =
ffb5df6c
JP
6405 le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6406 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6407 tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6408 tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6409 tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6410 tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6411 tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6412 tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6413 tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6414 tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6415 tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6416 tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6417 tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6418 tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
fa1f0cb3
SS
6419 }
6420
7ba013ac 6421 tmp_stats[i++] = 0;
ffb5df6c
JP
6422 tmp_stats[i++] = swstats->single_ecc_errs;
6423 tmp_stats[i++] = swstats->double_ecc_errs;
6424 tmp_stats[i++] = swstats->parity_err_cnt;
6425 tmp_stats[i++] = swstats->serious_err_cnt;
6426 tmp_stats[i++] = swstats->soft_reset_cnt;
6427 tmp_stats[i++] = swstats->fifo_full_cnt;
8116f3cf 6428 for (k = 0; k < MAX_RX_RINGS; k++)
ffb5df6c
JP
6429 tmp_stats[i++] = swstats->ring_full_cnt[k];
6430 tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6431 tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6432 tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6433 tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6434 tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6435 tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6436 tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6437 tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6438 tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6439 tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6440 tmp_stats[i++] = xstats->warn_laser_output_power_high;
6441 tmp_stats[i++] = xstats->warn_laser_output_power_low;
6442 tmp_stats[i++] = swstats->clubbed_frms_cnt;
6443 tmp_stats[i++] = swstats->sending_both;
6444 tmp_stats[i++] = swstats->outof_sequence_pkts;
6445 tmp_stats[i++] = swstats->flush_max_pkts;
6446 if (swstats->num_aggregations) {
6447 u64 tmp = swstats->sum_avg_pkts_aggregated;
bd1034f0 6448 int count = 0;
6aa20a22 6449 /*
bd1034f0
AR
6450 * Since 64-bit divide does not work on all platforms,
6451 * do repeated subtraction.
6452 */
ffb5df6c
JP
6453 while (tmp >= swstats->num_aggregations) {
6454 tmp -= swstats->num_aggregations;
bd1034f0
AR
6455 count++;
6456 }
6457 tmp_stats[i++] = count;
d44570e4 6458 } else
bd1034f0 6459 tmp_stats[i++] = 0;
ffb5df6c
JP
6460 tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6461 tmp_stats[i++] = swstats->pci_map_fail_cnt;
6462 tmp_stats[i++] = swstats->watchdog_timer_cnt;
6463 tmp_stats[i++] = swstats->mem_allocated;
6464 tmp_stats[i++] = swstats->mem_freed;
6465 tmp_stats[i++] = swstats->link_up_cnt;
6466 tmp_stats[i++] = swstats->link_down_cnt;
6467 tmp_stats[i++] = swstats->link_up_time;
6468 tmp_stats[i++] = swstats->link_down_time;
6469
6470 tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6471 tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6472 tmp_stats[i++] = swstats->tx_parity_err_cnt;
6473 tmp_stats[i++] = swstats->tx_link_loss_cnt;
6474 tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6475
6476 tmp_stats[i++] = swstats->rx_parity_err_cnt;
6477 tmp_stats[i++] = swstats->rx_abort_cnt;
6478 tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6479 tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6480 tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6481 tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6482 tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6483 tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6484 tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6485 tmp_stats[i++] = swstats->tda_err_cnt;
6486 tmp_stats[i++] = swstats->pfc_err_cnt;
6487 tmp_stats[i++] = swstats->pcc_err_cnt;
6488 tmp_stats[i++] = swstats->tti_err_cnt;
6489 tmp_stats[i++] = swstats->tpa_err_cnt;
6490 tmp_stats[i++] = swstats->sm_err_cnt;
6491 tmp_stats[i++] = swstats->lso_err_cnt;
6492 tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6493 tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6494 tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6495 tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6496 tmp_stats[i++] = swstats->rc_err_cnt;
6497 tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6498 tmp_stats[i++] = swstats->rpa_err_cnt;
6499 tmp_stats[i++] = swstats->rda_err_cnt;
6500 tmp_stats[i++] = swstats->rti_err_cnt;
6501 tmp_stats[i++] = swstats->mc_err_cnt;
1da177e4
LT
6502}
6503
ac1f60db 6504static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4 6505{
d44570e4 6506 return XENA_REG_SPACE;
1da177e4
LT
6507}
6508
6509
ac1f60db 6510static int s2io_get_eeprom_len(struct net_device *dev)
1da177e4 6511{
d44570e4 6512 return XENA_EEPROM_SPACE;
1da177e4
LT
6513}
6514
b9f2c044 6515static int s2io_get_sset_count(struct net_device *dev, int sset)
1da177e4 6516{
4cf1653a 6517 struct s2io_nic *sp = netdev_priv(dev);
b9f2c044
JG
6518
6519 switch (sset) {
6520 case ETH_SS_TEST:
6521 return S2IO_TEST_LEN;
6522 case ETH_SS_STATS:
d44570e4 6523 switch (sp->device_type) {
b9f2c044
JG
6524 case XFRAME_I_DEVICE:
6525 return XFRAME_I_STAT_LEN;
6526 case XFRAME_II_DEVICE:
6527 return XFRAME_II_STAT_LEN;
6528 default:
6529 return 0;
6530 }
6531 default:
6532 return -EOPNOTSUPP;
6533 }
1da177e4 6534}
ac1f60db
AB
6535
6536static void s2io_ethtool_get_strings(struct net_device *dev,
d44570e4 6537 u32 stringset, u8 *data)
1da177e4 6538{
fa1f0cb3 6539 int stat_size = 0;
4cf1653a 6540 struct s2io_nic *sp = netdev_priv(dev);
fa1f0cb3 6541
1da177e4
LT
6542 switch (stringset) {
6543 case ETH_SS_TEST:
6544 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6545 break;
6546 case ETH_SS_STATS:
fa1f0cb3 6547 stat_size = sizeof(ethtool_xena_stats_keys);
d44570e4
JP
6548 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6549 if (sp->device_type == XFRAME_II_DEVICE) {
fa1f0cb3 6550 memcpy(data + stat_size,
d44570e4
JP
6551 &ethtool_enhanced_stats_keys,
6552 sizeof(ethtool_enhanced_stats_keys));
fa1f0cb3
SS
6553 stat_size += sizeof(ethtool_enhanced_stats_keys);
6554 }
6555
6556 memcpy(data + stat_size, &ethtool_driver_stats_keys,
d44570e4 6557 sizeof(ethtool_driver_stats_keys));
1da177e4
LT
6558 }
6559}
1da177e4 6560
c8f44aff 6561static int s2io_set_features(struct net_device *dev, netdev_features_t features)
958de193
JM
6562{
6563 struct s2io_nic *sp = netdev_priv(dev);
c8f44aff 6564 netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
958de193
JM
6565
6566 if (changed && netif_running(dev)) {
b437a8cc
MM
6567 int rc;
6568
958de193
JM
6569 s2io_stop_all_tx_queue(sp);
6570 s2io_card_down(sp);
b437a8cc 6571 dev->features = features;
958de193
JM
6572 rc = s2io_card_up(sp);
6573 if (rc)
6574 s2io_reset(sp);
6575 else
6576 s2io_start_all_tx_queue(sp);
b437a8cc
MM
6577
6578 return rc ? rc : 1;
958de193
JM
6579 }
6580
b437a8cc 6581 return 0;
958de193
JM
6582}
6583
7282d491 6584static const struct ethtool_ops netdev_ethtool_ops = {
1da177e4
LT
6585 .get_drvinfo = s2io_ethtool_gdrvinfo,
6586 .get_regs_len = s2io_ethtool_get_regs_len,
6587 .get_regs = s2io_ethtool_gregs,
6588 .get_link = ethtool_op_get_link,
6589 .get_eeprom_len = s2io_get_eeprom_len,
6590 .get_eeprom = s2io_ethtool_geeprom,
6591 .set_eeprom = s2io_ethtool_seeprom,
0cec35eb 6592 .get_ringparam = s2io_ethtool_gringparam,
1da177e4
LT
6593 .get_pauseparam = s2io_ethtool_getpause_data,
6594 .set_pauseparam = s2io_ethtool_setpause_data,
1da177e4
LT
6595 .self_test = s2io_ethtool_test,
6596 .get_strings = s2io_ethtool_get_strings,
034e3450 6597 .set_phys_id = s2io_ethtool_set_led,
b9f2c044
JG
6598 .get_ethtool_stats = s2io_get_ethtool_stats,
6599 .get_sset_count = s2io_get_sset_count,
51f21442
PR
6600 .get_link_ksettings = s2io_ethtool_get_link_ksettings,
6601 .set_link_ksettings = s2io_ethtool_set_link_ksettings,
1da177e4
LT
6602};
6603
/**
 * s2io_ioctl - Entry point for the Ioctl
 * @dev : Device pointer.
 * @ifr : An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd : This is used to distinguish between the different commands that
 * can be passed to the IOCTL functions.
 * Description:
 * Currently there is no special functionality supported in IOCTL, hence
 * this function always returns -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
6620
/**
 * s2io_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: A driver entry point to change MTU size for the device.
 * Before changing the MTU the device must be stopped.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int ret = 0;

	/* NOTE(review): dev->mtu is updated before the card restart below,
	 * so on a failed s2io_card_up() the new MTU remains recorded while
	 * the device is left down — confirm callers tolerate this.
	 */
	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/* Live interface: a full card restart is required for the
		 * hardware to pick up the new maximum payload length. */
		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		ret = s2io_card_up(sp);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __func__);
			return ret;
		}
		s2io_wake_all_tx_queue(sp);
	} else { /* Device is down */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		/* Program the new max payload length directly into the
		 * RMAC register; takes effect when the card comes up. */
		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return ret;
}
6657
1da177e4
LT
/**
 * s2io_set_link - Set the LInk status
 * @data: long pointer to device private structue
 * Description: Sets the link status for the adapter
 *
 * Runs as deferred work (nic->set_link_task) under rtnl_lock.  The
 * __S2IO_STATE_LINK_TASK bit serializes against card reset/teardown:
 * if it is already set the task bails out without touching hardware.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic,
					    set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter (once) if it is quiescent, driving the
		 * link LED either via GPIO (boards with faulty link
		 * indicators) or via the adapter control register. */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					    nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* read back to post the write */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = true;
			} else {
				DBG_PRINT(ERR_DBG,
					  "%s: Error: device is not Quiescent\n",
					  dev->name);
				s2io_stop_all_tx_queue(nic);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			/* read back to post the write */
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 & (~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6740
1ee6dd77 6741static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
d44570e4
JP
6742 struct buffAdd *ba,
6743 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6744 u64 *temp2, int size)
5d3213cc
AR
6745{
6746 struct net_device *dev = sp->dev;
491abf25 6747 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
5d3213cc
AR
6748
6749 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6d517a27 6750 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
5d3213cc
AR
6751 /* allocate skb */
6752 if (*skb) {
6753 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6754 /*
6755 * As Rx frame are not going to be processed,
6756 * using same mapped address for the Rxd
6757 * buffer pointer
6758 */
6d517a27 6759 rxdp1->Buffer0_ptr = *temp0;
5d3213cc 6760 } else {
c056b734 6761 *skb = netdev_alloc_skb(dev, size);
5d3213cc 6762 if (!(*skb)) {
9e39f7c5
JP
6763 DBG_PRINT(INFO_DBG,
6764 "%s: Out of memory to allocate %s\n",
6765 dev->name, "1 buf mode SKBs");
ffb5df6c 6766 stats->mem_alloc_fail_cnt++;
5d3213cc
AR
6767 return -ENOMEM ;
6768 }
ffb5df6c 6769 stats->mem_allocated += (*skb)->truesize;
5d3213cc
AR
6770 /* storing the mapped addr in a temp variable
6771 * such it will be used for next rxd whose
6772 * Host Control is NULL
6773 */
6d517a27 6774 rxdp1->Buffer0_ptr = *temp0 =
d44570e4
JP
6775 pci_map_single(sp->pdev, (*skb)->data,
6776 size - NET_IP_ALIGN,
6777 PCI_DMA_FROMDEVICE);
8d8bb39b 6778 if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
491abf25 6779 goto memalloc_failed;
5d3213cc
AR
6780 rxdp->Host_Control = (unsigned long) (*skb);
6781 }
6782 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6d517a27 6783 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
5d3213cc
AR
6784 /* Two buffer Mode */
6785 if (*skb) {
6d517a27
VP
6786 rxdp3->Buffer2_ptr = *temp2;
6787 rxdp3->Buffer0_ptr = *temp0;
6788 rxdp3->Buffer1_ptr = *temp1;
5d3213cc 6789 } else {
c056b734 6790 *skb = netdev_alloc_skb(dev, size);
2ceaac75 6791 if (!(*skb)) {
9e39f7c5
JP
6792 DBG_PRINT(INFO_DBG,
6793 "%s: Out of memory to allocate %s\n",
6794 dev->name,
6795 "2 buf mode SKBs");
ffb5df6c 6796 stats->mem_alloc_fail_cnt++;
2ceaac75
DR
6797 return -ENOMEM;
6798 }
ffb5df6c 6799 stats->mem_allocated += (*skb)->truesize;
6d517a27 6800 rxdp3->Buffer2_ptr = *temp2 =
5d3213cc
AR
6801 pci_map_single(sp->pdev, (*skb)->data,
6802 dev->mtu + 4,
6803 PCI_DMA_FROMDEVICE);
8d8bb39b 6804 if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
491abf25 6805 goto memalloc_failed;
6d517a27 6806 rxdp3->Buffer0_ptr = *temp0 =
d44570e4
JP
6807 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6808 PCI_DMA_FROMDEVICE);
8d8bb39b 6809 if (pci_dma_mapping_error(sp->pdev,
d44570e4
JP
6810 rxdp3->Buffer0_ptr)) {
6811 pci_unmap_single(sp->pdev,
6812 (dma_addr_t)rxdp3->Buffer2_ptr,
6813 dev->mtu + 4,
6814 PCI_DMA_FROMDEVICE);
491abf25
VP
6815 goto memalloc_failed;
6816 }
5d3213cc
AR
6817 rxdp->Host_Control = (unsigned long) (*skb);
6818
6819 /* Buffer-1 will be dummy buffer not used */
6d517a27 6820 rxdp3->Buffer1_ptr = *temp1 =
5d3213cc 6821 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
d44570e4 6822 PCI_DMA_FROMDEVICE);
8d8bb39b 6823 if (pci_dma_mapping_error(sp->pdev,
d44570e4
JP
6824 rxdp3->Buffer1_ptr)) {
6825 pci_unmap_single(sp->pdev,
6826 (dma_addr_t)rxdp3->Buffer0_ptr,
6827 BUF0_LEN, PCI_DMA_FROMDEVICE);
6828 pci_unmap_single(sp->pdev,
6829 (dma_addr_t)rxdp3->Buffer2_ptr,
6830 dev->mtu + 4,
6831 PCI_DMA_FROMDEVICE);
491abf25
VP
6832 goto memalloc_failed;
6833 }
5d3213cc
AR
6834 }
6835 }
6836 return 0;
d44570e4
JP
6837
6838memalloc_failed:
6839 stats->pci_map_fail_cnt++;
6840 stats->mem_freed += (*skb)->truesize;
6841 dev_kfree_skb(*skb);
6842 return -ENOMEM;
5d3213cc 6843}
491abf25 6844
1ee6dd77
RB
6845static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6846 int size)
5d3213cc
AR
6847{
6848 struct net_device *dev = sp->dev;
6849 if (sp->rxd_mode == RXD_MODE_1) {
d44570e4 6850 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
5d3213cc
AR
6851 } else if (sp->rxd_mode == RXD_MODE_3B) {
6852 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6853 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
d44570e4 6854 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
5d3213cc
AR
6855 }
6856}
6857
1ee6dd77 6858static int rxd_owner_bit_reset(struct s2io_nic *sp)
5d3213cc
AR
6859{
6860 int i, j, k, blk_cnt = 0, size;
5d3213cc 6861 struct config_param *config = &sp->config;
ffb5df6c 6862 struct mac_info *mac_control = &sp->mac_control;
5d3213cc 6863 struct net_device *dev = sp->dev;
1ee6dd77 6864 struct RxD_t *rxdp = NULL;
5d3213cc 6865 struct sk_buff *skb = NULL;
1ee6dd77 6866 struct buffAdd *ba = NULL;
5d3213cc
AR
6867 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6868
6869 /* Calculate the size based on ring mode */
6870 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6871 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6872 if (sp->rxd_mode == RXD_MODE_1)
6873 size += NET_IP_ALIGN;
6874 else if (sp->rxd_mode == RXD_MODE_3B)
6875 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
5d3213cc
AR
6876
6877 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
6878 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6879 struct ring_info *ring = &mac_control->rings[i];
6880
d44570e4 6881 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
5d3213cc
AR
6882
6883 for (j = 0; j < blk_cnt; j++) {
6884 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
d44570e4
JP
6885 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6886 if (sp->rxd_mode == RXD_MODE_3B)
13d866a9 6887 ba = &ring->ba[j][k];
d44570e4 6888 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
64699336
JP
6889 &temp0_64,
6890 &temp1_64,
6891 &temp2_64,
d44570e4 6892 size) == -ENOMEM) {
ac1f90d6
SS
6893 return 0;
6894 }
5d3213cc
AR
6895
6896 set_rxd_buffer_size(sp, rxdp, size);
03cc864a 6897 dma_wmb();
5d3213cc
AR
6898 /* flip the Ownership bit to Hardware */
6899 rxdp->Control_1 |= RXD_OWN_XENA;
6900 }
6901 }
6902 }
6903 return 0;
6904
6905}
6906
d44570e4 6907static int s2io_add_isr(struct s2io_nic *sp)
1da177e4 6908{
e6a8fee2 6909 int ret = 0;
c92ca04b 6910 struct net_device *dev = sp->dev;
e6a8fee2 6911 int err = 0;
1da177e4 6912
eaae7f72 6913 if (sp->config.intr_type == MSI_X)
e6a8fee2
AR
6914 ret = s2io_enable_msi_x(sp);
6915 if (ret) {
6916 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
eaae7f72 6917 sp->config.intr_type = INTA;
20346722 6918 }
1da177e4 6919
d44570e4
JP
6920 /*
6921 * Store the values of the MSIX table in
6922 * the struct s2io_nic structure
6923 */
e6a8fee2 6924 store_xmsi_data(sp);
c92ca04b 6925
e6a8fee2 6926 /* After proper initialization of H/W, register ISR */
eaae7f72 6927 if (sp->config.intr_type == MSI_X) {
ac731ab6
SH
6928 int i, msix_rx_cnt = 0;
6929
f61e0a35
SH
6930 for (i = 0; i < sp->num_entries; i++) {
6931 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6932 if (sp->s2io_entries[i].type ==
d44570e4 6933 MSIX_RING_TYPE) {
a8c1d28a
DC
6934 snprintf(sp->desc[i],
6935 sizeof(sp->desc[i]),
6936 "%s:MSI-X-%d-RX",
ac731ab6
SH
6937 dev->name, i);
6938 err = request_irq(sp->entries[i].vector,
d44570e4
JP
6939 s2io_msix_ring_handle,
6940 0,
6941 sp->desc[i],
6942 sp->s2io_entries[i].arg);
ac731ab6 6943 } else if (sp->s2io_entries[i].type ==
d44570e4 6944 MSIX_ALARM_TYPE) {
a8c1d28a
DC
6945 snprintf(sp->desc[i],
6946 sizeof(sp->desc[i]),
6947 "%s:MSI-X-%d-TX",
d44570e4 6948 dev->name, i);
ac731ab6 6949 err = request_irq(sp->entries[i].vector,
d44570e4
JP
6950 s2io_msix_fifo_handle,
6951 0,
6952 sp->desc[i],
6953 sp->s2io_entries[i].arg);
ac731ab6 6954
fb6a825b 6955 }
ac731ab6
SH
6956 /* if either data or addr is zero print it. */
6957 if (!(sp->msix_info[i].addr &&
d44570e4 6958 sp->msix_info[i].data)) {
ac731ab6 6959 DBG_PRINT(ERR_DBG,
d44570e4
JP
6960 "%s @Addr:0x%llx Data:0x%llx\n",
6961 sp->desc[i],
6962 (unsigned long long)
6963 sp->msix_info[i].addr,
6964 (unsigned long long)
6965 ntohl(sp->msix_info[i].data));
ac731ab6 6966 } else
fb6a825b 6967 msix_rx_cnt++;
ac731ab6
SH
6968 if (err) {
6969 remove_msix_isr(sp);
6970
6971 DBG_PRINT(ERR_DBG,
d44570e4
JP
6972 "%s:MSI-X-%d registration "
6973 "failed\n", dev->name, i);
ac731ab6
SH
6974
6975 DBG_PRINT(ERR_DBG,
d44570e4
JP
6976 "%s: Defaulting to INTA\n",
6977 dev->name);
ac731ab6
SH
6978 sp->config.intr_type = INTA;
6979 break;
fb6a825b 6980 }
ac731ab6
SH
6981 sp->s2io_entries[i].in_use =
6982 MSIX_REGISTERED_SUCCESS;
c92ca04b 6983 }
e6a8fee2 6984 }
18b2b7bd 6985 if (!err) {
6cef2b8e 6986 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
9e39f7c5
JP
6987 DBG_PRINT(INFO_DBG,
6988 "MSI-X-TX entries enabled through alarm vector\n");
18b2b7bd 6989 }
e6a8fee2 6990 }
eaae7f72 6991 if (sp->config.intr_type == INTA) {
80777c54 6992 err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
d44570e4 6993 sp->name, dev);
e6a8fee2
AR
6994 if (err) {
6995 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6996 dev->name);
6997 return -1;
6998 }
6999 }
7000 return 0;
7001}
d44570e4
JP
7002
7003static void s2io_rem_isr(struct s2io_nic *sp)
e6a8fee2 7004{
18b2b7bd
SH
7005 if (sp->config.intr_type == MSI_X)
7006 remove_msix_isr(sp);
7007 else
7008 remove_inta_isr(sp);
e6a8fee2
AR
7009}
7010
d44570e4 7011static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
e6a8fee2
AR
7012{
7013 int cnt = 0;
1ee6dd77 7014 struct XENA_dev_config __iomem *bar0 = sp->bar0;
e6a8fee2 7015 register u64 val64 = 0;
5f490c96
SH
7016 struct config_param *config;
7017 config = &sp->config;
e6a8fee2 7018
9f74ffde
SH
7019 if (!is_s2io_card_up(sp))
7020 return;
7021
e6a8fee2
AR
7022 del_timer_sync(&sp->alarm_timer);
7023 /* If s2io_set_link task is executing, wait till it completes. */
d44570e4 7024 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
e6a8fee2 7025 msleep(50);
92b84437 7026 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
e6a8fee2 7027
5f490c96 7028 /* Disable napi */
f61e0a35
SH
7029 if (sp->config.napi) {
7030 int off = 0;
7031 if (config->intr_type == MSI_X) {
7032 for (; off < sp->config.rx_ring_num; off++)
7033 napi_disable(&sp->mac_control.rings[off].napi);
d44570e4 7034 }
f61e0a35
SH
7035 else
7036 napi_disable(&sp->napi);
7037 }
5f490c96 7038
e6a8fee2 7039 /* disable Tx and Rx traffic on the NIC */
d796fdb7
LV
7040 if (do_io)
7041 stop_nic(sp);
e6a8fee2
AR
7042
7043 s2io_rem_isr(sp);
1da177e4 7044
01e16faa
SH
7045 /* stop the tx queue, indicate link down */
7046 s2io_link(sp, LINK_DOWN);
7047
1da177e4 7048 /* Check if the device is Quiescent and then Reset the NIC */
d44570e4 7049 while (do_io) {
5d3213cc
AR
7050 /* As per the HW requirement we need to replenish the
7051 * receive buffer to avoid the ring bump. Since there is
7052 * no intention of processing the Rx frame at this pointwe are
70f23fd6 7053 * just setting the ownership bit of rxd in Each Rx
5d3213cc
AR
7054 * ring to HW and set the appropriate buffer size
7055 * based on the ring mode
7056 */
7057 rxd_owner_bit_reset(sp);
7058
1da177e4 7059 val64 = readq(&bar0->adapter_status);
19a60522 7060 if (verify_xena_quiescence(sp)) {
d44570e4
JP
7061 if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7062 break;
1da177e4
LT
7063 }
7064
7065 msleep(50);
7066 cnt++;
7067 if (cnt == 10) {
9e39f7c5
JP
7068 DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7069 "adapter status reads 0x%llx\n",
d44570e4 7070 (unsigned long long)val64);
1da177e4
LT
7071 break;
7072 }
d796fdb7
LV
7073 }
7074 if (do_io)
7075 s2io_reset(sp);
1da177e4 7076
7ba013ac 7077 /* Free all Tx buffers */
1da177e4 7078 free_tx_buffers(sp);
7ba013ac
K
7079
7080 /* Free all Rx buffers */
1da177e4
LT
7081 free_rx_buffers(sp);
7082
92b84437 7083 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
1da177e4
LT
7084}
7085
d44570e4 7086static void s2io_card_down(struct s2io_nic *sp)
d796fdb7
LV
7087{
7088 do_s2io_card_down(sp, 1);
7089}
7090
d44570e4 7091static int s2io_card_up(struct s2io_nic *sp)
1da177e4 7092{
cc6e7c44 7093 int i, ret = 0;
1da177e4 7094 struct config_param *config;
ffb5df6c 7095 struct mac_info *mac_control;
64699336 7096 struct net_device *dev = sp->dev;
e6a8fee2 7097 u16 interruptible;
1da177e4
LT
7098
7099 /* Initialize the H/W I/O registers */
9f74ffde
SH
7100 ret = init_nic(sp);
7101 if (ret != 0) {
1da177e4
LT
7102 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7103 dev->name);
9f74ffde
SH
7104 if (ret != -EIO)
7105 s2io_reset(sp);
7106 return ret;
1da177e4
LT
7107 }
7108
20346722
K
7109 /*
7110 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
7111 * Rx ring and initializing buffers into 30 Rx blocks
7112 */
1da177e4 7113 config = &sp->config;
ffb5df6c 7114 mac_control = &sp->mac_control;
1da177e4
LT
7115
7116 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7117 struct ring_info *ring = &mac_control->rings[i];
7118
7119 ring->mtu = dev->mtu;
f0c54ace 7120 ring->lro = !!(dev->features & NETIF_F_LRO);
13d866a9 7121 ret = fill_rx_buffers(sp, ring, 1);
0425b46a 7122 if (ret) {
1da177e4
LT
7123 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7124 dev->name);
7125 s2io_reset(sp);
7126 free_rx_buffers(sp);
7127 return -ENOMEM;
7128 }
7129 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
13d866a9 7130 ring->rx_bufs_left);
1da177e4 7131 }
5f490c96
SH
7132
7133 /* Initialise napi */
f61e0a35 7134 if (config->napi) {
f61e0a35
SH
7135 if (config->intr_type == MSI_X) {
7136 for (i = 0; i < sp->config.rx_ring_num; i++)
7137 napi_enable(&sp->mac_control.rings[i].napi);
7138 } else {
7139 napi_enable(&sp->napi);
7140 }
7141 }
5f490c96 7142
19a60522
SS
7143 /* Maintain the state prior to the open */
7144 if (sp->promisc_flg)
7145 sp->promisc_flg = 0;
7146 if (sp->m_cast_flg) {
7147 sp->m_cast_flg = 0;
d44570e4 7148 sp->all_multi_pos = 0;
19a60522 7149 }
1da177e4
LT
7150
7151 /* Setting its receive mode */
7152 s2io_set_multicast(dev);
7153
f0c54ace 7154 if (dev->features & NETIF_F_LRO) {
b41477f3 7155 /* Initialize max aggregatable pkts per session based on MTU */
7d3d0439 7156 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
d44570e4 7157 /* Check if we can use (if specified) user provided value */
7d3d0439
RA
7158 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7159 sp->lro_max_aggr_per_sess = lro_max_pkts;
7160 }
7161
1da177e4
LT
7162 /* Enable Rx Traffic and interrupts on the NIC */
7163 if (start_nic(sp)) {
7164 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
1da177e4 7165 s2io_reset(sp);
e6a8fee2
AR
7166 free_rx_buffers(sp);
7167 return -ENODEV;
7168 }
7169
7170 /* Add interrupt service routine */
7171 if (s2io_add_isr(sp) != 0) {
eaae7f72 7172 if (sp->config.intr_type == MSI_X)
e6a8fee2
AR
7173 s2io_rem_isr(sp);
7174 s2io_reset(sp);
1da177e4
LT
7175 free_rx_buffers(sp);
7176 return -ENODEV;
7177 }
7178
e84a2ac9
KC
7179 timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
7180 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
25fff88e 7181
01e16faa
SH
7182 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7183
e6a8fee2 7184 /* Enable select interrupts */
9caab458 7185 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
01e16faa
SH
7186 if (sp->config.intr_type != INTA) {
7187 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7188 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7189 } else {
e6a8fee2 7190 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 7191 interruptible |= TX_PIC_INTR;
e6a8fee2
AR
7192 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7193 }
7194
1da177e4
LT
7195 return 0;
7196}
7197
/**
 * s2io_restart_nic - Resets the NIC.
 * @work: work_struct embedded in the device private structure
 *        (rst_timer_task); the private structure is recovered with
 *        container_of().
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watch dog routine which is run holding a
 * spin lock.
 */

static void s2io_restart_nic(struct work_struct *work)
{
	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
	struct net_device *dev = sp->dev;

	/* rtnl serializes against open/close changing netif state under us */
	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
	}
	s2io_wake_all_tx_queue(sp);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
out_unlock:
	rtnl_unlock();
}
7227
/**
 * s2io_tx_watchdog - Watchdog for transmit side.
 * @dev : Pointer to net device structure
 * Description:
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 * If the Interface is jammed in such a situation, the hardware is
 * reset (by s2io_close) and restarted again (by s2io_open) to
 * overcome any problem that might have been caused in the hardware.
 * Return value:
 * void
 */

static void s2io_tx_watchdog(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/*
	 * Only recover when the link is up; the actual reset happens in
	 * s2io_restart_nic() via the scheduled rst_timer_task work item.
	 */
	if (netif_carrier_ok(dev)) {
		swstats->watchdog_timer_cnt++;
		schedule_work(&sp->rst_timer_task);
		swstats->soft_reset_cnt++;
	}
}
7252
/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @ring_data: the ring from which this RxD was extracted.
 * @rxdp: descriptor describing the received frame; its Host_Control
 *        field carries the skb pointer.
 * Description:
 * This function is called by the Rx interrupt service routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It mainly checks if the checksum is OK, if so adds it to the
 * SKBs cksum variable, increments the Rx packet count and passes the SKB
 * to the upper layer. If the checksum is wrong, it increments the Rx
 * packet error count, frees the SKB and returns error.
 * Return value:
 * SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = ring_data->dev;
	/* skb pointer was stashed in the descriptor when the buffer was posted */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long)rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *uninitialized_var(lro);
	u8 err_mask;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1)
			swstats->parity_err_cnt++;

		/* transfer code sits in the top bits of Control_1 */
		err_mask = err >> 48;
		switch (err_mask) {
		case 1:
			swstats->rx_parity_err_cnt++;
			break;

		case 2:
			swstats->rx_abort_cnt++;
			break;

		case 3:
			swstats->rx_parity_abort_cnt++;
			break;

		case 4:
			swstats->rx_rda_fail_cnt++;
			break;

		case 5:
			swstats->rx_unkn_prot_cnt++;
			break;

		case 6:
			swstats->rx_fcs_err_cnt++;
			break;

		case 7:
			swstats->rx_buf_size_err_cnt++;
			break;

		case 8:
			swstats->rx_rxd_corrupt_cnt++;
			break;

		case 15:
			swstats->rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				  dev->name, err_mask);
			dev->stats.rx_crc_errors++;
			swstats->mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* single-buffer mode: whole frame is in buffer 0 */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		skb_put(skb, len);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 2-buffer mode: copy header (buf0) in front of payload (buf2) */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* hardware checksum result is only trusted for unfragmented TCP/UDP */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
	    ((!ring_data->lro) ||
	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
	    (dev->features & NETIF_F_RXCSUM)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len = 0;
				u8 *tcp;
				int ret = 0;

				ret = s2io_club_tcp_session(ring_data,
							    skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro, skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro, skb, tcp_len);
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					swstats->flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len = lro->frags_len;
					swstats->sending_both++;
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not L2 aggregatable */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						  "%s: Samadhana!!\n",
						  __func__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb_checksum_none_assert(skb);
		}
	} else
		skb_checksum_none_assert(skb);

	swstats->mem_freed += skb->truesize;
send_up:
	skb_record_rx_queue(skb, ring_no);
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7440
7441/**
7442 * s2io_link - stops/starts the Tx queue.
7443 * @sp : private member of the device structure, which is a pointer to the
7444 * s2io_nic structure.
7445 * @link : inidicates whether link is UP/DOWN.
7446 * Description:
7447 * This function stops/starts the Tx queue depending on whether the link
20346722
K
7448 * status of the NIC is is down or up. This is called by the Alarm
7449 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
7450 * Return value:
7451 * void.
7452 */
7453
d44570e4 7454static void s2io_link(struct s2io_nic *sp, int link)
1da177e4 7455{
64699336 7456 struct net_device *dev = sp->dev;
ffb5df6c 7457 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7458
7459 if (link != sp->last_link_state) {
b7c5678f 7460 init_tti(sp, link);
1da177e4
LT
7461 if (link == LINK_DOWN) {
7462 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
3a3d5756 7463 s2io_stop_all_tx_queue(sp);
1da177e4 7464 netif_carrier_off(dev);
ffb5df6c
JP
7465 if (swstats->link_up_cnt)
7466 swstats->link_up_time =
7467 jiffies - sp->start_time;
7468 swstats->link_down_cnt++;
1da177e4
LT
7469 } else {
7470 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
ffb5df6c
JP
7471 if (swstats->link_down_cnt)
7472 swstats->link_down_time =
d44570e4 7473 jiffies - sp->start_time;
ffb5df6c 7474 swstats->link_up_cnt++;
1da177e4 7475 netif_carrier_on(dev);
3a3d5756 7476 s2io_wake_all_tx_queue(sp);
1da177e4
LT
7477 }
7478 }
7479 sp->last_link_state = link;
491976b2 7480 sp->start_time = jiffies;
1da177e4
LT
7481}
7482
20346722
K
7483/**
7484 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7485 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
7486 * s2io_nic structure.
7487 * Description:
7488 * This function initializes a few of the PCI and PCI-X configuration registers
7489 * with recommended values.
7490 * Return value:
7491 * void
7492 */
7493
d44570e4 7494static void s2io_init_pci(struct s2io_nic *sp)
1da177e4 7495{
20346722 7496 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
7497
7498 /* Enable Data Parity Error Recovery in PCI-X command register. */
7499 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7500 &(pcix_cmd));
1da177e4 7501 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7502 (pcix_cmd | 1));
1da177e4 7503 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7504 &(pcix_cmd));
1da177e4
LT
7505
7506 /* Set the PErr Response bit in PCI command register. */
7507 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7508 pci_write_config_word(sp->pdev, PCI_COMMAND,
7509 (pci_cmd | PCI_COMMAND_PARITY));
7510 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
7511}
7512
/*
 * s2io_verify_parm - sanity-check module load parameters.
 * @pdev: PCI device, used to detect Xframe I vs Herc (MSI-X capability).
 * @dev_intr_type: in/out requested interrupt type; forced to INTA when
 *                 the request is invalid or unsupported by the device.
 * @dev_multiq: out flag, set when multiqueue operation was requested.
 *
 * Out-of-range module parameters (tx_fifo_num, tx_steering_type,
 * rx_ring_num, rx_ring_mode, rx_ring_sz[]) are clamped to supported
 * values with a diagnostic message rather than failing the probe.
 * Always returns SUCCESS.
 */
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
			    u8 *dev_multiq)
{
	int i;

	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
			  "(%d) not supported\n", tx_fifo_num);

		/* clamp to the nearest legal value */
		if (tx_fifo_num < 1)
			tx_fifo_num = 1;
		else
			tx_fifo_num = MAX_TX_FIFOS;

		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
	}

	if (multiq)
		*dev_multiq = multiq;

	/* steering is meaningless with a single fifo */
	if (tx_steering_type && (1 == tx_fifo_num)) {
		if (tx_steering_type != TX_DEFAULT_STEERING)
			DBG_PRINT(ERR_DBG,
				  "Tx steering is not supported with "
				  "one fifo. Disabling Tx steering.\n");
		tx_steering_type = NO_STEERING;
	}

	if ((tx_steering_type < NO_STEERING) ||
	    (tx_steering_type > TX_DEFAULT_STEERING)) {
		DBG_PRINT(ERR_DBG,
			  "Requested transmit steering not supported\n");
		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
		tx_steering_type = NO_STEERING;
	}

	if (rx_ring_num > MAX_RX_RINGS) {
		DBG_PRINT(ERR_DBG,
			  "Requested number of rx rings not supported\n");
		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
			  MAX_RX_RINGS);
		rx_ring_num = MAX_RX_RINGS;
	}

	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	/* only Herc (Xframe II) devices support MSI-X */
	if ((*dev_intr_type == MSI_X) &&
	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	/* 1 = single-buffer mode, 2 = 2-buffer mode; nothing else is valid */
	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
		rx_ring_mode = 1;
	}

	for (i = 0; i < MAX_RX_RINGS; i++)
		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
				  "supported\nDefaulting to %d\n",
				  MAX_RX_BLOCKS_PER_RING);
			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
		}

	return SUCCESS;
}
7587
9fc93a41
SS
7588/**
7589 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7590 * or Traffic class respectively.
b7c5678f 7591 * @nic: device private variable
9fc93a41
SS
7592 * Description: The function configures the receive steering to
7593 * desired receive ring.
7594 * Return Value: SUCCESS on success and
7595 * '-1' on failure (endian settings incorrect).
7596 */
7597static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7598{
7599 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7600 register u64 val64 = 0;
7601
7602 if (ds_codepoint > 63)
7603 return FAILURE;
7604
7605 val64 = RTS_DS_MEM_DATA(ring);
7606 writeq(val64, &bar0->rts_ds_mem_data);
7607
7608 val64 = RTS_DS_MEM_CTRL_WE |
7609 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7610 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7611
7612 writeq(val64, &bar0->rts_ds_mem_ctrl);
7613
7614 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
d44570e4
JP
7615 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7616 S2IO_BIT_RESET);
9fc93a41
SS
7617}
7618
/* net_device callbacks; installed as dev->netdev_ops during probe. */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open = s2io_open,
	.ndo_stop = s2io_close,
	.ndo_get_stats = s2io_get_stats,
	.ndo_start_xmit = s2io_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = s2io_set_multicast,
	.ndo_do_ioctl = s2io_ioctl,
	.ndo_set_mac_address = s2io_set_mac_addr,
	.ndo_change_mtu = s2io_change_mtu,
	.ndo_set_features = s2io_set_features,
	.ndo_tx_timeout = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = s2io_netpoll,
#endif
};
7635
1da177e4 7636/**
20346722 7637 * s2io_init_nic - Initialization of the adapter .
1da177e4
LT
7638 * @pdev : structure containing the PCI related information of the device.
7639 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7640 * Description:
7641 * The function initializes an adapter identified by the pci_dec structure.
20346722
K
7642 * All OS related initialization including memory and device structure and
7643 * initlaization of the device private variable is done. Also the swapper
7644 * control register is initialized to enable read and write into the I/O
1da177e4
LT
7645 * registers of the device.
7646 * Return value:
7647 * returns 0 on success and negative on failure.
7648 */
7649
3a036ce5 7650static int
1da177e4
LT
7651s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7652{
1ee6dd77 7653 struct s2io_nic *sp;
1da177e4 7654 struct net_device *dev;
1da177e4 7655 int i, j, ret;
f957bcf0 7656 int dma_flag = false;
1da177e4
LT
7657 u32 mac_up, mac_down;
7658 u64 val64 = 0, tmp64 = 0;
1ee6dd77 7659 struct XENA_dev_config __iomem *bar0 = NULL;
1da177e4 7660 u16 subid;
1da177e4 7661 struct config_param *config;
ffb5df6c 7662 struct mac_info *mac_control;
541ae68f 7663 int mode;
cc6e7c44 7664 u8 dev_intr_type = intr_type;
3a3d5756 7665 u8 dev_multiq = 0;
1da177e4 7666
3a3d5756
SH
7667 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7668 if (ret)
9dc737a7 7669 return ret;
1da177e4 7670
d44570e4
JP
7671 ret = pci_enable_device(pdev);
7672 if (ret) {
1da177e4 7673 DBG_PRINT(ERR_DBG,
9e39f7c5 7674 "%s: pci_enable_device failed\n", __func__);
1da177e4
LT
7675 return ret;
7676 }
7677
6a35528a 7678 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
9e39f7c5 7679 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
f957bcf0 7680 dma_flag = true;
d44570e4 7681 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1da177e4 7682 DBG_PRINT(ERR_DBG,
d44570e4
JP
7683 "Unable to obtain 64bit DMA "
7684 "for consistent allocations\n");
1da177e4
LT
7685 pci_disable_device(pdev);
7686 return -ENOMEM;
7687 }
284901a9 7688 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
9e39f7c5 7689 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
1da177e4
LT
7690 } else {
7691 pci_disable_device(pdev);
7692 return -ENOMEM;
7693 }
d44570e4
JP
7694 ret = pci_request_regions(pdev, s2io_driver_name);
7695 if (ret) {
9e39f7c5 7696 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
d44570e4 7697 __func__, ret);
eccb8628
VP
7698 pci_disable_device(pdev);
7699 return -ENODEV;
1da177e4 7700 }
3a3d5756 7701 if (dev_multiq)
6cfc482b 7702 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
3a3d5756 7703 else
b19fa1fa 7704 dev = alloc_etherdev(sizeof(struct s2io_nic));
1da177e4 7705 if (dev == NULL) {
1da177e4
LT
7706 pci_disable_device(pdev);
7707 pci_release_regions(pdev);
7708 return -ENODEV;
7709 }
7710
7711 pci_set_master(pdev);
7712 pci_set_drvdata(pdev, dev);
1da177e4
LT
7713 SET_NETDEV_DEV(dev, &pdev->dev);
7714
7715 /* Private member variable initialized to s2io NIC structure */
4cf1653a 7716 sp = netdev_priv(dev);
1da177e4
LT
7717 sp->dev = dev;
7718 sp->pdev = pdev;
1da177e4 7719 sp->high_dma_flag = dma_flag;
f957bcf0 7720 sp->device_enabled_once = false;
da6971d8
AR
7721 if (rx_ring_mode == 1)
7722 sp->rxd_mode = RXD_MODE_1;
7723 if (rx_ring_mode == 2)
7724 sp->rxd_mode = RXD_MODE_3B;
da6971d8 7725
eaae7f72 7726 sp->config.intr_type = dev_intr_type;
1da177e4 7727
541ae68f 7728 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
d44570e4 7729 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
541ae68f
K
7730 sp->device_type = XFRAME_II_DEVICE;
7731 else
7732 sp->device_type = XFRAME_I_DEVICE;
7733
6aa20a22 7734
1da177e4
LT
7735 /* Initialize some PCI/PCI-X fields of the NIC. */
7736 s2io_init_pci(sp);
7737
20346722 7738 /*
1da177e4 7739 * Setting the device configuration parameters.
20346722
K
7740 * Most of these parameters can be specified by the user during
7741 * module insertion as they are module loadable parameters. If
7742 * these parameters are not not specified during load time, they
1da177e4
LT
7743 * are initialized with default values.
7744 */
1da177e4 7745 config = &sp->config;
ffb5df6c 7746 mac_control = &sp->mac_control;
1da177e4 7747
596c5c97 7748 config->napi = napi;
6cfc482b 7749 config->tx_steering_type = tx_steering_type;
596c5c97 7750
1da177e4 7751 /* Tx side parameters. */
6cfc482b
SH
7752 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7753 config->tx_fifo_num = MAX_TX_FIFOS;
7754 else
7755 config->tx_fifo_num = tx_fifo_num;
7756
7757 /* Initialize the fifos used for tx steering */
7758 if (config->tx_fifo_num < 5) {
d44570e4
JP
7759 if (config->tx_fifo_num == 1)
7760 sp->total_tcp_fifos = 1;
7761 else
7762 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7763 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7764 sp->total_udp_fifos = 1;
7765 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
6cfc482b
SH
7766 } else {
7767 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
d44570e4 7768 FIFO_OTHER_MAX_NUM);
6cfc482b
SH
7769 sp->udp_fifo_idx = sp->total_tcp_fifos;
7770 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7771 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7772 }
7773
3a3d5756 7774 config->multiq = dev_multiq;
6cfc482b 7775 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7776 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7777
7778 tx_cfg->fifo_len = tx_fifo_len[i];
7779 tx_cfg->fifo_priority = i;
1da177e4
LT
7780 }
7781
20346722
K
7782 /* mapping the QoS priority to the configured fifos */
7783 for (i = 0; i < MAX_TX_FIFOS; i++)
3a3d5756 7784 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
20346722 7785
6cfc482b
SH
7786 /* map the hashing selector table to the configured fifos */
7787 for (i = 0; i < config->tx_fifo_num; i++)
7788 sp->fifo_selector[i] = fifo_selector[i];
7789
7790
1da177e4
LT
7791 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7792 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7793 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7794
7795 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7796 if (tx_cfg->fifo_len < 65) {
1da177e4
LT
7797 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7798 break;
7799 }
7800 }
fed5eccd
AR
7801 /* + 2 because one Txd for skb->data and one Txd for UFO */
7802 config->max_txds = MAX_SKB_FRAGS + 2;
1da177e4
LT
7803
7804 /* Rx side parameters. */
1da177e4 7805 config->rx_ring_num = rx_ring_num;
0425b46a 7806 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7807 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7808 struct ring_info *ring = &mac_control->rings[i];
7809
7810 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7811 rx_cfg->ring_priority = i;
7812 ring->rx_bufs_left = 0;
7813 ring->rxd_mode = sp->rxd_mode;
7814 ring->rxd_count = rxd_count[sp->rxd_mode];
7815 ring->pdev = sp->pdev;
7816 ring->dev = sp->dev;
1da177e4
LT
7817 }
7818
7819 for (i = 0; i < rx_ring_num; i++) {
13d866a9
JP
7820 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7821
7822 rx_cfg->ring_org = RING_ORG_BUFF1;
7823 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
1da177e4
LT
7824 }
7825
7826 /* Setting Mac Control parameters */
7827 mac_control->rmac_pause_time = rmac_pause_time;
7828 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7829 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7830
7831
1da177e4
LT
7832 /* initialize the shared memory used by the NIC and the host */
7833 if (init_shared_mem(sp)) {
d44570e4 7834 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
1da177e4
LT
7835 ret = -ENOMEM;
7836 goto mem_alloc_failed;
7837 }
7838
275f165f 7839 sp->bar0 = pci_ioremap_bar(pdev, 0);
1da177e4 7840 if (!sp->bar0) {
19a60522 7841 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
1da177e4
LT
7842 dev->name);
7843 ret = -ENOMEM;
7844 goto bar0_remap_failed;
7845 }
7846
275f165f 7847 sp->bar1 = pci_ioremap_bar(pdev, 2);
1da177e4 7848 if (!sp->bar1) {
19a60522 7849 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
1da177e4
LT
7850 dev->name);
7851 ret = -ENOMEM;
7852 goto bar1_remap_failed;
7853 }
7854
1da177e4
LT
7855 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7856 for (j = 0; j < MAX_TX_FIFOS; j++) {
43d620c8 7857 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
1da177e4
LT
7858 }
7859
7860 /* Driver entry points */
04025095 7861 dev->netdev_ops = &s2io_netdev_ops;
7ad24ea4 7862 dev->ethtool_ops = &netdev_ethtool_ops;
b437a8cc
MM
7863 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7864 NETIF_F_TSO | NETIF_F_TSO6 |
7865 NETIF_F_RXCSUM | NETIF_F_LRO;
7866 dev->features |= dev->hw_features |
f646968f 7867 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
f957bcf0 7868 if (sp->high_dma_flag == true)
1da177e4 7869 dev->features |= NETIF_F_HIGHDMA;
1da177e4 7870 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
c4028958
DH
7871 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7872 INIT_WORK(&sp->set_link_task, s2io_set_link);
1da177e4 7873
e960fc5c 7874 pci_save_state(sp->pdev);
1da177e4
LT
7875
7876 /* Setting swapper control on the NIC, for proper reset operation */
7877 if (s2io_set_swapper(sp)) {
9e39f7c5 7878 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
1da177e4
LT
7879 dev->name);
7880 ret = -EAGAIN;
7881 goto set_swap_failed;
7882 }
7883
541ae68f
K
7884 /* Verify if the Herc works on the slot its placed into */
7885 if (sp->device_type & XFRAME_II_DEVICE) {
7886 mode = s2io_verify_pci_mode(sp);
7887 if (mode < 0) {
9e39f7c5
JP
7888 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7889 __func__);
541ae68f
K
7890 ret = -EBADSLT;
7891 goto set_swap_failed;
7892 }
7893 }
7894
f61e0a35
SH
7895 if (sp->config.intr_type == MSI_X) {
7896 sp->num_entries = config->rx_ring_num + 1;
7897 ret = s2io_enable_msi_x(sp);
7898
7899 if (!ret) {
7900 ret = s2io_test_msi(sp);
7901 /* rollback MSI-X, will re-enable during add_isr() */
7902 remove_msix_isr(sp);
7903 }
7904 if (ret) {
7905
7906 DBG_PRINT(ERR_DBG,
9e39f7c5 7907 "MSI-X requested but failed to enable\n");
f61e0a35
SH
7908 sp->config.intr_type = INTA;
7909 }
7910 }
7911
7912 if (config->intr_type == MSI_X) {
13d866a9
JP
7913 for (i = 0; i < config->rx_ring_num ; i++) {
7914 struct ring_info *ring = &mac_control->rings[i];
7915
7916 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7917 }
f61e0a35
SH
7918 } else {
7919 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7920 }
7921
541ae68f
K
7922 /* Not needed for Herc */
7923 if (sp->device_type & XFRAME_I_DEVICE) {
7924 /*
7925 * Fix for all "FFs" MAC address problems observed on
7926 * Alpha platforms
7927 */
7928 fix_mac_address(sp);
7929 s2io_reset(sp);
7930 }
1da177e4
LT
7931
7932 /*
1da177e4
LT
7933 * MAC address initialization.
7934 * For now only one mac address will be read and used.
7935 */
7936 bar0 = sp->bar0;
7937 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
d44570e4 7938 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
1da177e4 7939 writeq(val64, &bar0->rmac_addr_cmd_mem);
c92ca04b 7940 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
7941 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7942 S2IO_BIT_RESET);
1da177e4 7943 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4 7944 mac_down = (u32)tmp64;
1da177e4
LT
7945 mac_up = (u32) (tmp64 >> 32);
7946
1da177e4
LT
7947 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7948 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7949 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7950 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7951 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7952 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7953
1da177e4
LT
7954 /* Set the factory defined MAC address initially */
7955 dev->addr_len = ETH_ALEN;
7956 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7957
faa4f796
SH
7958 /* initialize number of multicast & unicast MAC entries variables */
7959 if (sp->device_type == XFRAME_I_DEVICE) {
7960 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7961 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7962 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7963 } else if (sp->device_type == XFRAME_II_DEVICE) {
7964 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7965 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7966 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7967 }
7968
18c310fb
JW
7969 /* MTU range: 46 - 9600 */
7970 dev->min_mtu = MIN_MTU;
7971 dev->max_mtu = S2IO_JUMBO_SIZE;
7972
faa4f796
SH
7973 /* store mac addresses from CAM to s2io_nic structure */
7974 do_s2io_store_unicast_mc(sp);
7975
f61e0a35
SH
7976 /* Configure MSIX vector for number of rings configured plus one */
7977 if ((sp->device_type == XFRAME_II_DEVICE) &&
d44570e4 7978 (config->intr_type == MSI_X))
f61e0a35
SH
7979 sp->num_entries = config->rx_ring_num + 1;
7980
d44570e4 7981 /* Store the values of the MSIX table in the s2io_nic structure */
c77dd43e 7982 store_xmsi_data(sp);
b41477f3
AR
7983 /* reset Nic and bring it to known state */
7984 s2io_reset(sp);
7985
1da177e4 7986 /*
99993af6 7987 * Initialize link state flags
541ae68f 7988 * and the card state parameter
1da177e4 7989 */
92b84437 7990 sp->state = 0;
1da177e4 7991
1da177e4 7992 /* Initialize spinlocks */
13d866a9
JP
7993 for (i = 0; i < sp->config.tx_fifo_num; i++) {
7994 struct fifo_info *fifo = &mac_control->fifos[i];
7995
7996 spin_lock_init(&fifo->tx_lock);
7997 }
db874e65 7998
20346722
K
7999 /*
8000 * SXE-002: Configure link and activity LED to init state
8001 * on driver load.
1da177e4
LT
8002 */
8003 subid = sp->pdev->subsystem_device;
8004 if ((subid & 0xFF) >= 0x07) {
8005 val64 = readq(&bar0->gpio_control);
8006 val64 |= 0x0000800000000000ULL;
8007 writeq(val64, &bar0->gpio_control);
8008 val64 = 0x0411040400000000ULL;
d44570e4 8009 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
8010 val64 = readq(&bar0->gpio_control);
8011 }
8012
8013 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8014
8015 if (register_netdev(dev)) {
8016 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8017 ret = -ENODEV;
8018 goto register_failed;
8019 }
9dc737a7 8020 s2io_vpd_read(sp);
926bd900 8021 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
d44570e4 8022 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
44c10138 8023 sp->product_name, pdev->revision);
b41477f3
AR
8024 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8025 s2io_driver_version);
9e39f7c5
JP
8026 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8027 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
9dc737a7 8028 if (sp->device_type & XFRAME_II_DEVICE) {
0b1f7ebe 8029 mode = s2io_print_pci_mode(sp);
541ae68f 8030 if (mode < 0) {
541ae68f 8031 ret = -EBADSLT;
9dc737a7 8032 unregister_netdev(dev);
541ae68f
K
8033 goto set_swap_failed;
8034 }
541ae68f 8035 }
d44570e4
JP
8036 switch (sp->rxd_mode) {
8037 case RXD_MODE_1:
8038 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8039 dev->name);
8040 break;
8041 case RXD_MODE_3B:
8042 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8043 dev->name);
8044 break;
9dc737a7 8045 }
db874e65 8046
f61e0a35
SH
8047 switch (sp->config.napi) {
8048 case 0:
8049 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8050 break;
8051 case 1:
db874e65 8052 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
f61e0a35
SH
8053 break;
8054 }
3a3d5756
SH
8055
8056 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
d44570e4 8057 sp->config.tx_fifo_num);
3a3d5756 8058
0425b46a
SH
8059 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8060 sp->config.rx_ring_num);
8061
d44570e4
JP
8062 switch (sp->config.intr_type) {
8063 case INTA:
8064 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8065 break;
8066 case MSI_X:
8067 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8068 break;
9dc737a7 8069 }
3a3d5756 8070 if (sp->config.multiq) {
13d866a9
JP
8071 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8072 struct fifo_info *fifo = &mac_control->fifos[i];
8073
8074 fifo->multiq = config->multiq;
8075 }
3a3d5756 8076 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
d44570e4 8077 dev->name);
3a3d5756
SH
8078 } else
8079 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
d44570e4 8080 dev->name);
3a3d5756 8081
6cfc482b
SH
8082 switch (sp->config.tx_steering_type) {
8083 case NO_STEERING:
d44570e4
JP
8084 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8085 dev->name);
8086 break;
6cfc482b 8087 case TX_PRIORITY_STEERING:
d44570e4
JP
8088 DBG_PRINT(ERR_DBG,
8089 "%s: Priority steering enabled for transmit\n",
8090 dev->name);
6cfc482b
SH
8091 break;
8092 case TX_DEFAULT_STEERING:
d44570e4
JP
8093 DBG_PRINT(ERR_DBG,
8094 "%s: Default steering enabled for transmit\n",
8095 dev->name);
6cfc482b
SH
8096 }
8097
f0c54ace
AW
8098 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8099 dev->name);
7ba013ac 8100 /* Initialize device name */
a8c1d28a
DC
8101 snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8102 sp->product_name);
7ba013ac 8103
cd0fce03
BL
8104 if (vlan_tag_strip)
8105 sp->vlan_strip_flag = 1;
8106 else
8107 sp->vlan_strip_flag = 0;
8108
20346722
K
8109 /*
8110 * Make Link state as off at this point, when the Link change
8111 * interrupt comes the state will be automatically changed to
1da177e4
LT
8112 * the right state.
8113 */
8114 netif_carrier_off(dev);
1da177e4
LT
8115
8116 return 0;
8117
d44570e4
JP
8118register_failed:
8119set_swap_failed:
1da177e4 8120 iounmap(sp->bar1);
d44570e4 8121bar1_remap_failed:
1da177e4 8122 iounmap(sp->bar0);
d44570e4
JP
8123bar0_remap_failed:
8124mem_alloc_failed:
1da177e4
LT
8125 free_shared_mem(sp);
8126 pci_disable_device(pdev);
eccb8628 8127 pci_release_regions(pdev);
1da177e4
LT
8128 free_netdev(dev);
8129
8130 return ret;
8131}
8132
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resource held up by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = netdev_priv(dev);

	/* Ensure no deferred reset/link work is still running or queued
	 * before we start tearing the device down. */
	cancel_work_sync(&sp->rst_timer_task);
	cancel_work_sync(&sp->set_link_task);

	unregister_netdev(dev);

	/* Release resources in reverse order of acquisition in the probe
	 * path: shared DMA memory, BAR mappings, PCI regions, netdev. */
	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8166
/* Register s2io_driver with the PCI core; this macro expands to the
 * standard module_init()/module_exit() boilerplate. */
module_pci_driver(s2io_driver);
7d3d0439 8168
6aa20a22 8169static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
d44570e4
JP
8170 struct tcphdr **tcp, struct RxD_t *rxdp,
8171 struct s2io_nic *sp)
7d3d0439
RA
8172{
8173 int ip_off;
8174 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8175
8176 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
d44570e4
JP
8177 DBG_PRINT(INIT_DBG,
8178 "%s: Non-TCP frames not supported for LRO\n",
b39d66a8 8179 __func__);
7d3d0439
RA
8180 return -1;
8181 }
8182
cdb5bf02 8183 /* Checking for DIX type or DIX type with VLAN */
d44570e4 8184 if ((l2_type == 0) || (l2_type == 4)) {
cdb5bf02
SH
8185 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8186 /*
8187 * If vlan stripping is disabled and the frame is VLAN tagged,
8188 * shift the offset by the VLAN header size bytes.
8189 */
cd0fce03 8190 if ((!sp->vlan_strip_flag) &&
d44570e4 8191 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
cdb5bf02
SH
8192 ip_off += HEADER_VLAN_SIZE;
8193 } else {
7d3d0439 8194 /* LLC, SNAP etc are considered non-mergeable */
cdb5bf02 8195 return -1;
7d3d0439
RA
8196 }
8197
64699336 8198 *ip = (struct iphdr *)(buffer + ip_off);
7d3d0439
RA
8199 ip_len = (u8)((*ip)->ihl);
8200 ip_len <<= 2;
8201 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8202
8203 return 0;
8204}
8205
1ee6dd77 8206static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8207 struct tcphdr *tcp)
8208{
d44570e4
JP
8209 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8210 if ((lro->iph->saddr != ip->saddr) ||
8211 (lro->iph->daddr != ip->daddr) ||
8212 (lro->tcph->source != tcp->source) ||
8213 (lro->tcph->dest != tcp->dest))
7d3d0439
RA
8214 return -1;
8215 return 0;
8216}
8217
8218static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8219{
d44570e4 8220 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
7d3d0439
RA
8221}
8222
1ee6dd77 8223static void initiate_new_session(struct lro *lro, u8 *l2h,
d44570e4
JP
8224 struct iphdr *ip, struct tcphdr *tcp,
8225 u32 tcp_pyld_len, u16 vlan_tag)
7d3d0439 8226{
d44570e4 8227 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8228 lro->l2h = l2h;
8229 lro->iph = ip;
8230 lro->tcph = tcp;
8231 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
c8855953 8232 lro->tcp_ack = tcp->ack_seq;
7d3d0439
RA
8233 lro->sg_num = 1;
8234 lro->total_len = ntohs(ip->tot_len);
8235 lro->frags_len = 0;
cdb5bf02 8236 lro->vlan_tag = vlan_tag;
6aa20a22 8237 /*
d44570e4
JP
8238 * Check if we saw TCP timestamp.
8239 * Other consistency checks have already been done.
8240 */
7d3d0439 8241 if (tcp->doff == 8) {
c8855953
SR
8242 __be32 *ptr;
8243 ptr = (__be32 *)(tcp+1);
7d3d0439 8244 lro->saw_ts = 1;
c8855953 8245 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8246 lro->cur_tsecr = *(ptr+2);
8247 }
8248 lro->in_use = 1;
8249}
8250
/*
 * Rewrite the first packet's IP and TCP headers so the aggregated
 * super-packet looks like a single large frame before it is handed to
 * the stack, and update the LRO statistics counters.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	/* Update L3 header: fix the checksum incrementally for the new
	 * tot_len BEFORE storing it (csum_replace2 needs old and new). */
	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
	ip->tot_len = htons(lro->total_len);

	/* Update L4 header with the most recent ack and window seen. */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		/* Timestamp option immediately follows the base header
		 * (tcp->doff == 8 was verified when the session began). */
		__be32 *ptr = (__be32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	swstats->sum_avg_pkts_aggregated += lro->sg_num;
	swstats->num_aggregations++;
}
8279
1ee6dd77 8280static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
d44570e4 8281 struct tcphdr *tcp, u32 l4_pyld)
7d3d0439 8282{
d44570e4 8283 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
7d3d0439
RA
8284 lro->total_len += l4_pyld;
8285 lro->frags_len += l4_pyld;
8286 lro->tcp_next_seq += l4_pyld;
8287 lro->sg_num++;
8288
8289 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8290 lro->tcp_ack = tcp->ack_seq;
8291 lro->window = tcp->window;
6aa20a22 8292
7d3d0439 8293 if (lro->saw_ts) {
c8855953 8294 __be32 *ptr;
7d3d0439 8295 /* Update tsecr and tsval from this packet */
c8855953
SR
8296 ptr = (__be32 *)(tcp+1);
8297 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8298 lro->cur_tsecr = *(ptr + 2);
8299 }
8300}
8301
/*
 * L3/L4-level screening of a packet for aggregation into session @l_lro
 * (@l_lro may be NULL when probing whether a NEW session could be built).
 * Returns 0 if the packet is mergeable, -1 if the session must be flushed
 * or the packet sent up unaggregated.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst ||
	    tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 * doff == 5: no options; doff == 8: room for exactly the
	 * 10-byte timestamp option plus NOP padding.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		/* Skip NOP padding, then require a timestamp option. */
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8359
/*
 * s2io_club_tcp_session - main LRO decision function for a received frame
 * @ring_data: ring the frame arrived on (holds the per-ring LRO sessions)
 * @buffer: start of the frame data
 * @tcp: out - points at the frame's TCP header on return
 * @tcp_len: out - TCP payload length of this frame
 * @lro: out - the session this frame maps to (NULL if none available)
 * @rxdp: receive descriptor for the frame
 * @sp: device private structure
 *
 * Return codes consumed by the rx path:
 *   0 - all sessions in use (packet sent up as-is, *lro == NULL)
 *   1 - packet aggregated into an existing session
 *   2 - out-of-sequence or non-mergeable: flush session and packet
 *   3 - new session begun with this packet
 *   4 - aggregated and session reached max size: flush the LRO
 *   5 - packet not L3/L4 aggregatable: send it up unmodified
 *   negative - not LRO-capable at L2 (from check_L2_lro_capable)
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);

	/* First pass: look for an in-use session matching this flow. */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: grab the first free session slot. */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		/* Flushing: finalize the headers of the aggregate so far. */
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8457
1ee6dd77 8458static void clear_lro_session(struct lro *lro)
7d3d0439 8459{
1ee6dd77 8460 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
8461
8462 memset(lro, 0, lro_struct_size);
8463}
8464
/*
 * Hand a completed receive skb to the network stack, restoring the VLAN
 * tag (if the hardware stripped one) and choosing the NAPI or legacy
 * receive path according to the driver configuration.
 */
static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
{
	struct net_device *dev = skb->dev;
	struct s2io_nic *sp = netdev_priv(dev);

	skb->protocol = eth_type_trans(skb, dev);
	if (vlan_tag && sp->vlan_strip_flag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	if (sp->config.napi)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
}
8478
/*
 * Append a follow-on packet's TCP payload to an LRO session's parent skb
 * by chaining it onto the parent's frag_list, and account the added bytes.
 * Caller has already updated lro->frags_len via aggregate_new_rx().
 */
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len)
{
	struct sk_buff *first = lro->parent;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	first->len += tcp_len;
	first->data_len = lro->frags_len;
	/* Strip everything before the TCP payload from the new skb. */
	skb_pull(skb, (skb->len - tcp_len));
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;	/* chain after previous fragment */
	else
		skb_shinfo(first)->frag_list = skb;	/* first fragment */
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	swstats->clubbed_frms_cnt++;
}
d796fdb7
LV
8496
/**
 * s2io_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* Permanent failure: no point asking for a slot reset. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
8524
/**
 * s2io_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}
8549
/**
 * s2io_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that its OK to resume normal operation.
 */
static void s2io_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (s2io_card_up(sp)) {
			pr_err("Can't bring device back up after reset.\n");
			return;
		}

		/* Re-program the MAC address lost across the reset; take
		 * the card back down if the hardware rejects it. */
		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
			s2io_card_down(sp);
			pr_err("Can't restore mac addr after reset.\n");
			return;
		}
	}

	netif_device_attach(netdev);
	netif_tx_wake_all_queues(netdev);
}