/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2005 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watch dog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     1(MSI), 2(MSI_X). Default value is '0(INTA)'.
 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values '1' for enable '0' for disable. Default is '0'.
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated as a single large packet.
 ************************************************************************/
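/*
 * Illustrative load-time configuration (not part of this file): the
 * parameter names above map directly to module parameters, so a load
 * such as
 *
 *	modprobe s2io tx_fifo_num=2 intr_type=2 lro=1
 *
 * would, under these definitions, select two Tx FIFOs, MSI-X interrupts
 * and enable LRO.
 */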

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.15.2"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

static int rxd_size[4] = {32, 48, 48, 64};
static int rxd_count[4] = {127, 85, 85, 63};

static inline int RXD_IS_UP2DT(RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(dev_type == XFRAME_I_DEVICE) ?				\
		((((subid >= 0x600B) && (subid <= 0x600D)) ||	\
		  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
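/*
 * For example, an XFRAME_I card with subsystem id 0x600C evaluates to 1
 * (faulty link indicator), while any XFRAME_II card evaluates to 0.
 */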

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
#define PANIC	1
#define LOW	2
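/*
 * rx_buffer_level() classifies how depleted a ring's Rx buffer pool is:
 * PANIC if no more than one block's worth of RxDs (rxd_count[]) remains,
 * LOW if the pool is more than 16 buffers short of its packet capacity
 * (pkt_cnt), and 0 otherwise.
 */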
static inline int rx_buffer_level(nic_t *sp, int rxb_size, int ring)
{
	mac_info_t *mac_control;

	mac_control = &sp->mac_control;
	if (rxb_size <= rxd_count[sp->rxd_mode])
		return PANIC;
	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
		return LOW;
	return 0;
}

/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"},
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"},
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
};

#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
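/*
 * Each key above occupies exactly ETH_GSTRING_LEN bytes, so dividing the
 * total array size by ETH_GSTRING_LEN yields the number of entries; the
 * *_STRINGS_LEN macros give the byte size of the flat string tables
 * handed to ethtool.
 */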

#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	init_timer(&timer);				\
	timer.function = handle;			\
	timer.data = (unsigned long)arg;		\
	mod_timer(&timer, (jiffies + exp))

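/*
 * Sketch of intended use (names hypothetical, not from this file):
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle,
 *			(unsigned long)sp, (HZ / 2));
 *
 * i.e. the macro initializes the timer, points it at the handler with
 * its argument, and arms it 'exp' jiffies from now via mod_timer().
 */
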
/* Add the vlan */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp = grp;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	if (nic->vlgrp)
		nic->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}
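/*
 * Design note: both VLAN entry points take the Tx lock rather than a
 * dedicated lock, presumably because vlgrp is consulted on the transmit
 * path; serializing updates against tx_lock keeps the group pointer
 * stable while packets are being queued.
 */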

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define	END_SIGN	0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
#ifndef CONFIG_S2IO_NAPI
S2IO_PARM_INT(indicate_max_pkts, 0);
#endif

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);

/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
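/*
 * This is the usual round-up integer division: for example, a FIFO of
 * len = 512 descriptor lists at per_each = 30 lists per page gives
 * (512 + 29) / 30 = 18 pages, rather than the 17 that a plain division
 * would return (the figures here are illustrative only).
 */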

/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	RxD_block_t *pre_rxd_blk = NULL;
	int i, j, blk_cnt, rx_sz, tx_sz;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	buffAdd_t *ba;

	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return -EINVAL;
	}

	lst_size = (sizeof(TxD_t) * config->max_txds);
	tx_sz = lst_size * size;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(ERR_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(ERR_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
				"%s: Zero DMA address for TxDL. ", dev->name);
				DBG_PRINT(INIT_DBG,
				"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(ERR_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(ERR_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;
	memset(nic->ufo_in_band_v, 0, sizeof(u64) * size);

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(RxD1_t)));
	else
		size = (size * (sizeof(RxD3_t)));
	rx_sz = size;

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			rx_block_info_t *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(rxd_info_t) *
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_dma_addr;

			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(buffAdd_t *) * blk_cnt),
					GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(buffAdd_t) *
						(rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					/*
					 * Round the buffer address up to the
					 * next (ALIGN_SIZE + 1) boundary
					 * (assuming ALIGN_SIZE is one less
					 * than a power of two); the extra
					 * ALIGN_SIZE bytes allocated above
					 * keep the aligned pointer in bounds.
					 */
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(StatInfo_t);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was allocated till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);

	return SUCCESS;
}

/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	mac_info_t *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;

	if (!nic)
		return;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(TxD_t) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
			    list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
		}
		kfree(mac_control->fifos[i].list_info);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
				block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
				block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
		}
	}

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					buffAdd_t *ba =
						&mac_control->rings[i].ba[j][k];
					kfree(ba->ba_0_org);
					kfree(ba->ba_1_org);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
			}
			kfree(mac_control->rings[i].ba);
		}
	}

	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
	if (nic->ufo_in_band_v)
		kfree(nic->ufo_in_band_v);
}

/**
 * s2io_verify_pci_mode - Verify the PCI bus mode reported by the adapter.
 */

static int s2io_verify_pci_mode(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
	return mode;
}

#define NEC_VENID	0x1033
#define NEC_DEVID	0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if ((tdev->vendor == NEC_VENID) && (tdev->device == NEC_DEVID)) {
			if (tdev->bus == s2io_pdev->bus->parent)
				return 1;
		}
	}
	return 0;
}
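/*
 * Note: this walks every PCI device looking for an NEC bridge whose bus
 * is the parent of the adapter's bus, presumably the PCI-E to PCI-X
 * bridge used on some systems; when found, s2io_print_pci_mode() below
 * reports the device as sitting on a PCI-E bus.
 */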

static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};

/**
 * s2io_print_pci_mode - Print the PCI bus width and speed of the adapter.
 */
static int s2io_print_pci_mode(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		break;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		break;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		break;
	default:
		return -1;	/* Unsupported bus speed */
	}

	return mode;
}

/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	mac_info_t *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -1;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);


	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
				    ((i * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
	    (get_xena_rev_id(nic->pdev) < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);
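	/*
	 * Worked example: with 3 rings on an Xframe II (mem_size = 32),
	 * Q0 receives 32/3 + 32%3 = 10 + 2 = 12 shares and Q1/Q2 get 10
	 * each, so the integer-division remainder always lands on Q0.
	 */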

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}
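	/*
	 * Reading the constants above: each byte of a *_w_round_robin
	 * register appears to name the FIFO/ring served in that arbitration
	 * slot (0x00-0x07), while each byte of rts_qos_steering looks like
	 * a per-priority ring bitmap (0x80 = ring 0 ... 0x01 = ring 7);
	 * e.g. the 8-ring value 0x8040201008040201 maps each of the eight
	 * priorities to a distinct ring. This reading is inferred from the
	 * patterns, not from a register manual.
	 */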

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user
		 * has not specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);


	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */
	/*
	 * TTI Initialization. Default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 * by default.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		int count = (nic->config.bus_speed * 125)/2;
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
	} else {
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
	}
	val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
	    TTI_DATA1_MEM_TX_URNG_B(0x10) |
	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
	if (use_continuous_tx_intrs)
		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
	writeq(val64, &bar0->tti_data1_mem);

	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
	    TTI_DATA2_MEM_TX_UFC_B(0x20) |
	    TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
	writeq(val64, &bar0->tti_data2_mem);

	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->tti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition.
	 * We wait for a maximum of 500ms (10 polls at 50ms intervals) for
	 * the operation to complete; if it's not complete by then we
	 * return an error.
	 */
	time = 0;
	while (TRUE) {
		val64 = readq(&bar0->tti_command_mem);
		if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
			break;
		}
		if (time > 10) {
			DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
				  dev->name);
			return -1;
		}
		msleep(50);
		time++;
	}

	if (nic->config.bimodal) {
		int k = 0;
		for (k = 0; k < config->rx_ring_num; k++) {
			val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
			val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
			writeq(val64, &bar0->tti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete; if it's not complete
			 * by then we return an error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->tti_command_mem);
				if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG,
						  "%s: TTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	} else {

		/* RTI Initialization */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/*
			 * Programmed to generate approx 500 Intrs per
			 * second
			 */
			int count = (nic->config.bus_speed * 125)/4;
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
		} else {
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
		}
		val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		    RTI_DATA1_MEM_RX_URNG_B(0x10) |
		    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

		writeq(val64, &bar0->rti_data1_mem);

		val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		    RTI_DATA2_MEM_RX_UFC_B(0x2);
		if (nic->intr_type == MSI_X)
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
				  RTI_DATA2_MEM_RX_UFC_D(0x40));
		else
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
				  RTI_DATA2_MEM_RX_UFC_D(0x80));
		writeq(val64, &bar0->rti_data2_mem);

		for (i = 0; i < config->rx_ring_num; i++) {
			val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
					| RTI_CMD_MEM_OFFSET(i);
			writeq(val64, &bar0->rti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete; if it's not complete
			 * by then we return an error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->rti_command_mem);
				if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any Queue exceeds the ratio
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a
	 * pause frame is generated.
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q0q3)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q4q7)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop its read request if the number of read splits
	 * has exceeded the limit pointed by shared_splits.
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(BIT(13)|BIT(14)|BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2

static int s2io_link_fault_indication(nic_t *nic)
{
	if (nic->intr_type != INTA)
		return MAC_RMAC_ERR_TIMER;
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	else
		return MAC_RMAC_ERR_TIMER;
}
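/*
 * In other words, the driver relies on the GPIO link up/down interrupt
 * only on Xframe II with legacy INTA; with MSI/MSI-X (or on Xframe I)
 * link state is instead tracked via the RMAC error timer path.
 */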
1da177e4 1637
20346722
K
1638/**
1639 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1da177e4
LT
1640 * @nic: device private variable,
1641 * @mask: A mask indicating which Intr block must be modified and,
1642 * @flag: A flag indicating whether to enable or disable the Intrs.
1643 * Description: This function will either disable or enable the interrupts
20346722
K
1644 * depending on the flag argument. The mask argument can be used to
1645 * enable/disable any Intr block.
1da177e4
LT
1646 * Return Value: NONE.
1647 */
1648
1649static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1650{
1651 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1652 register u64 val64 = 0, temp64 = 0;
1653
1654 /* Top level interrupt classification */
1655 /* PIC Interrupts */
1656 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1657 /* Enable PIC Intrs in the general intr mask register */
1658 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1659 if (flag == ENABLE_INTRS) {
1660 temp64 = readq(&bar0->general_int_mask);
1661 temp64 &= ~((u64) val64);
1662 writeq(temp64, &bar0->general_int_mask);
20346722 1663 /*
a371a07d 1664 * If Hercules adapter enable GPIO otherwise
b41477f3 1665 * disable all PCIX, Flash, MDIO, IIC and GPIO
20346722
K
1666 * interrupts for now.
1667 * TODO
1da177e4 1668 */
a371a07d
K
1669 if (s2io_link_fault_indication(nic) ==
1670 LINK_UP_DOWN_INTERRUPT ) {
1671 temp64 = readq(&bar0->pic_int_mask);
1672 temp64 &= ~((u64) PIC_INT_GPIO);
1673 writeq(temp64, &bar0->pic_int_mask);
1674 temp64 = readq(&bar0->gpio_int_mask);
1675 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1676 writeq(temp64, &bar0->gpio_int_mask);
1677 } else {
1678 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1679 }
20346722 1680 /*
1da177e4
LT
1681 * No MSI Support is available presently, so TTI and
1682 * RTI interrupts are also disabled.
1683 */
1684 } else if (flag == DISABLE_INTRS) {
20346722
K
1685 /*
1686 * Disable PIC Intrs in the general
1687 * intr mask register
1da177e4
LT
1688 */
1689 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1690 temp64 = readq(&bar0->general_int_mask);
1691 val64 |= temp64;
1692 writeq(val64, &bar0->general_int_mask);
1693 }
1694 }
1695
1696 /* DMA Interrupts */
1697 /* Enabling/Disabling Tx DMA interrupts */
1698 if (mask & TX_DMA_INTR) {
1699 /* Enable TxDMA Intrs in the general intr mask register */
1700 val64 = TXDMA_INT_M;
1701 if (flag == ENABLE_INTRS) {
1702 temp64 = readq(&bar0->general_int_mask);
1703 temp64 &= ~((u64) val64);
1704 writeq(temp64, &bar0->general_int_mask);
1705 /*
1706 * Keep all interrupts other than PFC interrupt
1707 * and PCC interrupt disabled in DMA level.
1708 */
1709 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1710 TXDMA_PCC_INT_M);
1711 writeq(val64, &bar0->txdma_int_mask);
1712 /*
1713 * Enable only the MISC error 1 interrupt in PFC block
1714 */
1715 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1716 writeq(val64, &bar0->pfc_err_mask);
1717 /*
1718 * Enable only the FB_ECC error interrupt in PCC block
1719 */
1720 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1721 writeq(val64, &bar0->pcc_err_mask);
1722 } else if (flag == DISABLE_INTRS) {
1723 /*
1724 * Disable TxDMA Intrs in the general intr mask
1725 * register
1726 */
1727 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1728 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1729 temp64 = readq(&bar0->general_int_mask);
1730 val64 |= temp64;
1731 writeq(val64, &bar0->general_int_mask);
1732 }
1733 }
1734
1735 /* Enabling/Disabling Rx DMA interrupts */
1736 if (mask & RX_DMA_INTR) {
1737 /* Enable RxDMA Intrs in the general intr mask register */
1738 val64 = RXDMA_INT_M;
1739 if (flag == ENABLE_INTRS) {
1740 temp64 = readq(&bar0->general_int_mask);
1741 temp64 &= ~((u64) val64);
1742 writeq(temp64, &bar0->general_int_mask);
1743 /*
1744 * All RxDMA block interrupts are disabled for now
1745 * TODO
1746 */
1747 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1748 } else if (flag == DISABLE_INTRS) {
1749 /*
1750 * Disable RxDMA Intrs in the general intr mask
1751 * register
1752 */
1753 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1754 temp64 = readq(&bar0->general_int_mask);
1755 val64 |= temp64;
1756 writeq(val64, &bar0->general_int_mask);
1757 }
1758 }
1759
1760 /* MAC Interrupts */
1761 /* Enabling/Disabling MAC interrupts */
1762 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1763 val64 = TXMAC_INT_M | RXMAC_INT_M;
1764 if (flag == ENABLE_INTRS) {
1765 temp64 = readq(&bar0->general_int_mask);
1766 temp64 &= ~((u64) val64);
1767 writeq(temp64, &bar0->general_int_mask);
1768 /*
1769 * All MAC block error interrupts are disabled for now
1770 * TODO
1771 */
1da177e4 1772 } else if (flag == DISABLE_INTRS) {
1773 /*
1774 * Disable MAC Intrs in the general intr mask register
1775 */
1776 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1777 writeq(DISABLE_ALL_INTRS,
1778 &bar0->mac_rmac_err_mask);
1779
1780 temp64 = readq(&bar0->general_int_mask);
1781 val64 |= temp64;
1782 writeq(val64, &bar0->general_int_mask);
1783 }
1784 }
1785
1786 /* XGXS Interrupts */
1787 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1788 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1789 if (flag == ENABLE_INTRS) {
1790 temp64 = readq(&bar0->general_int_mask);
1791 temp64 &= ~((u64) val64);
1792 writeq(temp64, &bar0->general_int_mask);
20346722 1793 /*
1da177e4 1794 * All XGXS block error interrupts are disabled for now
20346722 1795 * TODO
1796 */
1797 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1798 } else if (flag == DISABLE_INTRS) {
1799 /*
1800 * Disable MC Intrs in the general intr mask register
1801 */
1802 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1803 temp64 = readq(&bar0->general_int_mask);
1804 val64 |= temp64;
1805 writeq(val64, &bar0->general_int_mask);
1806 }
1807 }
1808
1809 /* Memory Controller(MC) interrupts */
1810 if (mask & MC_INTR) {
1811 val64 = MC_INT_M;
1812 if (flag == ENABLE_INTRS) {
1813 temp64 = readq(&bar0->general_int_mask);
1814 temp64 &= ~((u64) val64);
1815 writeq(temp64, &bar0->general_int_mask);
20346722 1816 /*
5e25b9dd 1817 * Enable all MC Intrs.
1da177e4 1818 */
1819 writeq(0x0, &bar0->mc_int_mask);
1820 writeq(0x0, &bar0->mc_err_mask);
1821 } else if (flag == DISABLE_INTRS) {
1822 /*
1823 * Disable MC Intrs in the general intr mask register
1824 */
1825 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1826 temp64 = readq(&bar0->general_int_mask);
1827 val64 |= temp64;
1828 writeq(val64, &bar0->general_int_mask);
1829 }
1830 }
1831
1832
1833 /* Tx traffic interrupts */
1834 if (mask & TX_TRAFFIC_INTR) {
1835 val64 = TXTRAFFIC_INT_M;
1836 if (flag == ENABLE_INTRS) {
1837 temp64 = readq(&bar0->general_int_mask);
1838 temp64 &= ~((u64) val64);
1839 writeq(temp64, &bar0->general_int_mask);
20346722 1840 /*
1da177e4 1841 * Enable all the Tx side interrupts
20346722 1842 * (writing 0 enables all 64 TX interrupt levels)
1843 */
1844 writeq(0x0, &bar0->tx_traffic_mask);
1845 } else if (flag == DISABLE_INTRS) {
1846 /*
1847 * Disable Tx Traffic Intrs in the general intr mask
1848 * register.
1849 */
1850 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1851 temp64 = readq(&bar0->general_int_mask);
1852 val64 |= temp64;
1853 writeq(val64, &bar0->general_int_mask);
1854 }
1855 }
1856
1857 /* Rx traffic interrupts */
1858 if (mask & RX_TRAFFIC_INTR) {
1859 val64 = RXTRAFFIC_INT_M;
1860 if (flag == ENABLE_INTRS) {
1861 temp64 = readq(&bar0->general_int_mask);
1862 temp64 &= ~((u64) val64);
1863 writeq(temp64, &bar0->general_int_mask);
1864 /* Writing 0 enables all 8 RX interrupt levels */
1865 writeq(0x0, &bar0->rx_traffic_mask);
1866 } else if (flag == DISABLE_INTRS) {
1867 /*
1868 * Disable Rx Traffic Intrs in the general intr mask
1869 * register.
1870 */
1871 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1872 temp64 = readq(&bar0->general_int_mask);
1873 val64 |= temp64;
1874 writeq(val64, &bar0->general_int_mask);
1875 }
1876 }
1877}
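/*
 * Usage sketch (illustrative; this mirrors how stop_nic() below calls the
 * routine): each block is gated by a bit in general_int_mask, so enabling
 * is a read-modify-write that clears the block's mask bit and disabling
 * sets it again.
 *
 *	u16 intr_mask = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR |
 *			TX_PIC_INTR | RX_PIC_INTR;
 *	en_dis_able_nic_intrs(nic, intr_mask, ENABLE_INTRS);
 *	...
 *	en_dis_able_nic_intrs(nic, intr_mask, DISABLE_INTRS);
 */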
1878
541ae68f 1879static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1880{
1881 int ret = 0;
1882
1883 if (flag == FALSE) {
541ae68f 1884 if ((!herc && (rev_id >= 4)) || herc) {
1885 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1886 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1887 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1888 ret = 1;
1889 }
541ae68f 1890 } else {
1891 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1892 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1893 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1894 ret = 1;
1895 }
1896 }
1897 } else {
541ae68f 1898 if ((!herc && (rev_id >= 4)) || herc) {
1899 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1900 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1901 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1902 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1903 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1904 ret = 1;
1905 }
1906 } else {
1907 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1908 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1909 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1910 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1911 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1912 ret = 1;
1913 }
1914 }
1915 }
1916
1917 return ret;
1918}
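/*
 * Summary of the four cases checked above (descriptive, added for clarity):
 *
 *	flag	device			"ready" condition on val64
 *	-----	-------------------	----------------------------------------
 *	FALSE	Herc, or Xena rev>=4	RMAC_PCC_IDLE clear and RC_PRC quiescent
 *	FALSE	Xena rev<4		RMAC_PCC_FOUR_IDLE clear and RC_PRC quiescent
 *	TRUE	Herc, or Xena rev>=4	RMAC_PCC_IDLE set, RC_PRC clear or fully quiescent
 *	TRUE	Xena rev<4		RMAC_PCC_FOUR_IDLE set, RC_PRC clear or fully quiescent
 */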
1919/**
1920 * verify_xena_quiescence - Checks whether the H/W is ready
1921 * @val64 : Value read from adapter status register.
1922 * @flag : indicates if the adapter enable bit was ever written once
1923 * before.
1924 * Description: Returns whether the H/W is ready to go or not. Depending
20346722 1925 * on whether the adapter enable bit was written or not, the comparison
1926 * differs and the calling function passes the input argument flag to
1927 * indicate this.
20346722 1928 * Return: 1 if Xena is quiescent
1929 * 0 if Xena is not quiescent
1930 */
1931
20346722 1932static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1da177e4 1933{
541ae68f 1934 int ret = 0, herc;
1da177e4 1935 u64 tmp64 = ~((u64) val64);
5e25b9dd 1936 int rev_id = get_xena_rev_id(sp->pdev);
1da177e4 1937
541ae68f 1938 herc = (sp->device_type == XFRAME_II_DEVICE);
1939 if (!
1940 (tmp64 &
1941 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1942 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1943 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1944 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1945 ADAPTER_STATUS_P_PLL_LOCK))) {
541ae68f 1946 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1947 }
1948
1949 return ret;
1950}
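/*
 * Illustrative polling sketch (hypothetical; start_nic() below performs a
 * single-shot version of this check): callers read the adapter status
 * register and retry until the H/W reports quiescence.
 *
 *	val64 = readq(&bar0->adapter_status);
 *	while (!verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
 *		msleep(100);
 *		val64 = readq(&bar0->adapter_status);
 *	}
 */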
1951
1952/**
1953 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1954 * @sp: Pointer to device specific structure
20346722 1955 * Description :
1956 * New procedure to clear mac address reading problems on Alpha platforms
1957 *
1958 */
1959
ac1f60db 1960static void fix_mac_address(nic_t * sp)
1961{
1962 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1963 u64 val64;
1964 int i = 0;
1965
1966 while (fix_mac[i] != END_SIGN) {
1967 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 1968 udelay(10);
1969 val64 = readq(&bar0->gpio_control);
1970 }
1971}
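/*
 * Note (descriptive): fix_mac[] is an END_SIGN-terminated table of magic
 * values defined elsewhere in this driver; the loop above replays each
 * value into gpio_control and reads the register back so every write is
 * posted before the next one is issued.
 */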
1972
1973/**
20346722 1974 * start_nic - Turns the device on
1da177e4 1975 * @nic : device private variable.
1976 * Description:
1977 * This function actually turns the device on. Before this function is
1978 * called, all registers are configured from their reset states
1979 * and shared memory is allocated but the NIC is still quiescent. On
1980 * calling this function, the device interrupts are cleared and the NIC is
1981 * literally switched on by writing into the adapter control register.
20346722 1982 * Return Value:
1983 * SUCCESS on success and -1 on failure.
1984 */
1985
1986static int start_nic(struct s2io_nic *nic)
1987{
1988 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1989 struct net_device *dev = nic->dev;
1990 register u64 val64 = 0;
20346722 1991 u16 subid, i;
1992 mac_info_t *mac_control;
1993 struct config_param *config;
1994
1995 mac_control = &nic->mac_control;
1996 config = &nic->config;
1997
1998 /* PRC Initialization and configuration */
1999 for (i = 0; i < config->rx_ring_num; i++) {
20346722 2000 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2001 &bar0->prc_rxd0_n[i]);
2002
2003 val64 = readq(&bar0->prc_ctrl_n[i]);
2004 if (nic->config.bimodal)
2005 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
2006 if (nic->rxd_mode == RXD_MODE_1)
2007 val64 |= PRC_CTRL_RC_ENABLED;
2008 else
2009 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2010 if (nic->device_type == XFRAME_II_DEVICE)
2011 val64 |= PRC_CTRL_GROUP_READS;
2012 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2013 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2014 writeq(val64, &bar0->prc_ctrl_n[i]);
2015 }
2016
2017 if (nic->rxd_mode == RXD_MODE_3B) {
2018 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2019 val64 = readq(&bar0->rx_pa_cfg);
2020 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2021 writeq(val64, &bar0->rx_pa_cfg);
2022 }
1da177e4 2023
20346722 2024 /*
2025 * Enabling MC-RLDRAM. After enabling the device, we timeout
2026 * for around 100ms, which is approximately the time required
2027 * for the device to be ready for operation.
2028 */
2029 val64 = readq(&bar0->mc_rldram_mrs);
2030 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2031 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2032 val64 = readq(&bar0->mc_rldram_mrs);
2033
20346722 2034 msleep(100); /* Delay by around 100 ms. */
2035
2036 /* Enabling ECC Protection. */
2037 val64 = readq(&bar0->adapter_control);
2038 val64 &= ~ADAPTER_ECC_EN;
2039 writeq(val64, &bar0->adapter_control);
2040
2041 /*
2042 * Clearing any possible Link state change interrupts that
2043 * could have popped up just before Enabling the card.
2044 */
2045 val64 = readq(&bar0->mac_rmac_err_reg);
2046 if (val64)
2047 writeq(val64, &bar0->mac_rmac_err_reg);
2048
2049 /*
2050 * Verify if the device is ready to be enabled, if so enable
2051 * it.
2052 */
2053 val64 = readq(&bar0->adapter_status);
20346722 2054 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
2055 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2056 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2057 (unsigned long long) val64);
2058 return FAILURE;
2059 }
2060
20346722 2061 /*
1da177e4 2062 * With some switches, link might be already up at this point.
2063 * Because of this weird behavior, when we enable laser,
2064 * we may not get link. We need to handle this. We cannot
2065 * figure out which switch is misbehaving. So we are forced to
2066 * make a global change.
2067 */
2068
2069 /* Enabling Laser. */
2070 val64 = readq(&bar0->adapter_control);
2071 val64 |= ADAPTER_EOI_TX_ON;
2072 writeq(val64, &bar0->adapter_control);
2073
2074 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2075 /*
2076 * Don't see link state interrupts initially on some switches,
2077 * so directly scheduling the link state task here.
2078 */
2079 schedule_work(&nic->set_link_task);
2080 }
2081 /* SXE-002: Initialize link and activity LED */
2082 subid = nic->pdev->subsystem_device;
2083 if (((subid & 0xFF) >= 0x07) &&
2084 (nic->device_type == XFRAME_I_DEVICE)) {
2085 val64 = readq(&bar0->gpio_control);
2086 val64 |= 0x0000800000000000ULL;
2087 writeq(val64, &bar0->gpio_control);
2088 val64 = 0x0411040400000000ULL;
509a2671 2089 writeq(val64, (void __iomem *)bar0 + 0x2700);
2090 }
2091
2092 return SUCCESS;
2093}
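/*
 * Call-ordering sketch (illustrative; a simplified view of the card-up
 * path that invokes this routine): registers are programmed and receive
 * buffers posted before start_nic() flips the adapter on, and interrupts
 * are enabled only afterwards.
 *
 *	init_nic(nic);				(program registers from reset state)
 *	for (i = 0; i < config->rx_ring_num; i++)
 *		fill_rx_buffers(nic, i);	(post receive buffers)
 *	start_nic(nic);
 *	en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
 */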
2094/**
2095 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2096 */
2097static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2098{
2099 nic_t *nic = fifo_data->nic;
2100 struct sk_buff *skb;
2101 TxD_t *txds;
2102 u16 j, frg_cnt;
2103
2104 txds = txdlp;
26b7625c 2105 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2106 pci_unmap_single(nic->pdev, (dma_addr_t)
2107 txds->Buffer_Pointer, sizeof(u64),
2108 PCI_DMA_TODEVICE);
2109 txds++;
2110 }
2111
2112 skb = (struct sk_buff *) ((unsigned long)
2113 txds->Host_Control);
2114 if (!skb) {
2115 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
2116 return NULL;
2117 }
2118 pci_unmap_single(nic->pdev, (dma_addr_t)
2119 txds->Buffer_Pointer,
2120 skb->len - skb->data_len,
2121 PCI_DMA_TODEVICE);
2122 frg_cnt = skb_shinfo(skb)->nr_frags;
2123 if (frg_cnt) {
2124 txds++;
2125 for (j = 0; j < frg_cnt; j++, txds++) {
2126 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2127 if (!txds->Buffer_Pointer)
2128 break;
2129 pci_unmap_page(nic->pdev, (dma_addr_t)
2130 txds->Buffer_Pointer,
2131 frag->size, PCI_DMA_TODEVICE);
2132 }
2133 }
b41477f3 2134 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
2135 return(skb);
2136}
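/*
 * Usage sketch (illustrative; this is how the Tx completion and cleanup
 * paths below consume the helper): the returned skb has had all of its
 * DMA mappings torn down and its TxDs zeroed, so the caller only has to
 * free it.
 *
 *	skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
 *	if (skb)
 *		dev_kfree_skb_irq(skb);
 */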
1da177e4 2137
2138/**
2139 * free_tx_buffers - Free all queued Tx buffers
1da177e4 2140 * @nic : device private variable.
20346722 2141 * Description:
1da177e4 2142 * Free all queued Tx buffers.
20346722 2143 * Return Value: void
2144*/
2145
2146static void free_tx_buffers(struct s2io_nic *nic)
2147{
2148 struct net_device *dev = nic->dev;
2149 struct sk_buff *skb;
2150 TxD_t *txdp;
2151 int i, j;
2152 mac_info_t *mac_control;
2153 struct config_param *config;
fed5eccd 2154 int cnt = 0;
2155
2156 mac_control = &nic->mac_control;
2157 config = &nic->config;
2158
2159 for (i = 0; i < config->tx_fifo_num; i++) {
2160 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
20346722 2161 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1da177e4 2162 list_virt_addr;
2163 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2164 if (skb) {
2165 dev_kfree_skb(skb);
2166 cnt++;
1da177e4 2167 }
2168 }
2169 DBG_PRINT(INTR_DBG,
2170 "%s:forcibly freeing %d skbs on FIFO%d\n",
2171 dev->name, cnt, i);
2172 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2173 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2174 }
2175}
2176
2177/**
2178 * stop_nic - To stop the nic
1da177e4 2179 * @nic : device private variable.
2180 * Description:
2181 * This function does exactly the opposite of what the start_nic()
2182 * function does. This function is called to stop the device.
2183 * Return Value:
2184 * void.
2185 */
2186
2187static void stop_nic(struct s2io_nic *nic)
2188{
2189 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2190 register u64 val64 = 0;
5d3213cc 2191 u16 interruptible;
2192 mac_info_t *mac_control;
2193 struct config_param *config;
2194
2195 mac_control = &nic->mac_control;
2196 config = &nic->config;
2197
2198 /* Disable all interrupts */
e960fc5c 2199 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2200 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2201 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2202 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2203
2204 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2205 val64 = readq(&bar0->adapter_control);
2206 val64 &= ~(ADAPTER_CNTL_EN);
2207 writeq(val64, &bar0->adapter_control);
2208}
2209
26df54bf 2210static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2211{
2212 struct net_device *dev = nic->dev;
2213 struct sk_buff *frag_list;
50eb8006 2214 void *tmp;
2215
2216 /* Buffer-1 receives L3/L4 headers */
2217 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2218 (nic->pdev, skb->data, l3l4hdr_size + 4,
2219 PCI_DMA_FROMDEVICE);
2220
2221 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2222 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2223 if (skb_shinfo(skb)->frag_list == NULL) {
2224 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2225 return -ENOMEM ;
2226 }
2227 frag_list = skb_shinfo(skb)->frag_list;
2228 frag_list->next = NULL;
2229 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2230 frag_list->data = tmp;
2231 frag_list->tail = tmp;
2232
2233 /* Buffer-2 receives L4 data payload */
2234 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2235 frag_list->data, dev->mtu,
2236 PCI_DMA_FROMDEVICE);
2237 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2238 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2239
2240 return SUCCESS;
2241}
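/*
 * Resulting 3-buffer RxD layout (descriptive summary of the code above):
 *
 *	Buffer0 - BUF0_LEN byte header area, mapped by the caller
 *	Buffer1 - l3l4hdr_size + 4 bytes holding the L3/L4 headers (skb->data)
 *	Buffer2 - dev->mtu bytes of L4 payload (the frag_list skb's data)
 */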
2242
2243/**
2244 * fill_rx_buffers - Allocates the Rx side skbs
1da177e4 2245 * @nic: device private variable
2246 * @ring_no: ring number
2247 * Description:
2248 * The function allocates Rx side skbs and puts the physical
2249 * address of these buffers into the RxD buffer pointers, so that the NIC
2250 * can DMA the received frame into these locations.
2251 * The NIC supports 3 receive modes, viz
2252 * 1. single buffer,
2253 * 2. three buffer and
2254 * 3. Five buffer modes.
2255 * Each mode defines how many fragments the received frame will be split
2256 * up into by the NIC. The frame is split into L3 header, L4 Header,
2257 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2258 * is split into 3 fragments. As of now only single buffer mode is
2259 * supported.
2260 * Return Value:
2261 * SUCCESS on success or an appropriate -ve value on failure.
2262 */
2263
ac1f60db 2264static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2265{
2266 struct net_device *dev = nic->dev;
2267 struct sk_buff *skb;
2268 RxD_t *rxdp;
2269 int off, off1, size, block_no, block_no1;
1da177e4 2270 u32 alloc_tab = 0;
20346722 2271 u32 alloc_cnt;
2272 mac_info_t *mac_control;
2273 struct config_param *config;
20346722 2274 u64 tmp;
1da177e4 2275 buffAdd_t *ba;
2276#ifndef CONFIG_S2IO_NAPI
2277 unsigned long flags;
2278#endif
303bcb4b 2279 RxD_t *first_rxdp = NULL;
2280
2281 mac_control = &nic->mac_control;
2282 config = &nic->config;
2283 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2284 atomic_read(&nic->rx_bufs_left[ring_no]);
1da177e4 2285
5d3213cc 2286 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
863c11a9 2287 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1da177e4 2288 while (alloc_tab < alloc_cnt) {
20346722 2289 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2290 block_index;
20346722 2291 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1da177e4 2292
2293 rxdp = mac_control->rings[ring_no].
2294 rx_blocks[block_no].rxds[off].virt_addr;
2295
2296 if ((block_no == block_no1) && (off == off1) &&
2297 (rxdp->Host_Control)) {
2298 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2299 dev->name);
2300 DBG_PRINT(INTR_DBG, " info equated\n");
2301 goto end;
2302 }
da6971d8 2303 if (off && (off == rxd_count[nic->rxd_mode])) {
20346722 2304 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2305 block_index++;
2306 if (mac_control->rings[ring_no].rx_curr_put_info.
2307 block_index == mac_control->rings[ring_no].
2308 block_count)
2309 mac_control->rings[ring_no].rx_curr_put_info.
2310 block_index = 0;
2311 block_no = mac_control->rings[ring_no].
2312 rx_curr_put_info.block_index;
2313 if (off == rxd_count[nic->rxd_mode])
2314 off = 0;
20346722 2315 mac_control->rings[ring_no].rx_curr_put_info.
2316 offset = off;
2317 rxdp = mac_control->rings[ring_no].
2318 rx_blocks[block_no].block_virt_addr;
2319 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2320 dev->name, rxdp);
2321 }
2322#ifndef CONFIG_S2IO_NAPI
2323 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2324 mac_control->rings[ring_no].put_pos =
da6971d8 2325 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2326 spin_unlock_irqrestore(&nic->put_lock, flags);
2327#endif
2328 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2329 ((nic->rxd_mode >= RXD_MODE_3A) &&
2330 (rxdp->Control_2 & BIT(0)))) {
20346722 2331 mac_control->rings[ring_no].rx_curr_put_info.
da6971d8 2332 offset = off;
2333 goto end;
2334 }
2335 /* calculate size of skb based on ring mode */
2336 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2337 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2338 if (nic->rxd_mode == RXD_MODE_1)
2339 size += NET_IP_ALIGN;
2340 else if (nic->rxd_mode == RXD_MODE_3B)
2341 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2342 else
2343 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
1da177e4 2344
2345 /* allocate skb */
2346 skb = dev_alloc_skb(size);
2347 if(!skb) {
2348 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2349 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2350 if (first_rxdp) {
2351 wmb();
2352 first_rxdp->Control_1 |= RXD_OWN_XENA;
2353 }
2354 return -ENOMEM ;
2355 }
2356 if (nic->rxd_mode == RXD_MODE_1) {
2357 /* 1 buffer mode - normal operation mode */
2358 memset(rxdp, 0, sizeof(RxD1_t));
2359 skb_reserve(skb, NET_IP_ALIGN);
2360 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
2361 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2362 PCI_DMA_FROMDEVICE);
2363 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2364
2365 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2366 /*
2367 * 2 or 3 buffer mode -
2368 * Both 2 buffer mode and 3 buffer mode provides 128
2369 * byte aligned receive buffers.
2370 *
2371 * 3 buffer mode provides header separation where in
2372 * skb->data will have L3/L4 headers where as
2373 * skb_shinfo(skb)->frag_list will have the L4 data
2374 * payload
2375 */
2376
2377 memset(rxdp, 0, sizeof(RxD3_t));
2378 ba = &mac_control->rings[ring_no].ba[block_no][off];
2379 skb_reserve(skb, BUF0_LEN);
2380 tmp = (u64)(unsigned long) skb->data;
2381 tmp += ALIGN_SIZE;
2382 tmp &= ~ALIGN_SIZE;
2383 skb->data = (void *) (unsigned long)tmp;
2384 skb->tail = (void *) (unsigned long)tmp;
2385
2386 if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
2387 ((RxD3_t*)rxdp)->Buffer0_ptr =
2388 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
da6971d8 2389 PCI_DMA_FROMDEVICE);
2390 else
2391 pci_dma_sync_single_for_device(nic->pdev,
2392 (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
2393 BUF0_LEN, PCI_DMA_FROMDEVICE);
2394 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2395 if (nic->rxd_mode == RXD_MODE_3B) {
2396 /* Two buffer mode */
2397
2398 /*
2399 * Buffer2 will have L3/L4 header plus
2400 * L4 payload
2401 */
2402 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2403 (nic->pdev, skb->data, dev->mtu + 4,
2404 PCI_DMA_FROMDEVICE);
2405
2406 /* Buffer-1 will be dummy buffer. Not used */
2407 if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
2408 ((RxD3_t*)rxdp)->Buffer1_ptr =
2409 pci_map_single(nic->pdev,
2410 ba->ba_1, BUF1_LEN,
2411 PCI_DMA_FROMDEVICE);
2412 }
2413 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2414 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2415 (dev->mtu + 4);
2416 } else {
2417 /* 3 buffer mode */
2418 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2419 dev_kfree_skb_irq(skb);
2420 if (first_rxdp) {
2421 wmb();
2422 first_rxdp->Control_1 |=
2423 RXD_OWN_XENA;
2424 }
2425 return -ENOMEM ;
2426 }
2427 }
2428 rxdp->Control_2 |= BIT(0);
1da177e4 2429 }
1da177e4 2430 rxdp->Host_Control = (unsigned long) (skb);
2431 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2432 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4 2433 off++;
2434 if (off == (rxd_count[nic->rxd_mode] + 1))
2435 off = 0;
20346722 2436 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
20346722 2437
da6971d8 2438 rxdp->Control_2 |= SET_RXD_MARKER;
2439 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2440 if (first_rxdp) {
2441 wmb();
2442 first_rxdp->Control_1 |= RXD_OWN_XENA;
2443 }
2444 first_rxdp = rxdp;
2445 }
2446 atomic_inc(&nic->rx_bufs_left[ring_no]);
2447 alloc_tab++;
2448 }
2449
2450 end:
2451 /* Transfer ownership of first descriptor to adapter just before
2452 * exiting. Before that, use memory barrier so that ownership
2453 * and other fields are seen by adapter correctly.
2454 */
2455 if (first_rxdp) {
2456 wmb();
2457 first_rxdp->Control_1 |= RXD_OWN_XENA;
2458 }
2459
2460 return SUCCESS;
2461}
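/*
 * Usage sketch (illustrative; matches the Rx poll and netpoll paths
 * below): replenishment is retried per ring and an -ENOMEM is tolerated,
 * since buffers that were already posted keep the ring alive.
 *
 *	for (i = 0; i < config->rx_ring_num; i++) {
 *		if (fill_rx_buffers(nic, i) == -ENOMEM) {
 *			DBG_PRINT(ERR_DBG, "%s: Out of memory\n", dev->name);
 *			break;
 *		}
 *	}
 */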
2462
2463static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2464{
2465 struct net_device *dev = sp->dev;
2466 int j;
2467 struct sk_buff *skb;
2468 RxD_t *rxdp;
2469 mac_info_t *mac_control;
2470 buffAdd_t *ba;
2471
2472 mac_control = &sp->mac_control;
2473 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2474 rxdp = mac_control->rings[ring_no].
2475 rx_blocks[blk].rxds[j].virt_addr;
2476 skb = (struct sk_buff *)
2477 ((unsigned long) rxdp->Host_Control);
2478 if (!skb) {
2479 continue;
2480 }
2481 if (sp->rxd_mode == RXD_MODE_1) {
2482 pci_unmap_single(sp->pdev, (dma_addr_t)
2483 ((RxD1_t*)rxdp)->Buffer0_ptr,
2484 dev->mtu +
2485 HEADER_ETHERNET_II_802_3_SIZE
2486 + HEADER_802_2_SIZE +
2487 HEADER_SNAP_SIZE,
2488 PCI_DMA_FROMDEVICE);
2489 memset(rxdp, 0, sizeof(RxD1_t));
2490 } else if(sp->rxd_mode == RXD_MODE_3B) {
2491 ba = &mac_control->rings[ring_no].
2492 ba[blk][j];
2493 pci_unmap_single(sp->pdev, (dma_addr_t)
2494 ((RxD3_t*)rxdp)->Buffer0_ptr,
2495 BUF0_LEN,
2496 PCI_DMA_FROMDEVICE);
2497 pci_unmap_single(sp->pdev, (dma_addr_t)
2498 ((RxD3_t*)rxdp)->Buffer1_ptr,
2499 BUF1_LEN,
2500 PCI_DMA_FROMDEVICE);
2501 pci_unmap_single(sp->pdev, (dma_addr_t)
2502 ((RxD3_t*)rxdp)->Buffer2_ptr,
2503 dev->mtu + 4,
2504 PCI_DMA_FROMDEVICE);
2505 memset(rxdp, 0, sizeof(RxD3_t));
2506 } else {
2507 pci_unmap_single(sp->pdev, (dma_addr_t)
2508 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2509 PCI_DMA_FROMDEVICE);
2510 pci_unmap_single(sp->pdev, (dma_addr_t)
2511 ((RxD3_t*)rxdp)->Buffer1_ptr,
2512 l3l4hdr_size + 4,
2513 PCI_DMA_FROMDEVICE);
2514 pci_unmap_single(sp->pdev, (dma_addr_t)
2515 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2516 PCI_DMA_FROMDEVICE);
2517 memset(rxdp, 0, sizeof(RxD3_t));
2518 }
2519 dev_kfree_skb(skb);
2520 atomic_dec(&sp->rx_bufs_left[ring_no]);
2521 }
2522}
2523
1da177e4 2524/**
20346722 2525 * free_rx_buffers - Frees all Rx buffers
1da177e4 2526 * @sp: device private variable.
20346722 2527 * Description:
2528 * This function will free all Rx buffers allocated by host.
2529 * Return Value:
2530 * NONE.
2531 */
2532
2533static void free_rx_buffers(struct s2io_nic *sp)
2534{
2535 struct net_device *dev = sp->dev;
da6971d8 2536 int i, blk = 0, buf_cnt = 0;
2537 mac_info_t *mac_control;
2538 struct config_param *config;
2539
2540 mac_control = &sp->mac_control;
2541 config = &sp->config;
2542
2543 for (i = 0; i < config->rx_ring_num; i++) {
2544 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2545 free_rxd_blk(sp,i,blk);
1da177e4 2546
2547 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2548 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2549 mac_control->rings[i].rx_curr_put_info.offset = 0;
2550 mac_control->rings[i].rx_curr_get_info.offset = 0;
2551 atomic_set(&sp->rx_bufs_left[i], 0);
2552 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2553 dev->name, buf_cnt, i);
2554 }
2555}
2556
2557/**
2558 * s2io_poll - Rx interrupt handler for NAPI support
2559 * @dev : pointer to the device structure.
20346722 2560 * @budget : The number of packets that were budgeted to be processed
2561 * during one pass through the 'Poll" function.
2562 * Description:
2563 * Comes into the picture only if NAPI support has been incorporated. It does
2564 * the same thing that rx_intr_handler does, but not in an interrupt context;
2565 * also, it will process only a given number of packets.
2566 * Return value:
2567 * 0 on success and 1 if there are No Rx packets to be processed.
2568 */
2569
20346722 2570#if defined(CONFIG_S2IO_NAPI)
2571static int s2io_poll(struct net_device *dev, int *budget)
2572{
2573 nic_t *nic = dev->priv;
20346722 2574 int pkt_cnt = 0, org_pkts_to_process;
2575 mac_info_t *mac_control;
2576 struct config_param *config;
509a2671 2577 XENA_dev_config_t __iomem *bar0 = nic->bar0;
863c11a9 2578 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
20346722 2579 int i;
1da177e4 2580
7ba013ac 2581 atomic_inc(&nic->isr_cnt);
2582 mac_control = &nic->mac_control;
2583 config = &nic->config;
2584
2585 nic->pkts_to_process = *budget;
2586 if (nic->pkts_to_process > dev->quota)
2587 nic->pkts_to_process = dev->quota;
2588 org_pkts_to_process = nic->pkts_to_process;
1da177e4 2589
1da177e4 2590 writeq(val64, &bar0->rx_traffic_int);
863c11a9 2591 val64 = readl(&bar0->rx_traffic_int);
2592
2593 for (i = 0; i < config->rx_ring_num; i++) {
2594 rx_intr_handler(&mac_control->rings[i]);
2595 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2596 if (!nic->pkts_to_process) {
2597 /* Quota for the current iteration has been met */
2598 goto no_rx;
1da177e4 2599 }
2600 }
2601 if (!pkt_cnt)
2602 pkt_cnt = 1;
2603
2604 dev->quota -= pkt_cnt;
2605 *budget -= pkt_cnt;
2606 netif_rx_complete(dev);
2607
2608 for (i = 0; i < config->rx_ring_num; i++) {
2609 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2610 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2611 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2612 break;
2613 }
2614 }
2615 /* Re enable the Rx interrupts. */
2616 writeq(0x0, &bar0->rx_traffic_mask);
2617 val64 = readl(&bar0->rx_traffic_mask);
7ba013ac 2618 atomic_dec(&nic->isr_cnt);
2619 return 0;
2620
20346722 2621no_rx:
2622 dev->quota -= pkt_cnt;
2623 *budget -= pkt_cnt;
2624
2625 for (i = 0; i < config->rx_ring_num; i++) {
2626 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2627 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2628 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2629 break;
2630 }
2631 }
7ba013ac 2632 atomic_dec(&nic->isr_cnt);
2633 return 1;
2634}
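/*
 * Budget accounting sketch (descriptive): under NAPI the handler may
 * consume at most min(*budget, dev->quota) packets per poll. Whatever is
 * actually processed is charged to both counters; e.g. if 16 packets of
 * a 64-packet budget are consumed, dev->quota and *budget each drop by
 * 16 before netif_rx_complete() re-arms the Rx interrupt.
 */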
2635#endif
2636
b41477f3 2637#ifdef CONFIG_NET_POLL_CONTROLLER
612eff0e 2638/**
b41477f3 2639 * s2io_netpoll - netpoll event handler entry point
2640 * @dev : pointer to the device structure.
2641 * Description:
2642 * This function will be called by upper layer to check for events on the
2643 * interface in situations where interrupts are disabled. It is used for
2644 * specific in-kernel networking tasks, such as remote consoles and kernel
2645 * debugging over the network (example netdump in RedHat).
612eff0e 2646 */
2647static void s2io_netpoll(struct net_device *dev)
2648{
2649 nic_t *nic = dev->priv;
2650 mac_info_t *mac_control;
2651 struct config_param *config;
2652 XENA_dev_config_t __iomem *bar0 = nic->bar0;
b41477f3 2653 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2654 int i;
2655
2656 disable_irq(dev->irq);
2657
2658 atomic_inc(&nic->isr_cnt);
2659 mac_control = &nic->mac_control;
2660 config = &nic->config;
2661
612eff0e 2662 writeq(val64, &bar0->rx_traffic_int);
2663 writeq(val64, &bar0->tx_traffic_int);
2664
2665 /* We need to free up the transmitted skbs, or else netpoll will
2666 * run out of skbs and fail, and eventually a netpoll application such
2667 * as netdump will fail.
2668 */
2669 for (i = 0; i < config->tx_fifo_num; i++)
2670 tx_intr_handler(&mac_control->fifos[i]);
612eff0e 2671
b41477f3 2672 /* check for received packet and indicate up to network */
2673 for (i = 0; i < config->rx_ring_num; i++)
2674 rx_intr_handler(&mac_control->rings[i]);
2675
2676 for (i = 0; i < config->rx_ring_num; i++) {
2677 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2678 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2679 DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
2680 break;
2681 }
2682 }
2683 atomic_dec(&nic->isr_cnt);
2684 enable_irq(dev->irq);
2685 return;
2686}
2687#endif
2688
20346722 2689/**
2690 * rx_intr_handler - Rx interrupt handler
2691 * @nic: device private variable.
2692 * Description:
2693 * If the interrupt is because of a received frame or if the
1da177e4 2694 * receive ring contains fresh, as yet un-processed frames, this function is
2695 * called. It picks out the RxD at which place the last Rx processing had
2696 * stopped and sends the skb to the OSM's Rx handler and then increments
2697 * the offset.
2698 * Return Value:
2699 * NONE.
2700 */
20346722 2701static void rx_intr_handler(ring_info_t *ring_data)
1da177e4 2702{
20346722 2703 nic_t *nic = ring_data->nic;
1da177e4 2704 struct net_device *dev = (struct net_device *) nic->dev;
da6971d8 2705 int get_block, put_block, put_offset;
2706 rx_curr_get_info_t get_info, put_info;
2707 RxD_t *rxdp;
2708 struct sk_buff *skb;
2709#ifndef CONFIG_S2IO_NAPI
2710 int pkt_cnt = 0;
1da177e4 2711#endif
2712 int i;
2713
2714 spin_lock(&nic->rx_lock);
2715 if (atomic_read(&nic->card_state) == CARD_DOWN) {
776bd20f 2716 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2717 __FUNCTION__, dev->name);
2718 spin_unlock(&nic->rx_lock);
776bd20f 2719 return;
2720 }
2721
2722 get_info = ring_data->rx_curr_get_info;
2723 get_block = get_info.block_index;
2724 put_info = ring_data->rx_curr_put_info;
2725 put_block = put_info.block_index;
da6971d8 2726 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2727#ifndef CONFIG_S2IO_NAPI
2728 spin_lock(&nic->put_lock);
2729 put_offset = ring_data->put_pos;
2730 spin_unlock(&nic->put_lock);
2731#else
da6971d8 2732 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
2733 put_info.offset;
2734#endif
2735 while (RXD_IS_UP2DT(rxdp)) {
2736 /* If we are next to the put index then it's a FIFO full condition */
2737 if ((get_block == put_block) &&
2738 (get_info.offset + 1) == put_info.offset) {
75c30b13 2739 DBG_PRINT(INTR_DBG, "%s: Ring Full\n", dev->name);
2740 break;
2741 }
2742 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2743 if (skb == NULL) {
2744 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2745 dev->name);
2746 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
7ba013ac 2747 spin_unlock(&nic->rx_lock);
20346722 2748 return;
1da177e4 2749 }
2750 if (nic->rxd_mode == RXD_MODE_1) {
2751 pci_unmap_single(nic->pdev, (dma_addr_t)
2752 ((RxD1_t*)rxdp)->Buffer0_ptr,
2753 dev->mtu +
2754 HEADER_ETHERNET_II_802_3_SIZE +
2755 HEADER_802_2_SIZE +
2756 HEADER_SNAP_SIZE,
2757 PCI_DMA_FROMDEVICE);
da6971d8 2758 } else if (nic->rxd_mode == RXD_MODE_3B) {
75c30b13 2759 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
da6971d8 2760 ((RxD3_t*)rxdp)->Buffer0_ptr,
20346722 2761 BUF0_LEN, PCI_DMA_FROMDEVICE);
2762 pci_unmap_single(nic->pdev, (dma_addr_t)
2763 ((RxD3_t*)rxdp)->Buffer2_ptr,
2764 dev->mtu + 4,
20346722 2765 PCI_DMA_FROMDEVICE);
da6971d8 2766 } else {
75c30b13 2767 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2768 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2769 PCI_DMA_FROMDEVICE);
2770 pci_unmap_single(nic->pdev, (dma_addr_t)
2771 ((RxD3_t*)rxdp)->Buffer1_ptr,
2772 l3l4hdr_size + 4,
2773 PCI_DMA_FROMDEVICE);
2774 pci_unmap_single(nic->pdev, (dma_addr_t)
2775 ((RxD3_t*)rxdp)->Buffer2_ptr,
2776 dev->mtu, PCI_DMA_FROMDEVICE);
2777 }
863c11a9 2778 prefetch(skb->data);
2779 rx_osm_handler(ring_data, rxdp);
2780 get_info.offset++;
2781 ring_data->rx_curr_get_info.offset = get_info.offset;
2782 rxdp = ring_data->rx_blocks[get_block].
2783 rxds[get_info.offset].virt_addr;
2784 if (get_info.offset == rxd_count[nic->rxd_mode]) {
20346722 2785 get_info.offset = 0;
da6971d8 2786 ring_data->rx_curr_get_info.offset = get_info.offset;
20346722 2787 get_block++;
2788 if (get_block == ring_data->block_count)
2789 get_block = 0;
2790 ring_data->rx_curr_get_info.block_index = get_block;
2791 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2792 }
1da177e4 2793
2794#ifdef CONFIG_S2IO_NAPI
2795 nic->pkts_to_process -= 1;
2796 if (!nic->pkts_to_process)
2797 break;
2798#else
2799 pkt_cnt++;
2800 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2801 break;
20346722 2802#endif
1da177e4 2803 }
2804 if (nic->lro) {
2805 /* Clear all LRO sessions before exiting */
2806 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2807 lro_t *lro = &nic->lro0_n[i];
2808 if (lro->in_use) {
2809 update_L3L4_header(nic, lro);
2810 queue_rx_frame(lro->parent);
2811 clear_lro_session(lro);
2812 }
2813 }
2814 }
2815
7ba013ac 2816 spin_unlock(&nic->rx_lock);
1da177e4 2817}
2818
2819/**
2820 * tx_intr_handler - Transmit interrupt handler
2821 * @nic : device private variable
2822 * Description:
2823 * If an interrupt was raised to indicate DMA complete of the
2824 * Tx packet, this function is called. It identifies the last TxD
2825 * whose buffer was freed and frees all skbs whose data have already
2826 * DMA'ed into the NICs internal memory.
2827 * Return Value:
2828 * NONE
2829 */
2830
20346722 2831static void tx_intr_handler(fifo_info_t *fifo_data)
1da177e4 2832{
20346722 2833 nic_t *nic = fifo_data->nic;
2834 struct net_device *dev = (struct net_device *) nic->dev;
2835 tx_curr_get_info_t get_info, put_info;
2836 struct sk_buff *skb;
2837 TxD_t *txdlp;
1da177e4 2838
2839 get_info = fifo_data->tx_curr_get_info;
2840 put_info = fifo_data->tx_curr_put_info;
2841 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2842 list_virt_addr;
2843 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2844 (get_info.offset != put_info.offset) &&
2845 (txdlp->Host_Control)) {
2846 /* Check for TxD errors */
2847 if (txdlp->Control_1 & TXD_T_CODE) {
2848 unsigned long long err;
2849 err = txdlp->Control_1 & TXD_T_CODE;
2850 if (err & 0x1) {
2851 nic->mac_control.stats_info->sw_stat.
2852 parity_err_cnt++;
2853 }
776bd20f 2854 if ((err >> 48) == 0xA) {
2855 DBG_PRINT(TX_DBG, "TxD returned due \
cc6e7c44 2856to loss of link\n");
776bd20f 2857 }
2858 else {
2859 DBG_PRINT(ERR_DBG, "***TxD error \
cc6e7c44 2860%llx\n", err);
776bd20f 2861 }
20346722 2862 }
1da177e4 2863
fed5eccd 2864 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2865 if (skb == NULL) {
2866 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2867 __FUNCTION__);
2868 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2869 return;
2870 }
2871
20346722 2872 /* Updating the statistics block */
2873 nic->stats.tx_bytes += skb->len;
2874 dev_kfree_skb_irq(skb);
2875
2876 get_info.offset++;
2877 if (get_info.offset == get_info.fifo_len + 1)
2878 get_info.offset = 0;
2879 txdlp = (TxD_t *) fifo_data->list_info
2880 [get_info.offset].list_virt_addr;
2881 fifo_data->tx_curr_get_info.offset =
2882 get_info.offset;
2883 }
2884
2885 spin_lock(&nic->tx_lock);
2886 if (netif_queue_stopped(dev))
2887 netif_wake_queue(dev);
2888 spin_unlock(&nic->tx_lock);
2889}
2890
2891/**
2892 * s2io_mdio_write - Function to write into the MDIO registers
2893 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2894 * @addr : address value
2895 * @value : data value
2896 * @dev : pointer to net_device structure
2897 * Description:
2898 * This function is used to write values into the MDIO registers.
2899 * Return value: NONE
2900 */
2901static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2902{
2903 u64 val64 = 0x0;
2904 nic_t *sp = dev->priv;
2905 XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
2906
2907 /* Address transaction */
2908 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2909 | MDIO_MMD_DEV_ADDR(mmd_type)
2910 | MDIO_MMS_PRT_ADDR(0x0);
2911 writeq(val64, &bar0->mdio_control);
2912 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2913 writeq(val64, &bar0->mdio_control);
2914 udelay(100);
2915
2916 /* Data transaction */
2917 val64 = 0x0;
2918 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2919 | MDIO_MMD_DEV_ADDR(mmd_type)
2920 | MDIO_MMS_PRT_ADDR(0x0)
2921 | MDIO_MDIO_DATA(value)
2922 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2923 writeq(val64, &bar0->mdio_control);
2924 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2925 writeq(val64, &bar0->mdio_control);
2926 udelay(100);
2927
2928 val64 = 0x0;
2929 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2930 | MDIO_MMD_DEV_ADDR(mmd_type)
2931 | MDIO_MMS_PRT_ADDR(0x0)
2932 | MDIO_OP(MDIO_OP_READ_TRANS);
2933 writeq(val64, &bar0->mdio_control);
2934 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2935 writeq(val64, &bar0->mdio_control);
2936 udelay(100);
2937
2938}
2939
2940/**
2941 * s2io_mdio_read - Function to read from the MDIO registers
2942 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2943 * @addr : address value
2944 * @dev : pointer to net_device structure
2945 * Description:
2946 * This function is used to read values from the MDIO registers.
2947 * Return value: the 16-bit value read from the register
2948 */
2949static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2950{
2951 u64 val64 = 0x0;
2952 u64 rval64 = 0x0;
2953 nic_t *sp = dev->priv;
2954 XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
2955
2956 /* address transaction */
2957 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2958 | MDIO_MMD_DEV_ADDR(mmd_type)
2959 | MDIO_MMS_PRT_ADDR(0x0);
2960 writeq(val64, &bar0->mdio_control);
2961 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2962 writeq(val64, &bar0->mdio_control);
2963 udelay(100);
2964
2965 /* Data transaction */
2966 val64 = 0x0;
2967 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2968 | MDIO_MMD_DEV_ADDR(mmd_type)
2969 | MDIO_MMS_PRT_ADDR(0x0)
2970 | MDIO_OP(MDIO_OP_READ_TRANS);
2971 writeq(val64, &bar0->mdio_control);
2972 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2973 writeq(val64, &bar0->mdio_control);
2974 udelay(100);
2975
2976 /* Read the value from regs */
2977 rval64 = readq(&bar0->mdio_control);
2978 rval64 = rval64 & 0xFFFF0000;
2979 rval64 = rval64 >> 16;
2980 return rval64;
2981}
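/*
 * Usage sketch (illustrative; taken from the XPAK counter update logic
 * below): a DOM register is first loaded and then read back through the
 * PMA/PMD MMD.
 *
 *	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, 0xA100, 0x0, dev);
 *	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, 0xA100, dev);
 */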
2982/**
2983 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
2984 * @counter : counter value to be updated
2985 * @flag : flag to indicate the status
2986 * @type : counter type
2987 * Description:
2988 * This function checks the status of the xpak counters value.
2989 * Return value: NONE
2990 */
2991
2992static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2993{
2994 u64 mask = 0x3;
2995 u64 val64;
2996 int i;
2997 for(i = 0; i <index; i++)
2998 mask = mask << 0x2;
2999
3000 if(flag > 0)
3001 {
3002 *counter = *counter + 1;
3003 val64 = *regs_stat & mask;
3004 val64 = val64 >> (index * 0x2);
3005 val64 = val64 + 1;
3006 if(val64 == 3)
3007 {
3008 switch(type)
3009 {
3010 case 1:
3011 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3012 "service. Excessive temperatures may "
3013 "result in premature transceiver "
3014 "failure \n");
3015 break;
3016 case 2:
3017 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3018 "service Excessive bias currents may "
3019 "indicate imminent laser diode "
3020 "failure \n");
3021 break;
3022 case 3:
3023 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3024 "service Excessive laser output "
3025 "power may saturate far-end "
3026 "receiver\n");
3027 break;
3028 default:
3029 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3030 "type \n");
3031 }
3032 val64 = 0x0;
3033 }
3034 val64 = val64 << (index * 0x2);
3035 *regs_stat = (*regs_stat & (~mask)) | (val64);
3036
3037 } else {
3038 *regs_stat = *regs_stat & (~mask);
3039 }
3040}
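/*
 * Worked example (descriptive): each alarm owns a 2-bit field inside
 * *regs_stat at bit position index*2, so for index == 2 the mask is
 * 0x3 << 4 == 0x30. Three consecutive polls with the flag set count the
 * field 1 -> 2 -> 3, at which point the warning above is printed and the
 * field wraps back to 0; a poll with the flag clear resets it at once.
 */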
3041
3042/**
3043 * s2io_updt_xpak_counter - Function to update the xpak counters
3044 * @dev : pointer to net_device struct
3045 * Description:
3046 * This function updates the status of the xpak counters value.
3047 * Return value: NONE
3048 */
3049static void s2io_updt_xpak_counter(struct net_device *dev)
3050{
3051 u16 flag = 0x0;
3052 u16 type = 0x0;
3053 u16 val16 = 0x0;
3054 u64 val64 = 0x0;
3055 u64 addr = 0x0;
3056
3057 nic_t *sp = dev->priv;
3058 StatInfo_t *stat_info = sp->mac_control.stats_info;
3059
3060 /* Check the communication with the MDIO slave */
3061 addr = 0x0000;
3062 val64 = 0x0;
3063 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3064 if((val64 == 0xFFFF) || (val64 == 0x0000))
3065 {
3066 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3067 "Returned %llx\n", (unsigned long long)val64);
3068 return;
3069 }
3070
3071 /* Check for the expected value of 0x2040 at PMA address 0x0000 */
3072 if(val64 != 0x2040)
3073 {
3074 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3075 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3076 (unsigned long long)val64);
3077 return;
3078 }
3079
3080 /* Loading the DOM register to MDIO register */
3081 addr = 0xA100;
3082 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3083 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3084
3085 /* Reading the Alarm flags */
3086 addr = 0xA070;
3087 val64 = 0x0;
3088 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3089
3090 flag = CHECKBIT(val64, 0x7);
3091 type = 1;
3092 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3093 &stat_info->xpak_stat.xpak_regs_stat,
3094 0x0, flag, type);
3095
3096 if(CHECKBIT(val64, 0x6))
3097 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3098
3099 flag = CHECKBIT(val64, 0x3);
3100 type = 2;
3101 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3102 &stat_info->xpak_stat.xpak_regs_stat,
3103 0x2, flag, type);
3104
3105 if(CHECKBIT(val64, 0x2))
3106 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3107
3108 flag = CHECKBIT(val64, 0x1);
3109 type = 3;
3110 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3111 &stat_info->xpak_stat.xpak_regs_stat,
3112 0x4, flag, type);
3113
3114 if(CHECKBIT(val64, 0x0))
3115 stat_info->xpak_stat.alarm_laser_output_power_low++;
3116
3117 /* Reading the Warning flags */
3118 addr = 0xA074;
3119 val64 = 0x0;
3120 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3121
3122 if(CHECKBIT(val64, 0x7))
3123 stat_info->xpak_stat.warn_transceiver_temp_high++;
3124
3125 if(CHECKBIT(val64, 0x6))
3126 stat_info->xpak_stat.warn_transceiver_temp_low++;
3127
3128 if(CHECKBIT(val64, 0x3))
3129 stat_info->xpak_stat.warn_laser_bias_current_high++;
3130
3131 if(CHECKBIT(val64, 0x2))
3132 stat_info->xpak_stat.warn_laser_bias_current_low++;
3133
3134 if(CHECKBIT(val64, 0x1))
3135 stat_info->xpak_stat.warn_laser_output_power_high++;
3136
3137 if(CHECKBIT(val64, 0x0))
3138 stat_info->xpak_stat.warn_laser_output_power_low++;
3139}
3140
20346722 3141/**
3142 * alarm_intr_handler - Alarm Interrupt handler
3143 * @nic: device private variable
20346722 3144 * Description: If the interrupt was neither because of an Rx packet nor a Tx
1da177e4 3145 * complete, this function is called. If the interrupt was to indicate
3146 * a loss of link, the OSM link status handler is invoked; for any other
3147 * alarm interrupt, the block that raised the interrupt is displayed
3148 * and a H/W reset is issued.
3149 * Return Value:
3150 * NONE
3151*/
3152
3153static void alarm_intr_handler(struct s2io_nic *nic)
3154{
3155 struct net_device *dev = (struct net_device *) nic->dev;
3156 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3157 register u64 val64 = 0, err_reg = 0;
3158 u64 cnt;
3159 int i;
3160 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3161 /* Handling the XPAK counters update */
3162 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3163 /* waiting for an hour */
3164 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3165 } else {
3166 s2io_updt_xpak_counter(dev);
3167 /* reset the count to zero */
3168 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3169 }
3170
3171 /* Handling link status change error Intr */
3172 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3173 err_reg = readq(&bar0->mac_rmac_err_reg);
3174 writeq(err_reg, &bar0->mac_rmac_err_reg);
3175 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3176 schedule_work(&nic->set_link_task);
3177 }
3178 }
3179
3180 /* Handling Ecc errors */
3181 val64 = readq(&bar0->mc_err_reg);
3182 writeq(val64, &bar0->mc_err_reg);
3183 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3184 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3185 nic->mac_control.stats_info->sw_stat.
3186 double_ecc_errs++;
776bd20f 3187 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
5e25b9dd 3188 dev->name);
776bd20f 3189 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
e960fc5c 3190 if (nic->device_type != XFRAME_II_DEVICE) {
776bd20f 3191 /* Reset XframeI only if critical error */
3192 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3193 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3194 netif_stop_queue(dev);
3195 schedule_work(&nic->rst_timer_task);
3196 nic->mac_control.stats_info->sw_stat.
3197 soft_reset_cnt++;
776bd20f 3198 }
e960fc5c 3199 }
5e25b9dd 3200 } else {
3201 nic->mac_control.stats_info->sw_stat.
3202 single_ecc_errs++;
3203 }
3204 }
3205
3206 /* In case of a serious error, the device will be Reset. */
3207 val64 = readq(&bar0->serr_source);
3208 if (val64 & SERR_SOURCE_ANY) {
bd1034f0 3209 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
1da177e4 3210 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
776bd20f 3211 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3212 (unsigned long long)val64);
3213 netif_stop_queue(dev);
3214 schedule_work(&nic->rst_timer_task);
bd1034f0 3215 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3216 }
3217
3218 /*
3219 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3220 * Error occurs, the adapter will be recycled by disabling the
20346722 3221 * adapter enable bit and enabling it again after the device
3222 * becomes Quiescent.
3223 */
3224 val64 = readq(&bar0->pcc_err_reg);
3225 writeq(val64, &bar0->pcc_err_reg);
3226 if (val64 & PCC_FB_ECC_DB_ERR) {
3227 u64 ac = readq(&bar0->adapter_control);
3228 ac &= ~(ADAPTER_CNTL_EN);
3229 writeq(ac, &bar0->adapter_control);
3230 ac = readq(&bar0->adapter_control);
3231 schedule_work(&nic->set_link_task);
3232 }
3233 /* Check for data parity error */
3234 val64 = readq(&bar0->pic_int_status);
3235 if (val64 & PIC_INT_GPIO) {
3236 val64 = readq(&bar0->gpio_int_reg);
3237 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3238 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3239 schedule_work(&nic->rst_timer_task);
3240 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3241 }
3242 }
3243
3244 /* Check for ring full counter */
3245 if (nic->device_type & XFRAME_II_DEVICE) {
3246 val64 = readq(&bar0->ring_bump_counter1);
3247 for (i=0; i<4; i++) {
3248 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3249 cnt >>= 64 - ((i+1)*16);
3250 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3251 += cnt;
3252 }
3253
3254 val64 = readq(&bar0->ring_bump_counter2);
3255 for (i=0; i<4; i++) {
3256 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3257 cnt >>= 64 - ((i+1)*16);
3258 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3259 += cnt;
3260 }
3261 }
3262
3263 /* Other type of interrupts are not being handled now, TODO */
3264}
3265
20346722 3266/**
1da177e4 3267 * wait_for_cmd_complete - waits for a command to complete.
20346722 3268 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3269 * s2io_nic structure.
3270 * Description: Function that waits for a command written to the RMAC
3271 * ADDR DATA registers to be completed, and returns either success or
3272 * error depending on whether the command completed or not.
3273 * Return value:
3274 * SUCCESS on success and FAILURE on failure.
3275 */
3276
c92ca04b 3277static int wait_for_cmd_complete(void *addr, u64 busy_bit)
1da177e4 3278{
3279 int ret = FAILURE, cnt = 0;
3280 u64 val64;
3281
3282 while (TRUE) {
3283 val64 = readq(addr);
3284 if (!(val64 & busy_bit)) {
3285 ret = SUCCESS;
3286 break;
3287 }
3288
3289 if(in_interrupt())
3290 mdelay(50);
3291 else
3292 msleep(50);
3293
3294 if (cnt++ > 10)
3295 break;
3296 }
3297 return ret;
3298}
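/*
 * Usage sketch (illustrative; this mirrors how the driver elsewhere polls
 * the RMAC address command memory, with the busy-bit name as defined in
 * s2io.h): the register address and busy bit are supplied by the caller,
 * so one helper serves any "issue command, wait for completion" register.
 *
 *	writeq(val64, &bar0->rmac_addr_cmd_mem);
 *	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
 *			RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING) == FAILURE)
 *		return FAILURE;
 */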
3299
3300/**
3301 * s2io_reset - Resets the card.
3302 * @sp : private member of the device structure.
3303 * Description: Function to Reset the card. This function then also
20346722 3304 * restores the previously saved PCI configuration space registers as
1da177e4
LT
3305 * the card reset also resets the configuration space.
3306 * Return value:
3307 * void.
3308 */
3309
26df54bf 3310static void s2io_reset(nic_t * sp)
3311{
3312 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3313 u64 val64;
5e25b9dd 3314 u16 subid, pci_cmd;
1da177e4 3315
0b1f7ebe 3316 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
e960fc5c 3317 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
0b1f7ebe 3318
1da177e4
LT
3319 val64 = SW_RESET_ALL;
3320 writeq(val64, &bar0->sw_reset);
3321
3322 /*
3323 * At this stage, if the PCI write is indeed completed, the
3324 * card is reset and so is the PCI Config space of the device.
3325 * So a read cannot be issued at this stage on any of the
3326 * registers to ensure the write into "sw_reset" register
3327 * has gone through.
3328 * Question: Is there any system call that will explicitly force
3329 * all the write commands still pending on the bus to be pushed
3330 * through?
3331 * As of now I am just giving a 250ms delay and hoping that the
3332 * PCI write to sw_reset register is done by this time.
3333 */
3334 msleep(250);
3335 if (strstr(sp->product_name, "CX4")) {
3336 msleep(750);
3337 }
1da177e4 3338
e960fc5c 3339 /* Restore the PCI state saved during initialization. */
3340 pci_restore_state(sp->pdev);
3341 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
0b1f7ebe 3342 pci_cmd);
3343 s2io_init_pci(sp);
3344
3345 msleep(250);
3346
3347 /* Set swapper to enable I/O register access */
3348 s2io_set_swapper(sp);
3349
3350 /* Restore the MSIX table entries from local variables */
3351 restore_xmsi_data(sp);
3352
5e25b9dd 3353 /* Clear certain PCI/PCI-X fields after reset */
303bcb4b 3354 if (sp->device_type == XFRAME_II_DEVICE) {
b41477f3 3355 /* Clear "detected parity error" bit */
303bcb4b 3356 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
5e25b9dd 3357
3358 /* Clearing PCIX Ecc status register */
3359 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
5e25b9dd 3360
303bcb4b
K
3361 /* Clearing PCI_STATUS error reflected here */
3362 writeq(BIT(62), &bar0->txpic_int_reg);
3363 }
5e25b9dd 3364
20346722
K
3365 /* Reset device statistics maintained by OS */
3366 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3367
1da177e4
LT
3368 /* SXE-002: Configure link and activity LED to turn it off */
3369 subid = sp->pdev->subsystem_device;
541ae68f
K
3370 if (((subid & 0xFF) >= 0x07) &&
3371 (sp->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
3372 val64 = readq(&bar0->gpio_control);
3373 val64 |= 0x0000800000000000ULL;
3374 writeq(val64, &bar0->gpio_control);
3375 val64 = 0x0411040400000000ULL;
509a2671 3376 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
3377 }
3378
541ae68f
K
3379 /*
3380 * Clear spurious ECC interrupts that would have occured on
3381 * XFRAME II cards after reset.
3382 */
3383 if (sp->device_type == XFRAME_II_DEVICE) {
3384 val64 = readq(&bar0->pcc_err_reg);
3385 writeq(val64, &bar0->pcc_err_reg);
3386 }
3387
1da177e4
LT
3388 sp->device_enabled_once = FALSE;
3389}
3390
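/*
 * Since SW_RESET_ALL also wipes the device's PCI config space, the
 * driver must snapshot the config space beforehand and replay it after
 * the reset. A minimal sketch of that ordering using the stock PCI core
 * helpers; trigger_sw_reset() is a hypothetical stand-in for the
 * sw_reset writeq() above.
 */
#if 0	/* illustrative sketch, not driver code */
#include <linux/pci.h>
#include <linux/delay.h>

extern void trigger_sw_reset(struct pci_dev *pdev);	/* hypothetical */

static void reset_and_restore(struct pci_dev *pdev)
{
    pci_save_state(pdev);	/* snapshot config space */
    trigger_sw_reset(pdev);	/* config space is now undefined */
    msleep(250);		/* let the reset settle */
    pci_restore_state(pdev);	/* replay BARs, command reg, etc. */
}
#endif
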
/**
 * s2io_set_swapper - to set the swapper control on the card
 * @sp : private member of the device structure,
 * pointer to the s2io_nic structure.
 * Description: Function to set the swapper control on the card
 * correctly depending on the 'endianness' of the system.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(nic_t * sp)
{
    struct net_device *dev = sp->dev;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    u64 val64, valt, valr;

    /*
     * Set proper endian settings and verify the same by reading
     * the PIF Feed-back register.
     */

    val64 = readq(&bar0->pif_rd_swapper_fb);
    if (val64 != 0x0123456789ABCDEFULL) {
        int i = 0;
        u64 value[] = { 0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
                0x8100008181000081ULL,	/* FE=1, SE=0 */
                0x4200004242000042ULL,	/* FE=0, SE=1 */
                0};			/* FE=0, SE=0 */

        while (i < 4) {
            writeq(value[i], &bar0->swapper_ctrl);
            val64 = readq(&bar0->pif_rd_swapper_fb);
            if (val64 == 0x0123456789ABCDEFULL)
                break;
            i++;
        }
        if (i == 4) {
            DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                  dev->name);
            DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                  (unsigned long long) val64);
            return FAILURE;
        }
        valr = value[i];
    } else {
        valr = readq(&bar0->swapper_ctrl);
    }

    valt = 0x0123456789ABCDEFULL;
    writeq(valt, &bar0->xmsi_address);
    val64 = readq(&bar0->xmsi_address);

    if (val64 != valt) {
        int i = 0;
        u64 value[] = { 0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
                0x0081810000818100ULL,	/* FE=1, SE=0 */
                0x0042420000424200ULL,	/* FE=0, SE=1 */
                0};			/* FE=0, SE=0 */

        while (i < 4) {
            writeq((value[i] | valr), &bar0->swapper_ctrl);
            writeq(valt, &bar0->xmsi_address);
            val64 = readq(&bar0->xmsi_address);
            if (val64 == valt)
                break;
            i++;
        }
        if (i == 4) {
            unsigned long long x = val64;
            DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
            DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
            return FAILURE;
        }
    }
    val64 = readq(&bar0->swapper_ctrl);
    val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
    /*
     * The device is by default set to a big endian format, so a
     * big endian driver need not set anything.
     */
    val64 |= (SWAPPER_CTRL_TXP_FE |
          SWAPPER_CTRL_TXP_SE |
          SWAPPER_CTRL_TXD_R_FE |
          SWAPPER_CTRL_TXD_W_FE |
          SWAPPER_CTRL_TXF_R_FE |
          SWAPPER_CTRL_RXD_R_FE |
          SWAPPER_CTRL_RXD_W_FE |
          SWAPPER_CTRL_RXF_W_FE |
          SWAPPER_CTRL_XMSI_FE |
          SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
    if (sp->intr_type == INTA)
        val64 |= SWAPPER_CTRL_XMSI_SE;
    writeq(val64, &bar0->swapper_ctrl);
#else
    /*
     * Initially we enable all bits to make it accessible by the
     * driver, then we selectively enable only those bits that
     * we want to set.
     */
    val64 |= (SWAPPER_CTRL_TXP_FE |
          SWAPPER_CTRL_TXP_SE |
          SWAPPER_CTRL_TXD_R_FE |
          SWAPPER_CTRL_TXD_R_SE |
          SWAPPER_CTRL_TXD_W_FE |
          SWAPPER_CTRL_TXD_W_SE |
          SWAPPER_CTRL_TXF_R_FE |
          SWAPPER_CTRL_RXD_R_FE |
          SWAPPER_CTRL_RXD_R_SE |
          SWAPPER_CTRL_RXD_W_FE |
          SWAPPER_CTRL_RXD_W_SE |
          SWAPPER_CTRL_RXF_W_FE |
          SWAPPER_CTRL_XMSI_FE |
          SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
    if (sp->intr_type == INTA)
        val64 |= SWAPPER_CTRL_XMSI_SE;
    writeq(val64, &bar0->swapper_ctrl);
#endif
    val64 = readq(&bar0->swapper_ctrl);

    /*
     * Verifying if endian settings are accurate by reading a
     * feedback register.
     */
    val64 = readq(&bar0->pif_rd_swapper_fb);
    if (val64 != 0x0123456789ABCDEFULL) {
        /* Endian settings are incorrect, calls for another dekko. */
        DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
              dev->name);
        DBG_PRINT(ERR_DBG, "feedback read %llx\n",
              (unsigned long long) val64);
        return FAILURE;
    }

    return SUCCESS;
}

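/*
 * The swapper logic hinges on one observation: if the host reads the
 * known signature 0x0123456789ABCDEF back byte-reversed, host and
 * device disagree on byte order and a swap mode must be programmed.
 * A stand-alone sketch of that detection idea:
 */
#if 0	/* illustrative sketch, not driver code */
#include <stdint.h>
#include <stdio.h>

#define SIGNATURE 0x0123456789ABCDEFULL

static uint64_t swap64(uint64_t v)	/* full byte reversal */
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 8; i++)
        r = (r << 8) | ((v >> (8 * i)) & 0xFF);
    return r;
}

int main(void)
{
    uint64_t feedback = swap64(SIGNATURE);	/* pretend the bus swapped it */

    if (feedback == SIGNATURE)
        printf("byte order matches, no swapping needed\n");
    else if (swap64(feedback) == SIGNATURE)
        printf("byte order reversed, enable the swapper\n");
    return 0;
}
#endif
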
static int wait_for_msix_trans(nic_t *nic, int i)
{
    XENA_dev_config_t __iomem *bar0 = nic->bar0;
    u64 val64;
    int ret = 0, cnt = 0;

    do {
        val64 = readq(&bar0->xmsi_access);
        if (!(val64 & BIT(15)))
            break;
        mdelay(1);
        cnt++;
    } while (cnt < 5);
    if (cnt == 5) {
        DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
        ret = 1;
    }

    return ret;
}

static void restore_xmsi_data(nic_t *nic)
{
    XENA_dev_config_t __iomem *bar0 = nic->bar0;
    u64 val64;
    int i;

    for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
        writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
        writeq(nic->msix_info[i].data, &bar0->xmsi_data);
        val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
        writeq(val64, &bar0->xmsi_access);
        if (wait_for_msix_trans(nic, i)) {
            DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
            continue;
        }
    }
}

static void store_xmsi_data(nic_t *nic)
{
    XENA_dev_config_t __iomem *bar0 = nic->bar0;
    u64 val64, addr, data;
    int i;

    /* Store and display */
    for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
        val64 = (BIT(15) | vBIT(i, 26, 6));
        writeq(val64, &bar0->xmsi_access);
        if (wait_for_msix_trans(nic, i)) {
            DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
            continue;
        }
        addr = readq(&bar0->xmsi_address);
        data = readq(&bar0->xmsi_data);
        if (addr && data) {
            nic->msix_info[i].addr = addr;
            nic->msix_info[i].data = data;
        }
    }
}

int s2io_enable_msi(nic_t *nic)
{
    XENA_dev_config_t __iomem *bar0 = nic->bar0;
    u16 msi_ctrl, msg_val;
    struct config_param *config = &nic->config;
    struct net_device *dev = nic->dev;
    u64 val64, tx_mat, rx_mat;
    int i, err;

    val64 = readq(&bar0->pic_control);
    val64 &= ~BIT(1);
    writeq(val64, &bar0->pic_control);

    err = pci_enable_msi(nic->pdev);
    if (err) {
        DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
              nic->dev->name);
        return err;
    }

    /*
     * Enable MSI and use MSI-1 instead of the standard MSI-0
     * for interrupt handling.
     */
    pci_read_config_word(nic->pdev, 0x4c, &msg_val);
    msg_val ^= 0x1;
    pci_write_config_word(nic->pdev, 0x4c, msg_val);
    pci_read_config_word(nic->pdev, 0x4c, &msg_val);

    pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
    msi_ctrl |= 0x10;
    pci_write_config_word(nic->pdev, 0x42, msi_ctrl);

    /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
    tx_mat = readq(&bar0->tx_mat0_n[0]);
    for (i = 0; i < config->tx_fifo_num; i++) {
        tx_mat |= TX_MAT_SET(i, 1);
    }
    writeq(tx_mat, &bar0->tx_mat0_n[0]);

    rx_mat = readq(&bar0->rx_mat);
    for (i = 0; i < config->rx_ring_num; i++) {
        rx_mat |= RX_MAT_SET(i, 1);
    }
    writeq(rx_mat, &bar0->rx_mat);

    dev->irq = nic->pdev->irq;
    return 0;
}

static int s2io_enable_msi_x(nic_t *nic)
{
    XENA_dev_config_t __iomem *bar0 = nic->bar0;
    u64 tx_mat, rx_mat;
    u16 msi_control;	/* Temp variable */
    int ret, i, j, msix_indx = 1;

    nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
                   GFP_KERNEL);
    if (nic->entries == NULL) {
        DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
        return -ENOMEM;
    }
    memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

    nic->s2io_entries =
        kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
            GFP_KERNEL);
    if (nic->s2io_entries == NULL) {
        DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
        kfree(nic->entries);
        return -ENOMEM;
    }
    memset(nic->s2io_entries, 0,
           MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

    for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
        nic->entries[i].entry = i;
        nic->s2io_entries[i].entry = i;
        nic->s2io_entries[i].arg = NULL;
        nic->s2io_entries[i].in_use = 0;
    }

    tx_mat = readq(&bar0->tx_mat0_n[0]);
    for (i = 0; i < nic->config.tx_fifo_num; i++, msix_indx++) {
        tx_mat |= TX_MAT_SET(i, msix_indx);
        nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
        nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
        nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
    }
    writeq(tx_mat, &bar0->tx_mat0_n[0]);

    if (!nic->config.bimodal) {
        rx_mat = readq(&bar0->rx_mat);
        for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
            rx_mat |= RX_MAT_SET(j, msix_indx);
            nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
            nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
            nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
        }
        writeq(rx_mat, &bar0->rx_mat);
    } else {
        tx_mat = readq(&bar0->tx_mat0_n[7]);
        for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
            tx_mat |= TX_MAT_SET(i, msix_indx);
            nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
            nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
            nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
        }
        writeq(tx_mat, &bar0->tx_mat0_n[7]);
    }

    nic->avail_msix_vectors = 0;
    ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
    /* We fail init on error or if we get fewer vectors than the minimum required */
    if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
        nic->avail_msix_vectors = ret;
        ret = pci_enable_msix(nic->pdev, nic->entries, ret);
    }
    if (ret) {
        DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
        kfree(nic->entries);
        kfree(nic->s2io_entries);
        nic->entries = NULL;
        nic->s2io_entries = NULL;
        nic->avail_msix_vectors = 0;
        return -ENOMEM;
    }
    if (!nic->avail_msix_vectors)
        nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;

    /*
     * To enable MSI-X, MSI also needs to be enabled, due to a bug
     * in the herc NIC. (Temp change, needs to be removed later)
     */
    pci_read_config_word(nic->pdev, 0x42, &msi_control);
    msi_control |= 0x1;	/* Enable MSI */
    pci_write_config_word(nic->pdev, 0x42, msi_control);

    return 0;
}

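/*
 * Note the two-step negotiation above: in this era of the API, a
 * positive return from pci_enable_msix() meant "request failed, but
 * this many vectors are available", so the driver retries with that
 * count when it still covers the minimum it needs. A sketch of the
 * pattern; min_vecs is an illustrative parameter, not a driver field.
 */
#if 0	/* illustrative sketch, not driver code */
#include <linux/pci.h>

static int negotiate_msix(struct pci_dev *pdev, struct msix_entry *entries,
              int requested, int min_vecs)
{
    int ret = pci_enable_msix(pdev, entries, requested);

    if (ret > 0 && ret >= min_vecs)	/* fewer vectors offered, retry */
        ret = pci_enable_msix(pdev, entries, ret);

    return ret;	/* 0 on success, negative on hard failure */
}
#endif
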
/* ********************************************************* *
 * Functions defined below concern the OS part of the driver *
 * ********************************************************* */

/**
 * s2io_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_open(struct net_device *dev)
{
    nic_t *sp = dev->priv;
    int err = 0;

    /*
     * Make sure the link is off by default every time
     * the NIC is initialized.
     */
    netif_carrier_off(dev);
    sp->last_link_state = 0;

    /* Initialize H/W and enable interrupts */
    err = s2io_card_up(sp);
    if (err) {
        DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
              dev->name);
        goto hw_init_failed;
    }

    if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
        DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
        s2io_card_down(sp);
        err = -ENODEV;
        goto hw_init_failed;
    }

    netif_start_queue(dev);
    return 0;

hw_init_failed:
    if (sp->intr_type == MSI_X) {
        if (sp->entries)
            kfree(sp->entries);
        if (sp->s2io_entries)
            kfree(sp->s2io_entries);
    }
    return err;
}

/**
 * s2io_close - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_close(struct net_device *dev)
{
    nic_t *sp = dev->priv;

    flush_scheduled_work();
    netif_stop_queue(dev);
    /* Reset card, kill tasklet and free Tx and Rx buffers. */
    s2io_card_down(sp);

    sp->device_close_flag = TRUE;	/* Device is shut down. */
    return 0;
}

/**
 * s2io_xmit - Tx entry point of the driver
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 * Description :
 * This function is the Tx entry point of the driver. The S2IO NIC supports
 * certain protocol assist features on the Tx side, namely CSO, S/G, LSO.
 * NOTE: when the device can't queue the packet, just the trans_start
 * variable will not be updated.
 * Return value:
 * 0 on success & 1 on failure.
 */

static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
    nic_t *sp = dev->priv;
    u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
    register u64 val64;
    TxD_t *txdp;
    TxFIFO_element_t __iomem *tx_fifo;
    unsigned long flags;
    u16 vlan_tag = 0;
    int vlan_priority = 0;
    mac_info_t *mac_control;
    struct config_param *config;
    int offload_type;

    mac_control = &sp->mac_control;
    config = &sp->config;

    DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
    spin_lock_irqsave(&sp->tx_lock, flags);
    if (atomic_read(&sp->card_state) == CARD_DOWN) {
        DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
              dev->name);
        spin_unlock_irqrestore(&sp->tx_lock, flags);
        dev_kfree_skb(skb);
        return 0;
    }

    queue = 0;

    /* Get FIFO number to transmit based on vlan priority */
    if (sp->vlgrp && vlan_tx_tag_present(skb)) {
        vlan_tag = vlan_tx_tag_get(skb);
        vlan_priority = vlan_tag >> 13;
        queue = config->fifo_mapping[vlan_priority];
    }

    put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
    get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
    txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
        list_virt_addr;

    queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
    /* Avoid "put" pointer going beyond "get" pointer */
    if (txdp->Host_Control ||
        ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
        DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
        netif_stop_queue(dev);
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&sp->tx_lock, flags);
        return 0;
    }

    /* A buffer with no data will be dropped */
    if (!skb->len) {
        DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&sp->tx_lock, flags);
        return 0;
    }

    offload_type = s2io_offload_type(skb);
#ifdef NETIF_F_TSO
    if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
        txdp->Control_1 |= TXD_TCP_LSO_EN;
        txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
    }
#endif
    if (skb->ip_summed == CHECKSUM_HW) {
        txdp->Control_2 |=
            (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
             TXD_TX_CKO_UDP_EN);
    }
    txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
    txdp->Control_1 |= TXD_LIST_OWN_XENA;
    txdp->Control_2 |= config->tx_intr_type;

    if (sp->vlgrp && vlan_tx_tag_present(skb)) {
        txdp->Control_2 |= TXD_VLAN_ENABLE;
        txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
    }

    frg_len = skb->len - skb->data_len;
    if (offload_type == SKB_GSO_UDP) {
        int ufo_size;

        ufo_size = s2io_udp_mss(skb);
        ufo_size &= ~7;
        txdp->Control_1 |= TXD_UFO_EN;
        txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
        txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
        sp->ufo_in_band_v[put_off] =
            (u64)skb_shinfo(skb)->ip6_frag_id;
#else
        sp->ufo_in_band_v[put_off] =
            (u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
        txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
        txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                    sp->ufo_in_band_v,
                    sizeof(u64), PCI_DMA_TODEVICE);
        txdp++;
    }

    txdp->Buffer_Pointer = pci_map_single
        (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
    txdp->Host_Control = (unsigned long) skb;
    txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
    if (offload_type == SKB_GSO_UDP)
        txdp->Control_1 |= TXD_UFO_EN;

    frg_cnt = skb_shinfo(skb)->nr_frags;
    /* For fragmented SKB. */
    for (i = 0; i < frg_cnt; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
        /* A '0' length fragment will be ignored */
        if (!frag->size)
            continue;
        txdp++;
        txdp->Buffer_Pointer = (u64) pci_map_page
            (sp->pdev, frag->page, frag->page_offset,
             frag->size, PCI_DMA_TODEVICE);
        txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
        if (offload_type == SKB_GSO_UDP)
            txdp->Control_1 |= TXD_UFO_EN;
    }
    txdp->Control_1 |= TXD_GATHER_CODE_LAST;

    if (offload_type == SKB_GSO_UDP)
        frg_cnt++;	/* as Txd0 was used for inband header */

    tx_fifo = mac_control->tx_FIFO_start[queue];
    val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
    writeq(val64, &tx_fifo->TxDL_Pointer);

    val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
         TX_FIFO_LAST_LIST);
    if (offload_type)
        val64 |= TX_FIFO_SPECIAL_FUNC;

    writeq(val64, &tx_fifo->List_Control);

    mmiowb();

    put_off++;
    if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
        put_off = 0;
    mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

    /* Avoid "put" pointer going beyond "get" pointer */
    if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
        sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
        DBG_PRINT(TX_DBG,
              "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
              put_off, get_off);
        netif_stop_queue(dev);
    }

    dev->trans_start = jiffies;
    spin_unlock_irqrestore(&sp->tx_lock, flags);

    return 0;
}

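/*
 * The Tx descriptor list is managed as a ring: it is declared full when
 * advancing the "put" pointer would land on the "get" pointer, which
 * deliberately sacrifices one slot so that full and empty states remain
 * distinguishable. A tiny stand-alone sketch of the check used twice in
 * s2io_xmit() above:
 */
#if 0	/* illustrative sketch, not driver code */
#include <stdbool.h>

/* Full when (put + 1) modulo len equals get; one slot stays unused. */
static bool ring_full(unsigned int put, unsigned int get, unsigned int len)
{
    return ((put + 1) == len ? 0 : (put + 1)) == get;
}

/* Empty when both pointers coincide. */
static bool ring_empty(unsigned int put, unsigned int get)
{
    return put == get;
}
#endif
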
static void
s2io_alarm_handle(unsigned long data)
{
    nic_t *sp = (nic_t *)data;

    alarm_intr_handler(sp);
    mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}

static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
{
    int rxb_size, level;

    if (!sp->lro) {
        rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
        level = rx_buffer_level(sp, rxb_size, rng_n);

        if ((level == PANIC) && (!TASKLET_IN_USE)) {
            int ret;
            DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
            DBG_PRINT(INTR_DBG, "PANIC levels\n");
            if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
                DBG_PRINT(ERR_DBG, "Out of memory in %s",
                      __FUNCTION__);
                clear_bit(0, (&sp->tasklet_status));
                return -1;
            }
            clear_bit(0, (&sp->tasklet_status));
        } else if (level == LOW)
            tasklet_schedule(&sp->task);

    } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
        DBG_PRINT(ERR_DBG, "%s: Out of memory", sp->dev->name);
        DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
    }
    return 0;
}

static irqreturn_t
s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) dev_id;
    nic_t *sp = dev->priv;
    int i;
    mac_info_t *mac_control;
    struct config_param *config;

    atomic_inc(&sp->isr_cnt);
    mac_control = &sp->mac_control;
    config = &sp->config;
    DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);

    /* If Intr is because of Rx Traffic */
    for (i = 0; i < config->rx_ring_num; i++)
        rx_intr_handler(&mac_control->rings[i]);

    /* If Intr is because of Tx Traffic */
    for (i = 0; i < config->tx_fifo_num; i++)
        tx_intr_handler(&mac_control->fifos[i]);

    /*
     * If the Rx buffer count is below the panic threshold then
     * reallocate the buffers from the interrupt handler itself,
     * else schedule a tasklet to reallocate the buffers.
     */
    for (i = 0; i < config->rx_ring_num; i++)
        s2io_chk_rx_buffers(sp, i);

    atomic_dec(&sp->isr_cnt);
    return IRQ_HANDLED;
}

static irqreturn_t
s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
{
    ring_info_t *ring = (ring_info_t *)dev_id;
    nic_t *sp = ring->nic;

    atomic_inc(&sp->isr_cnt);

    rx_intr_handler(ring);
    s2io_chk_rx_buffers(sp, ring->ring_no);

    atomic_dec(&sp->isr_cnt);
    return IRQ_HANDLED;
}

static irqreturn_t
s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
{
    fifo_info_t *fifo = (fifo_info_t *)dev_id;
    nic_t *sp = fifo->nic;

    atomic_inc(&sp->isr_cnt);
    tx_intr_handler(fifo);
    atomic_dec(&sp->isr_cnt);
    return IRQ_HANDLED;
}

static void s2io_txpic_intr_handle(nic_t *sp)
{
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    u64 val64;

    val64 = readq(&bar0->pic_int_status);
    if (val64 & PIC_INT_GPIO) {
        val64 = readq(&bar0->gpio_int_reg);
        if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
            (val64 & GPIO_INT_REG_LINK_UP)) {
            /*
             * This is an unstable state, so clear both up/down
             * interrupts and let the adapter re-evaluate the
             * link state.
             */
            val64 |= GPIO_INT_REG_LINK_DOWN;
            val64 |= GPIO_INT_REG_LINK_UP;
            writeq(val64, &bar0->gpio_int_reg);
            val64 = readq(&bar0->gpio_int_mask);
            val64 &= ~(GPIO_INT_MASK_LINK_UP |
                   GPIO_INT_MASK_LINK_DOWN);
            writeq(val64, &bar0->gpio_int_mask);
        } else if (val64 & GPIO_INT_REG_LINK_UP) {
            val64 = readq(&bar0->adapter_status);
            if (verify_xena_quiescence(sp, val64,
                           sp->device_enabled_once)) {
                /* Enable Adapter */
                val64 = readq(&bar0->adapter_control);
                val64 |= ADAPTER_CNTL_EN;
                writeq(val64, &bar0->adapter_control);
                val64 |= ADAPTER_LED_ON;
                writeq(val64, &bar0->adapter_control);
                if (!sp->device_enabled_once)
                    sp->device_enabled_once = 1;

                s2io_link(sp, LINK_UP);
                /*
                 * unmask link-down interrupt and mask
                 * link-up interrupt
                 */
                val64 = readq(&bar0->gpio_int_mask);
                val64 &= ~GPIO_INT_MASK_LINK_DOWN;
                val64 |= GPIO_INT_MASK_LINK_UP;
                writeq(val64, &bar0->gpio_int_mask);

            }
        } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
            val64 = readq(&bar0->adapter_status);
            if (verify_xena_quiescence(sp, val64,
                           sp->device_enabled_once)) {
                s2io_link(sp, LINK_DOWN);
                /* Link is down so unmask link-up interrupt */
                val64 = readq(&bar0->gpio_int_mask);
                val64 &= ~GPIO_INT_MASK_LINK_UP;
                val64 |= GPIO_INT_MASK_LINK_DOWN;
                writeq(val64, &bar0->gpio_int_mask);
            }
        }
    }
    val64 = readq(&bar0->gpio_int_mask);
}

/**
 * s2io_isr - ISR handler of the device.
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the dev structure of the NIC.
 * @pt_regs: pointer to the registers pushed on the stack.
 * Description: This function is the ISR handler of the device. It
 * identifies the reason for the interrupt and calls the relevant
 * service routines. As a contingency measure, this ISR allocates the
 * recv buffers, if their numbers are below the panic value which is
 * presently set to 25% of the original number of rcv buffers allocated.
 * Return value:
 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
 * IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) dev_id;
    nic_t *sp = dev->priv;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    int i;
    u64 reason = 0, val64, org_mask;
    mac_info_t *mac_control;
    struct config_param *config;

    atomic_inc(&sp->isr_cnt);
    mac_control = &sp->mac_control;
    config = &sp->config;

    /*
     * Identify the cause for interrupt and call the appropriate
     * interrupt handler. Causes for the interrupt could be:
     * 1. Rx of packet.
     * 2. Tx complete.
     * 3. Link down.
     * 4. Error in any functional blocks of the NIC.
     */
    reason = readq(&bar0->general_int_status);

    if (!reason) {
        /* The interrupt was not raised by Xena. */
        atomic_dec(&sp->isr_cnt);
        return IRQ_NONE;
    }

    val64 = 0xFFFFFFFFFFFFFFFFULL;
    /* Store current mask before masking all interrupts */
    org_mask = readq(&bar0->general_int_mask);
    writeq(val64, &bar0->general_int_mask);

#ifdef CONFIG_S2IO_NAPI
    if (reason & GEN_INTR_RXTRAFFIC) {
        if (netif_rx_schedule_prep(dev)) {
            writeq(val64, &bar0->rx_traffic_mask);
            __netif_rx_schedule(dev);
        }
    }
#else
    /*
     * Rx handler is called by default, without checking for the
     * cause of interrupt.
     * rx_traffic_int reg is an R1 register, writing all 1's
     * will ensure that the actual interrupt causing bit gets
     * cleared and hence a read can be avoided.
     */
    writeq(val64, &bar0->rx_traffic_int);
    for (i = 0; i < config->rx_ring_num; i++) {
        rx_intr_handler(&mac_control->rings[i]);
    }
#endif

    /*
     * tx_traffic_int reg is an R1 register, writing all 1's
     * will ensure that the actual interrupt causing bit gets
     * cleared and hence a read can be avoided.
     */
    writeq(val64, &bar0->tx_traffic_int);

    for (i = 0; i < config->tx_fifo_num; i++)
        tx_intr_handler(&mac_control->fifos[i]);

    if (reason & GEN_INTR_TXPIC)
        s2io_txpic_intr_handle(sp);

    /*
     * If the Rx buffer count is below the panic threshold then
     * reallocate the buffers from the interrupt handler itself,
     * else schedule a tasklet to reallocate the buffers.
     */
#ifndef CONFIG_S2IO_NAPI
    for (i = 0; i < config->rx_ring_num; i++)
        s2io_chk_rx_buffers(sp, i);
#endif
    writeq(org_mask, &bar0->general_int_mask);
    atomic_dec(&sp->isr_cnt);
    return IRQ_HANDLED;
}

/**
 * s2io_updt_stats - triggers a hardware statistics update.
 */
static void s2io_updt_stats(nic_t *sp)
{
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    u64 val64;
    int cnt = 0;

    if (atomic_read(&sp->card_state) == CARD_UP) {
        /* Approx 30us on a 133 MHz bus */
        val64 = SET_UPDT_CLICKS(10) |
            STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
        writeq(val64, &bar0->stat_cfg);
        do {
            udelay(100);
            val64 = readq(&bar0->stat_cfg);
            if (!(val64 & BIT(0)))
                break;
            cnt++;
            if (cnt == 5)
                break;	/* Update failed */
        } while (1);
    } else {
        memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t));
    }
}

/**
 * s2io_get_stats - Updates the device statistics structure.
 * @dev : pointer to the device structure.
 * Description:
 * This function updates the device statistics structure in the s2io_nic
 * structure and returns a pointer to the same.
 * Return value:
 * pointer to the updated net_device_stats structure.
 */

static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
    nic_t *sp = dev->priv;
    mac_info_t *mac_control;
    struct config_param *config;

    mac_control = &sp->mac_control;
    config = &sp->config;

    /* Configure Stats for immediate update */
    s2io_updt_stats(sp);

    sp->stats.tx_packets =
        le32_to_cpu(mac_control->stats_info->tmac_frms);
    sp->stats.tx_errors =
        le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
    sp->stats.rx_errors =
        le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
    sp->stats.multicast =
        le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
    sp->stats.rx_length_errors =
        le32_to_cpu(mac_control->stats_info->rmac_long_frms);

    return (&sp->stats);
}

/**
 * s2io_set_multicast - entry point for multicast address enable/disable.
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine if multicast addresses must be enabled or if promiscuous mode
 * is to be disabled etc.
 * Return value:
 * void.
 */

static void s2io_set_multicast(struct net_device *dev)
{
    int i, j, prev_cnt;
    struct dev_mc_list *mclist;
    nic_t *sp = dev->priv;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
        0xfeffffffffffULL;
    u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
    void __iomem *add;

    if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
        /* Enable all Multicast addresses */
        writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
               &bar0->rmac_addr_data0_mem);
        writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
               &bar0->rmac_addr_data1_mem);
        val64 = RMAC_ADDR_CMD_MEM_WE |
            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
            RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
        writeq(val64, &bar0->rmac_addr_cmd_mem);
        /* Wait till command completes */
        wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);

        sp->m_cast_flg = 1;
        sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
    } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
        /* Disable all Multicast addresses */
        writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
               &bar0->rmac_addr_data0_mem);
        writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
               &bar0->rmac_addr_data1_mem);
        val64 = RMAC_ADDR_CMD_MEM_WE |
            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
            RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
        writeq(val64, &bar0->rmac_addr_cmd_mem);
        /* Wait till command completes */
        wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);

        sp->m_cast_flg = 0;
        sp->all_multi_pos = 0;
    }

    if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
        /* Put the NIC into promiscuous mode */
        add = &bar0->mac_cfg;
        val64 = readq(&bar0->mac_cfg);
        val64 |= MAC_CFG_RMAC_PROM_ENABLE;

        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
        writel((u32) val64, add);
        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
        writel((u32) (val64 >> 32), (add + 4));

        val64 = readq(&bar0->mac_cfg);
        sp->promisc_flg = 1;
        DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
              dev->name);
    } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
        /* Remove the NIC from promiscuous mode */
        add = &bar0->mac_cfg;
        val64 = readq(&bar0->mac_cfg);
        val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
        writel((u32) val64, add);
        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
        writel((u32) (val64 >> 32), (add + 4));

        val64 = readq(&bar0->mac_cfg);
        sp->promisc_flg = 0;
        DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
              dev->name);
    }

    /* Update individual M_CAST address list */
    if ((!sp->m_cast_flg) && dev->mc_count) {
        if (dev->mc_count >
            (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
            DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
                  dev->name);
            DBG_PRINT(ERR_DBG, "can be added, please enable ");
            DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
            return;
        }

        prev_cnt = sp->mc_addr_count;
        sp->mc_addr_count = dev->mc_count;

        /* Clear out the previous list of Mc in the H/W. */
        for (i = 0; i < prev_cnt; i++) {
            writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                   &bar0->rmac_addr_data0_mem);
            writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                   &bar0->rmac_addr_data1_mem);
            val64 = RMAC_ADDR_CMD_MEM_WE |
                RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                RMAC_ADDR_CMD_MEM_OFFSET
                (MAC_MC_ADDR_START_OFFSET + i);
            writeq(val64, &bar0->rmac_addr_cmd_mem);

            /* Wait for command to complete */
            if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
                DBG_PRINT(ERR_DBG, "%s: Adding ",
                      dev->name);
                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                return;
            }
        }

        /* Create the new Rx filter list and update the same in H/W. */
        for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
             i++, mclist = mclist->next) {
            memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
                   ETH_ALEN);
            mac_addr = 0;
            for (j = 0; j < ETH_ALEN; j++) {
                mac_addr |= mclist->dmi_addr[j];
                mac_addr <<= 8;
            }
            mac_addr >>= 8;
            writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
                   &bar0->rmac_addr_data0_mem);
            writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                   &bar0->rmac_addr_data1_mem);
            val64 = RMAC_ADDR_CMD_MEM_WE |
                RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                RMAC_ADDR_CMD_MEM_OFFSET
                (i + MAC_MC_ADDR_START_OFFSET);
            writeq(val64, &bar0->rmac_addr_cmd_mem);

            /* Wait for command to complete */
            if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
                DBG_PRINT(ERR_DBG, "%s: Adding ",
                      dev->name);
                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                return;
            }
        }
    }
}

/**
 * s2io_set_mac_addr - Programs the Xframe mac address
 * @dev : pointer to the device structure.
 * @addr: a uchar pointer to the new mac address which is to be set.
 * Description : This procedure will program the Xframe to receive
 * frames with the new Mac Address.
 * Return value: SUCCESS on success and an appropriate (-)ve integer
 * as defined in errno.h file on failure.
 */

static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
{
    nic_t *sp = dev->priv;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    register u64 val64, mac_addr = 0;
    int i;

    /*
     * Set the new MAC address as the new unicast filter and reflect this
     * change on the device address registered with the OS. It will be
     * at offset 0.
     */
    for (i = 0; i < ETH_ALEN; i++) {
        mac_addr <<= 8;
        mac_addr |= addr[i];
    }

    writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
           &bar0->rmac_addr_data0_mem);

    val64 =
        RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
        RMAC_ADDR_CMD_MEM_OFFSET(0);
    writeq(val64, &bar0->rmac_addr_cmd_mem);
    /* Wait till command completes */
    if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
        DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
        return FAILURE;
    }

    return SUCCESS;
}

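/*
 * Both the unicast path above and the multicast path earlier pack a
 * 6-byte MAC address into the low 48 bits of a u64, most significant
 * byte first, before handing it to the RMAC address registers. A
 * stand-alone sketch of that packing:
 */
#if 0	/* illustrative sketch, not driver code */
#include <stdint.h>

/* Pack a 6-byte MAC (MSB first) into the low 48 bits of a u64. */
static uint64_t pack_mac(const uint8_t mac[6])
{
    uint64_t v = 0;
    int i;

    for (i = 0; i < 6; i++)
        v = (v << 8) | mac[i];
    return v;	/* 00:01:02:03:04:05 -> 0x000102030405 */
}
#endif
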
/**
 * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @info: pointer to the structure with parameters given by ethtool to set
 * link information.
 * Description:
 * The function sets different link parameters provided by the user onto
 * the NIC.
 * Return value:
 * 0 on success.
 */

static int s2io_ethtool_sset(struct net_device *dev,
                 struct ethtool_cmd *info)
{
    nic_t *sp = dev->priv;
    if ((info->autoneg == AUTONEG_ENABLE) ||
        (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
        return -EINVAL;
    else {
        s2io_close(sp->dev);
        s2io_open(sp->dev);
    }

    return 0;
}

/**
 * s2io_ethtool_gset - Return link specific information.
 * @sp : private member of the device structure, pointer to the
 * s2io_nic structure.
 * @info : pointer to the structure with parameters given by ethtool
 * to return link information.
 * Description:
 * Returns link specific information like speed, duplex etc. to ethtool.
 * Return value :
 * return 0 on success.
 */

static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
{
    nic_t *sp = dev->priv;
    info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
    info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
    info->port = PORT_FIBRE;
    /* info->transceiver?? TODO */

    if (netif_carrier_ok(sp->dev)) {
        info->speed = 10000;
        info->duplex = DUPLEX_FULL;
    } else {
        info->speed = -1;
        info->duplex = -1;
    }

    info->autoneg = AUTONEG_DISABLE;
    return 0;
}

/**
 * s2io_ethtool_gdrvinfo - Returns driver specific information.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @info : pointer to the structure with parameters given by ethtool to
 * return driver information.
 * Description:
 * Returns driver specific information like name, version etc. to ethtool.
 * Return value:
 * void
 */

static void s2io_ethtool_gdrvinfo(struct net_device *dev,
                  struct ethtool_drvinfo *info)
{
    nic_t *sp = dev->priv;

    strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
    strncpy(info->version, s2io_driver_version, sizeof(info->version));
    strncpy(info->fw_version, "", sizeof(info->fw_version));
    strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
    info->regdump_len = XENA_REG_SPACE;
    info->eedump_len = XENA_EEPROM_SPACE;
    info->testinfo_len = S2IO_TEST_LEN;
    info->n_stats = S2IO_STAT_LEN;
}

/**
 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
 * @sp: private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @regs : pointer to the structure with parameters given by ethtool for
 * dumping the registers.
 * @reg_space: The input argument into which all the registers are dumped.
 * Description:
 * Dumps the entire register space of the Xframe NIC into the user given
 * buffer area.
 * Return value :
 * void.
 */

static void s2io_ethtool_gregs(struct net_device *dev,
                   struct ethtool_regs *regs, void *space)
{
    int i;
    u64 reg;
    u8 *reg_space = (u8 *) space;
    nic_t *sp = dev->priv;

    regs->len = XENA_REG_SPACE;
    regs->version = sp->pdev->subsystem_device;

    for (i = 0; i < regs->len; i += 8) {
        reg = readq(sp->bar0 + i);
        memcpy((reg_space + i), &reg, 8);
    }
}

/**
 * s2io_phy_id - timer function that alternates adapter LED.
 * @data : address of the private member of the device structure, which
 * is a pointer to the s2io_nic structure, provided as an u32.
 * Description: This is actually the timer function that alternates the
 * adapter LED bit of the adapter control register on every invocation.
 * The timer is set for 1/2 a second, hence the NIC blinks once every
 * second.
 */
static void s2io_phy_id(unsigned long data)
{
    nic_t *sp = (nic_t *) data;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    u64 val64 = 0;
    u16 subid;

    subid = sp->pdev->subsystem_device;
    if ((sp->device_type == XFRAME_II_DEVICE) ||
        ((subid & 0xFF) >= 0x07)) {
        val64 = readq(&bar0->gpio_control);
        val64 ^= GPIO_CTRL_GPIO_0;
        writeq(val64, &bar0->gpio_control);
    } else {
        val64 = readq(&bar0->adapter_control);
        val64 ^= ADAPTER_LED_ON;
        writeq(val64, &bar0->adapter_control);
    }

    mod_timer(&sp->id_timer, jiffies + HZ / 2);
}

/**
 * s2io_ethtool_idnic - To physically identify the NIC on the system.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @id : pointer to the structure with identification parameters given by
 * ethtool.
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
 * Return value:
 * int, returns 0 on success
 */

static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
    u64 val64 = 0, last_gpio_ctrl_val;
    nic_t *sp = dev->priv;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;
    u16 subid;

    subid = sp->pdev->subsystem_device;
    last_gpio_ctrl_val = readq(&bar0->gpio_control);
    if ((sp->device_type == XFRAME_I_DEVICE) &&
        ((subid & 0xFF) < 0x07)) {
        val64 = readq(&bar0->adapter_control);
        if (!(val64 & ADAPTER_CNTL_EN)) {
            printk(KERN_ERR
                   "Adapter Link down, cannot blink LED\n");
            return -EFAULT;
        }
    }
    if (sp->id_timer.function == NULL) {
        init_timer(&sp->id_timer);
        sp->id_timer.function = s2io_phy_id;
        sp->id_timer.data = (unsigned long) sp;
    }
    mod_timer(&sp->id_timer, jiffies);
    if (data)
        msleep_interruptible(data * HZ);
    else
        msleep_interruptible(MAX_FLICKER_TIME);
    del_timer_sync(&sp->id_timer);

    if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
        writeq(last_gpio_ctrl_val, &bar0->gpio_control);
        last_gpio_ctrl_val = readq(&bar0->gpio_control);
    }

    return 0;
}

/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * Returns the Pause frame generation and reception capability of the NIC.
 * Return value:
 * void
 */
static void s2io_ethtool_getpause_data(struct net_device *dev,
                       struct ethtool_pauseparam *ep)
{
    u64 val64;
    nic_t *sp = dev->priv;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;

    val64 = readq(&bar0->rmac_pause_cfg);
    if (val64 & RMAC_PAUSE_GEN_ENABLE)
        ep->tx_pause = TRUE;
    if (val64 & RMAC_PAUSE_RX_ENABLE)
        ep->rx_pause = TRUE;
    ep->autoneg = FALSE;
}

/**
 * s2io_ethtool_setpause_data - set/reset pause frame generation.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 * Return value:
 * int, returns 0 on Success
 */

static int s2io_ethtool_setpause_data(struct net_device *dev,
                      struct ethtool_pauseparam *ep)
{
    u64 val64;
    nic_t *sp = dev->priv;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;

    val64 = readq(&bar0->rmac_pause_cfg);
    if (ep->tx_pause)
        val64 |= RMAC_PAUSE_GEN_ENABLE;
    else
        val64 &= ~RMAC_PAUSE_GEN_ENABLE;
    if (ep->rx_pause)
        val64 |= RMAC_PAUSE_RX_ENABLE;
    else
        val64 &= ~RMAC_PAUSE_RX_ENABLE;
    writeq(val64, &bar0->rmac_pause_cfg);
    return 0;
}

/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be read
 * @data : It's an output parameter where the data read at the given
 * offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow reading only the part of the EEPROM visible through
 * the I2C bus.
 * Return value:
 * -1 on failure and 0 on success.
 */

#define S2IO_DEV_ID 5
static int read_eeprom(nic_t * sp, int off, u64 * data)
{
    int ret = -1;
    u32 exit_cnt = 0;
    u64 val64;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;

    if (sp->device_type == XFRAME_I_DEVICE) {
        val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
            I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
            I2C_CONTROL_CNTL_START;
        SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

        while (exit_cnt < 5) {
            val64 = readq(&bar0->i2c_control);
            if (I2C_CONTROL_CNTL_END(val64)) {
                *data = I2C_CONTROL_GET_DATA(val64);
                ret = 0;
                break;
            }
            msleep(50);
            exit_cnt++;
        }
    }

    if (sp->device_type == XFRAME_II_DEVICE) {
        val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
            SPI_CONTROL_BYTECNT(0x3) |
            SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
        SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
        val64 |= SPI_CONTROL_REQ;
        SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
        while (exit_cnt < 5) {
            val64 = readq(&bar0->spi_control);
            if (val64 & SPI_CONTROL_NACK) {
                ret = 1;
                break;
            } else if (val64 & SPI_CONTROL_DONE) {
                *data = readq(&bar0->spi_data);
                *data &= 0xffffff;
                ret = 0;
                break;
            }
            msleep(50);
            exit_cnt++;
        }
    }
    return ret;
}

/**
 * write_eeprom - actually writes the relevant part of the data value.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : The data that is to be written
 * @cnt : Number of bytes of the data that are actually to be written into
 * the EEPROM. (max of 3)
 * Description:
 * Actually writes the relevant part of the data value into the EEPROM
 * through the I2C bus.
 * Return value:
 * 0 on success, -1 on failure.
 */

static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
{
    int exit_cnt = 0, ret = -1;
    u64 val64;
    XENA_dev_config_t __iomem *bar0 = sp->bar0;

    if (sp->device_type == XFRAME_I_DEVICE) {
        val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
            I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
            I2C_CONTROL_CNTL_START;
        SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

        while (exit_cnt < 5) {
            val64 = readq(&bar0->i2c_control);
            if (I2C_CONTROL_CNTL_END(val64)) {
                if (!(val64 & I2C_CONTROL_NACK))
                    ret = 0;
                break;
            }
            msleep(50);
            exit_cnt++;
        }
    }

    if (sp->device_type == XFRAME_II_DEVICE) {
        int write_cnt = (cnt == 8) ? 0 : cnt;
        writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

        val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
            SPI_CONTROL_BYTECNT(write_cnt) |
            SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
        SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
        val64 |= SPI_CONTROL_REQ;
        SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
        while (exit_cnt < 5) {
            val64 = readq(&bar0->spi_control);
            if (val64 & SPI_CONTROL_NACK) {
                ret = 1;
                break;
            } else if (val64 & SPI_CONTROL_DONE) {
                ret = 0;
                break;
            }
            msleep(50);
            exit_cnt++;
        }
    }
    return ret;
}

static void s2io_vpd_read(nic_t *nic)
{
    u8 *vpd_data;
    u8 data;
    int i = 0, cnt, fail = 0;
    int vpd_addr = 0x80;

    if (nic->device_type == XFRAME_II_DEVICE) {
        strcpy(nic->product_name, "Xframe II 10GbE network adapter");
        vpd_addr = 0x80;
    } else {
        strcpy(nic->product_name, "Xframe I 10GbE network adapter");
        vpd_addr = 0x50;
    }

    vpd_data = kmalloc(256, GFP_KERNEL);
    if (!vpd_data)
        return;

    for (i = 0; i < 256; i += 4) {
        pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
        pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
        pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
        for (cnt = 0; cnt < 5; cnt++) {
            msleep(2);
            pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
            if (data == 0x80)
                break;
        }
        if (cnt >= 5) {
            DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
            fail = 1;
            break;
        }
        pci_read_config_dword(nic->pdev, (vpd_addr + 4),
                      (u32 *)&vpd_data[i]);
    }
    if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) {
        memset(nic->product_name, 0, vpd_data[1]);
        memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
    }
    kfree(vpd_data);
}

/**
 * s2io_ethtool_geeprom - reads the value stored in the EEPROM.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @eeprom : pointer to the user level structure provided by ethtool,
 * containing all relevant information.
 * @data_buf : user defined value to be written into EEPROM.
 * Description: Reads the values stored in the EEPROM at given offset
 * for a given length. Stores these values in the input argument data
 * buffer 'data_buf' and returns these to the caller (ethtool.)
 * Return value:
 * int 0 on success
 */

static int s2io_ethtool_geeprom(struct net_device *dev,
                struct ethtool_eeprom *eeprom, u8 * data_buf)
{
    u32 i, valid;
    u64 data;
    nic_t *sp = dev->priv;

    eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);

    if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
        eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;

    for (i = 0; i < eeprom->len; i += 4) {
        if (read_eeprom(sp, (eeprom->offset + i), &data)) {
            DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
            return -EFAULT;
        }
        valid = INV(data);
        memcpy((data_buf + i), &valid, 4);
    }
    return 0;
}

4985
4986/**
4987 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4988 * @sp : private member of the device structure, which is a pointer to the
4989 * s2io_nic structure.
20346722 4990 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
4991 * containing all relevant information.
4992 * @data_buf ; user defined value to be written into Eeprom.
4993 * Description:
4994 * Tries to write the user provided value in the Eeprom, at the offset
4995 * given by the user.
4996 * Return value:
4997 * 0 on success, -EFAULT on failure.
4998 */
4999
5000static int s2io_ethtool_seeprom(struct net_device *dev,
5001 struct ethtool_eeprom *eeprom,
5002 u8 * data_buf)
5003{
5004 int len = eeprom->len, cnt = 0;
ad4ebed0 5005 u64 valid = 0, data;
1da177e4
LT
5006 nic_t *sp = dev->priv;
5007
5008 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5009 DBG_PRINT(ERR_DBG,
5010 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5011 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5012 eeprom->magic);
5013 return -EFAULT;
5014 }
5015
5016 while (len) {
5017 data = (u32) data_buf[cnt] & 0x000000FF;
5018 if (data) {
5019 valid = (u32) (data << 24);
5020 } else
5021 valid = data;
5022
5023 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5024 DBG_PRINT(ERR_DBG,
5025 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5026 DBG_PRINT(ERR_DBG,
5027 "write into the specified offset\n");
5028 return -EFAULT;
5029 }
5030 cnt++;
5031 len--;
5032 }
5033
5034 return 0;
5035}
5036
5037/**
5038 * s2io_register_test - reads and writes into all clock domains.
5039 * @sp : private member of the device structure, which is a pointer to the
5040 * s2io_nic structure.
5041 * @data : variable that returns the result of each of the tests conducted
5042 * by the driver.
5043 * Description:
5044 * Read and write into all clock domains. The NIC has 3 clock domains;
5045 * the test verifies that registers in all three regions are accessible.
5046 * Return value:
5047 * 0 on success.
5048 */
5049
5050static int s2io_register_test(nic_t * sp, uint64_t * data)
5051{
5052 XENA_dev_config_t __iomem *bar0 = sp->bar0;
ad4ebed0 5053 u64 val64 = 0, exp_val;
5054 int fail = 0;
5055
5056 val64 = readq(&bar0->pif_rd_swapper_fb);
5057 if (val64 != 0x123456789abcdefULL) {
5058 fail = 1;
5059 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5060 }
5061
5062 val64 = readq(&bar0->rmac_pause_cfg);
5063 if (val64 != 0xc000ffff00000000ULL) {
5064 fail = 1;
5065 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5066 }
5067
5068 val64 = readq(&bar0->rx_queue_cfg);
ad4ebed0 5069 if (sp->device_type == XFRAME_II_DEVICE)
5070 exp_val = 0x0404040404040404ULL;
5071 else
5072 exp_val = 0x0808080808080808ULL;
5073 if (val64 != exp_val) {
5074 fail = 1;
5075 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5076 }
5077
5078 val64 = readq(&bar0->xgxs_efifo_cfg);
5079 if (val64 != 0x000000001923141EULL) {
5080 fail = 1;
5081 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5082 }
5083
5084 val64 = 0x5A5A5A5A5A5A5A5AULL;
5085 writeq(val64, &bar0->xmsi_data);
5086 val64 = readq(&bar0->xmsi_data);
5087 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5088 fail = 1;
5089 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5090 }
5091
5092 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5093 writeq(val64, &bar0->xmsi_data);
5094 val64 = readq(&bar0->xmsi_data);
5095 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5096 fail = 1;
5097 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5098 }
5099
5100 *data = fail;
ad4ebed0 5101 return fail;
5102}
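/*
 * Illustrative sketch: the two write tests above use the complementary
 * 0x5A../0xA5.. patterns so that every data bit is exercised both as 0
 * and as 1. A generic form of the check ("example_check_reg" is a
 * hypothetical helper, not driver code):
 */
static int example_check_reg(u64 __iomem *reg, u64 pattern)
{
	writeq(pattern, reg);
	if (readq(reg) != pattern)
		return -EIO;	/* a data bit is stuck or shorted */
	return 0;
}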
5103
5104/**
20346722 5105 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
5106 * @sp : private member of the device structure, which is a pointer to the
5107 * s2io_nic structure.
5108 * @data: variable that returns the result of each of the tests conducted by
5109 * the driver.
5110 * Description:
20346722 5111 * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
5112 * register.
5113 * Return value:
5114 * 0 on success.
5115 */
5116
5117static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
5118{
5119 int fail = 0;
ad4ebed0 5120 u64 ret_data, org_4F0, org_7F0;
5121 u8 saved_4F0 = 0, saved_7F0 = 0;
5122 struct net_device *dev = sp->dev;
5123
5124 /* Test Write Error at offset 0 */
ad4ebed0 5125 /* Note that SPI interface allows write access to all areas
5126 * of EEPROM. Hence doing all negative testing only for Xframe I.
5127 */
5128 if (sp->device_type == XFRAME_I_DEVICE)
5129 if (!write_eeprom(sp, 0, 0, 3))
5130 fail = 1;
5131
5132 /* Save current values at offsets 0x4F0 and 0x7F0 */
5133 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5134 saved_4F0 = 1;
5135 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5136 saved_7F0 = 1;
5137
5138 /* Test Write at offset 4f0 */
ad4ebed0 5139 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5140 fail = 1;
5141 if (read_eeprom(sp, 0x4F0, &ret_data))
5142 fail = 1;
5143
ad4ebed0 5144 if (ret_data != 0x012345) {
5145 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5146 "Data written %llx Data read %llx\n",
5147 dev->name, (unsigned long long)0x12345,
5148 (unsigned long long)ret_data);
1da177e4 5149 fail = 1;
ad4ebed0 5150 }
5151
5152 /* Reset the EEPROM data to 0xFFFFFF */
ad4ebed0 5153 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5154
5155 /* Test Write Request Error at offset 0x7c */
ad4ebed0 5156 if (sp->device_type == XFRAME_I_DEVICE)
5157 if (!write_eeprom(sp, 0x07C, 0, 3))
5158 fail = 1;
1da177e4 5159
ad4ebed0 5160 /* Test Write Request at offset 0x7f0 */
5161 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
1da177e4 5162 fail = 1;
ad4ebed0 5163 if (read_eeprom(sp, 0x7F0, &ret_data))
5164 fail = 1;
5165
ad4ebed0 5166 if (ret_data != 0x012345) {
5167 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5168 "Data written %llx Data read %llx\n",
5169 dev->name, (unsigned long long)0x12345,
5170 (unsigned long long)ret_data);
1da177e4 5171 fail = 1;
ad4ebed0 5172 }
5173
5174 /* Reset the EEPROM data to 0xFFFFFF */
ad4ebed0 5175 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
1da177e4 5176
ad4ebed0 5177 if (sp->device_type == XFRAME_I_DEVICE) {
5178 /* Test Write Error at offset 0x80 */
5179 if (!write_eeprom(sp, 0x080, 0, 3))
5180 fail = 1;
1da177e4 5181
ad4ebed0 5182 /* Test Write Error at offset 0xfc */
5183 if (!write_eeprom(sp, 0x0FC, 0, 3))
5184 fail = 1;
1da177e4 5185
ad4ebed0 5186 /* Test Write Error at offset 0x100 */
5187 if (!write_eeprom(sp, 0x100, 0, 3))
5188 fail = 1;
1da177e4 5189
ad4ebed0 5190 /* Test Write Error at offset 4ec */
5191 if (!write_eeprom(sp, 0x4EC, 0, 3))
5192 fail = 1;
5193 }
5194
5195 /* Restore values at offsets 0x4F0 and 0x7F0 */
5196 if (saved_4F0)
5197 write_eeprom(sp, 0x4F0, org_4F0, 3);
5198 if (saved_7F0)
5199 write_eeprom(sp, 0x7F0, org_7F0, 3);
5200
5201 *data = fail;
ad4ebed0 5202 return fail;
5203}
5204
5205/**
5206 * s2io_bist_test - invokes the MemBist test of the card.
20346722 5207 * @sp : private member of the device structure, which is a pointer to the
1da177e4 5208 * s2io_nic structure.
20346722 5209 * @data: variable that returns the result of each of the tests conducted by
5210 * the driver.
5211 * Description:
5212 * This invokes the MemBist test of the card. We give around
5213 * 2 secs time for the test to complete. If it's still not complete
20346722 5214 * within this period, we consider that the test failed.
5215 * Return value:
5216 * 0 on success and -1 on failure.
5217 */
5218
5219static int s2io_bist_test(nic_t * sp, uint64_t * data)
5220{
5221 u8 bist = 0;
5222 int cnt = 0, ret = -1;
5223
5224 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5225 bist |= PCI_BIST_START;
5226 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
5227
5228 while (cnt < 20) {
5229 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5230 if (!(bist & PCI_BIST_START)) {
5231 *data = (bist & PCI_BIST_CODE_MASK);
5232 ret = 0;
5233 break;
5234 }
5235 msleep(100);
5236 cnt++;
5237 }
5238
5239 return ret;
5240}
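/*
 * Illustrative note: the PCI specification expects a device to finish
 * BIST within 2 seconds, hence the 20 polls of 100 ms above. The
 * PCI_BIST_START bit clears itself on completion, and a result of 0
 * in PCI_BIST_CODE_MASK means the self test passed.
 */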
5241
5242/**
5243 * s2io_link_test - verifies the link state of the nic
5244 * @sp : private member of the device structure, which is a pointer to the
5245 * s2io_nic structure.
5246 * @data: variable that returns the result of each of the tests conducted by
5247 * the driver.
5248 * Description:
20346722 5249 * The function verifies the link state of the NIC and updates the input
5250 * argument 'data' appropriately.
5251 * Return value:
5252 * 0 on success.
5253 */
5254
5255static int s2io_link_test(nic_t * sp, uint64_t * data)
5256{
5257 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5258 u64 val64;
5259
5260 val64 = readq(&bar0->adapter_status);
c92ca04b 5261 if (!LINK_IS_UP(val64))
1da177e4 5262 *data = 1;
5263 else
5264 *data = 0;
1da177e4 5265
b41477f3 5266 return *data;
5267}
5268
5269/**
5270 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5271 * @sp : private member of the device structure, which is a pointer to the
1da177e4 5272 * s2io_nic structure.
20346722 5273 * @data : variable that returns the result of each of the tests
5274 * conducted by the driver.
5275 * Description:
20346722 5276 * This is one of the offline tests that checks the read and write
5277 * access to the RldRam chip on the NIC.
5278 * Return value:
5279 * 0 on success.
5280 */
5281
5282static int s2io_rldram_test(nic_t * sp, uint64_t * data)
5283{
5284 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5285 u64 val64;
ad4ebed0 5286 int cnt, iteration = 0, test_fail = 0;
5287
5288 val64 = readq(&bar0->adapter_control);
5289 val64 &= ~ADAPTER_ECC_EN;
5290 writeq(val64, &bar0->adapter_control);
5291
5292 val64 = readq(&bar0->mc_rldram_test_ctrl);
5293 val64 |= MC_RLDRAM_TEST_MODE;
ad4ebed0 5294 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5295
5296 val64 = readq(&bar0->mc_rldram_mrs);
5297 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5298 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5299
5300 val64 |= MC_RLDRAM_MRS_ENABLE;
5301 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5302
5303 while (iteration < 2) {
5304 val64 = 0x55555555aaaa0000ULL;
5305 if (iteration == 1) {
5306 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5307 }
5308 writeq(val64, &bar0->mc_rldram_test_d0);
5309
5310 val64 = 0xaaaa5a5555550000ULL;
5311 if (iteration == 1) {
5312 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5313 }
5314 writeq(val64, &bar0->mc_rldram_test_d1);
5315
5316 val64 = 0x55aaaaaaaa5a0000ULL;
5317 if (iteration == 1) {
5318 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5319 }
5320 writeq(val64, &bar0->mc_rldram_test_d2);
5321
ad4ebed0 5322 val64 = (u64) (0x0000003ffffe0100ULL);
5323 writeq(val64, &bar0->mc_rldram_test_add);
5324
ad4ebed0 5325 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5326 MC_RLDRAM_TEST_GO;
5327 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5328
5329 for (cnt = 0; cnt < 5; cnt++) {
5330 val64 = readq(&bar0->mc_rldram_test_ctrl);
5331 if (val64 & MC_RLDRAM_TEST_DONE)
5332 break;
5333 msleep(200);
5334 }
5335
5336 if (cnt == 5)
5337 break;
5338
ad4ebed0 5339 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5340 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5341
5342 for (cnt = 0; cnt < 5; cnt++) {
5343 val64 = readq(&bar0->mc_rldram_test_ctrl);
5344 if (val64 & MC_RLDRAM_TEST_DONE)
5345 break;
5346 msleep(500);
5347 }
5348
5349 if (cnt == 5)
5350 break;
5351
5352 val64 = readq(&bar0->mc_rldram_test_ctrl);
ad4ebed0 5353 if (!(val64 & MC_RLDRAM_TEST_PASS))
5354 test_fail = 1;
5355
5356 iteration++;
5357 }
5358
ad4ebed0 5359 *data = test_fail;
1da177e4 5360
ad4ebed0 5361 /* Bring the adapter out of test mode */
5362 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5363
5364 return test_fail;
5365}
5366
5367/**
5368 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
5369 * @sp : private member of the device structure, which is a pointer to the
5370 * s2io_nic structure.
5371 * @ethtest : pointer to an ethtool command specific structure that will be
5372 * returned to the user.
20346722 5373 * @data : variable that returns the result of each of the tests
5374 * conducted by the driver.
5375 * Description:
5376 * This function conducts 5 tests (4 offline and 1 online) to determine
5377 * the health of the card.
5378 * Return value:
5379 * void
5380 */
5381
5382static void s2io_ethtool_test(struct net_device *dev,
5383 struct ethtool_test *ethtest,
5384 uint64_t * data)
5385{
5386 nic_t *sp = dev->priv;
5387 int orig_state = netif_running(sp->dev);
5388
5389 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5390 /* Offline Tests. */
20346722 5391 if (orig_state)
1da177e4 5392 s2io_close(sp->dev);
5393
5394 if (s2io_register_test(sp, &data[0]))
5395 ethtest->flags |= ETH_TEST_FL_FAILED;
5396
5397 s2io_reset(sp);
5398
5399 if (s2io_rldram_test(sp, &data[3]))
5400 ethtest->flags |= ETH_TEST_FL_FAILED;
5401
5402 s2io_reset(sp);
5403
5404 if (s2io_eeprom_test(sp, &data[1]))
5405 ethtest->flags |= ETH_TEST_FL_FAILED;
5406
5407 if (s2io_bist_test(sp, &data[4]))
5408 ethtest->flags |= ETH_TEST_FL_FAILED;
5409
5410 if (orig_state)
5411 s2io_open(sp->dev);
5412
5413 data[2] = 0;
5414 } else {
5415 /* Online Tests. */
5416 if (!orig_state) {
5417 DBG_PRINT(ERR_DBG,
5418 "%s: is not up, cannot run test\n",
5419 dev->name);
5420 data[0] = -1;
5421 data[1] = -1;
5422 data[2] = -1;
5423 data[3] = -1;
5424 data[4] = -1;
5425 }
5426
5427 if (s2io_link_test(sp, &data[2]))
5428 ethtest->flags |= ETH_TEST_FL_FAILED;
5429
5430 data[0] = 0;
5431 data[1] = 0;
5432 data[3] = 0;
5433 data[4] = 0;
5434 }
5435}
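/*
 * Result slots filled by s2io_ethtool_test(), in the order exported
 * to ethtool:
 *	data[0] - register test		data[1] - eeprom test
 *	data[2] - link test		data[3] - rldram test
 *	data[4] - bist test
 */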
5436
5437static void s2io_get_ethtool_stats(struct net_device *dev,
5438 struct ethtool_stats *estats,
5439 u64 * tmp_stats)
5440{
5441 int i = 0;
5442 nic_t *sp = dev->priv;
5443 StatInfo_t *stat_info = sp->mac_control.stats_info;
5444
7ba013ac 5445 s2io_updt_stats(sp);
5446 tmp_stats[i++] =
5447 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5448 le32_to_cpu(stat_info->tmac_frms);
5449 tmp_stats[i++] =
5450 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5451 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 5452 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5453 tmp_stats[i++] =
5454 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5455 le32_to_cpu(stat_info->tmac_mcst_frms);
5456 tmp_stats[i++] =
5457 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5458 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 5459 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5460 tmp_stats[i++] =
5461 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5462 le32_to_cpu(stat_info->tmac_ttl_octets);
5463 tmp_stats[i++] =
5464 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5465 le32_to_cpu(stat_info->tmac_ucst_frms);
5466 tmp_stats[i++] =
5467 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5468 le32_to_cpu(stat_info->tmac_nucst_frms);
5469 tmp_stats[i++] =
5470 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5471 le32_to_cpu(stat_info->tmac_any_err_frms);
bd1034f0 5472 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
1da177e4 5473 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5474 tmp_stats[i++] =
5475 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5476 le32_to_cpu(stat_info->tmac_vld_ip);
5477 tmp_stats[i++] =
5478 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5479 le32_to_cpu(stat_info->tmac_drop_ip);
5480 tmp_stats[i++] =
5481 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5482 le32_to_cpu(stat_info->tmac_icmp);
5483 tmp_stats[i++] =
5484 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5485 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 5486 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5487 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5488 le32_to_cpu(stat_info->tmac_udp);
5489 tmp_stats[i++] =
5490 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5491 le32_to_cpu(stat_info->rmac_vld_frms);
5492 tmp_stats[i++] =
5493 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5494 le32_to_cpu(stat_info->rmac_data_octets);
5495 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5496 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5497 tmp_stats[i++] =
5498 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5499 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5500 tmp_stats[i++] =
5501 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5502 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4 5503 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
bd1034f0 5504 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5505 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5506 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5507 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5508 tmp_stats[i++] =
5509 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5510 le32_to_cpu(stat_info->rmac_ttl_octets);
5511 tmp_stats[i++] =
5512 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5513 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5514 tmp_stats[i++] =
5515 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5516 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5517 tmp_stats[i++] =
5518 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5519 le32_to_cpu(stat_info->rmac_discarded_frms);
5520 tmp_stats[i++] =
5521 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5522 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5523 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5524 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5525 tmp_stats[i++] =
5526 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5527 le32_to_cpu(stat_info->rmac_usized_frms);
5528 tmp_stats[i++] =
5529 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5530 le32_to_cpu(stat_info->rmac_osized_frms);
5531 tmp_stats[i++] =
5532 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5533 le32_to_cpu(stat_info->rmac_frag_frms);
5534 tmp_stats[i++] =
5535 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5536 le32_to_cpu(stat_info->rmac_jabber_frms);
5537 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5538 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5539 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5540 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5541 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5542 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5543 tmp_stats[i++] =
5544 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
541ae68f 5545 le32_to_cpu(stat_info->rmac_ip);
5546 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5547 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5548 tmp_stats[i++] =
5549 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
541ae68f 5550 le32_to_cpu(stat_info->rmac_drop_ip);
5551 tmp_stats[i++] =
5552 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
541ae68f 5553 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 5554 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5555 tmp_stats[i++] =
5556 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5557 le32_to_cpu(stat_info->rmac_udp);
5558 tmp_stats[i++] =
5559 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5560 le32_to_cpu(stat_info->rmac_err_drp_udp);
5561 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5562 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5563 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5564 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5565 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5566 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5567 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5568 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5569 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5570 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5571 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5572 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5573 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5574 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5575 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5576 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5577 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5578 tmp_stats[i++] =
5579 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5580 le32_to_cpu(stat_info->rmac_pause_cnt);
5581 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5582 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5583 tmp_stats[i++] =
5584 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5585 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 5586 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5587 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5588 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5589 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5590 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5591 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5592 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5593 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5594 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5595 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5596 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5597 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5598 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5599 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5600 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5601 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5602 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5603 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5604 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5605 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5606 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5607 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5608 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5609 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5610 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5611 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5612 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5613 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5614 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5615 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5616 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5617 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5618 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5619 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5620 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5621 tmp_stats[i++] = 0;
5622 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5623 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5624 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5625 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5626 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5627 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5628 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5629 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5630 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5631 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5632 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5633 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5634 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5635 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5636 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5637 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5638 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5639 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5640 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5641 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5642 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5643 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5644 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
fe931395 5645 if (stat_info->sw_stat.num_aggregations) {
5646 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5647 int count = 0;
5648 /*
5649 * Since 64-bit divide does not work on all platforms,
5650 * do repeated subtraction.
5651 */
5652 while (tmp >= stat_info->sw_stat.num_aggregations) {
5653 tmp -= stat_info->sw_stat.num_aggregations;
5654 count++;
5655 }
5656 tmp_stats[i++] = count;
fe931395 5657 }
5658 else
5659 tmp_stats[i++] = 0;
5660}
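/*
 * Illustrative sketch: the repeated subtraction above sidesteps the
 * lack of a native 64-bit divide on 32-bit platforms. The usual
 * kernel idiom is do_div() from asm/div64.h, assuming the divisor
 * fits in 32 bits ("example_avg_aggr" is a hypothetical helper):
 */
static u64 example_avg_aggr(u64 sum, u32 n)
{
	if (!n)
		return 0;
	do_div(sum, n);		/* do_div() leaves the quotient in sum */
	return sum;
}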
5661
ac1f60db 5662static int s2io_ethtool_get_regs_len(struct net_device *dev)
5663{
5664 return (XENA_REG_SPACE);
5665}
5666
5667
ac1f60db 5668static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5669{
5670 nic_t *sp = dev->priv;
5671
5672 return (sp->rx_csum);
5673}
5674
5675static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5676{
5677 nic_t *sp = dev->priv;
5678
5679 if (data)
5680 sp->rx_csum = 1;
5681 else
5682 sp->rx_csum = 0;
5683
5684 return 0;
5685}
5686
5687static int s2io_get_eeprom_len(struct net_device *dev)
5688{
5689 return (XENA_EEPROM_SPACE);
5690}
5691
ac1f60db 5692static int s2io_ethtool_self_test_count(struct net_device *dev)
5693{
5694 return (S2IO_TEST_LEN);
5695}
5696
5697static void s2io_ethtool_get_strings(struct net_device *dev,
5698 u32 stringset, u8 * data)
5699{
5700 switch (stringset) {
5701 case ETH_SS_TEST:
5702 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5703 break;
5704 case ETH_SS_STATS:
5705 memcpy(data, &ethtool_stats_keys,
5706 sizeof(ethtool_stats_keys));
5707 }
5708}
5709static int s2io_ethtool_get_stats_count(struct net_device *dev)
5710{
5711 return (S2IO_STAT_LEN);
5712}
5713
ac1f60db 5714static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5715{
5716 if (data)
5717 dev->features |= NETIF_F_IP_CSUM;
5718 else
5719 dev->features &= ~NETIF_F_IP_CSUM;
5720
5721 return 0;
5722}
5723
5724static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5725{
5726 return (dev->features & NETIF_F_TSO) != 0;
5727}
5728static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5729{
5730 if (data)
5731 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5732 else
5733 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5734
5735 return 0;
5736}
5737
5738static struct ethtool_ops netdev_ethtool_ops = {
5739 .get_settings = s2io_ethtool_gset,
5740 .set_settings = s2io_ethtool_sset,
5741 .get_drvinfo = s2io_ethtool_gdrvinfo,
5742 .get_regs_len = s2io_ethtool_get_regs_len,
5743 .get_regs = s2io_ethtool_gregs,
5744 .get_link = ethtool_op_get_link,
5745 .get_eeprom_len = s2io_get_eeprom_len,
5746 .get_eeprom = s2io_ethtool_geeprom,
5747 .set_eeprom = s2io_ethtool_seeprom,
5748 .get_pauseparam = s2io_ethtool_getpause_data,
5749 .set_pauseparam = s2io_ethtool_setpause_data,
5750 .get_rx_csum = s2io_ethtool_get_rx_csum,
5751 .set_rx_csum = s2io_ethtool_set_rx_csum,
5752 .get_tx_csum = ethtool_op_get_tx_csum,
5753 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5754 .get_sg = ethtool_op_get_sg,
5755 .set_sg = ethtool_op_set_sg,
5756#ifdef NETIF_F_TSO
5757 .get_tso = s2io_ethtool_op_get_tso,
5758 .set_tso = s2io_ethtool_op_set_tso,
1da177e4 5759#endif
5760 .get_ufo = ethtool_op_get_ufo,
5761 .set_ufo = ethtool_op_set_ufo,
5762 .self_test_count = s2io_ethtool_self_test_count,
5763 .self_test = s2io_ethtool_test,
5764 .get_strings = s2io_ethtool_get_strings,
5765 .phys_id = s2io_ethtool_idnic,
5766 .get_stats_count = s2io_ethtool_get_stats_count,
5767 .get_ethtool_stats = s2io_get_ethtool_stats
5768};
5769
5770/**
20346722 5771 * s2io_ioctl - Entry point for the Ioctl
5772 * @dev : Device pointer.
5773 * @ifr : An IOCTL specific structure, that can contain a pointer to
5774 * a proprietary structure used to pass information to the driver.
5775 * @cmd : This is used to distinguish between the different commands that
5776 * can be passed to the IOCTL functions.
5777 * Description:
5778 * Currently no special functionality is supported in IOCTL, hence the
5779 * function always returns -EOPNOTSUPP.
5780 */
5781
ac1f60db 5782static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5783{
5784 return -EOPNOTSUPP;
5785}
5786
5787/**
5788 * s2io_change_mtu - entry point to change MTU size for the device.
5789 * @dev : device pointer.
5790 * @new_mtu : the new MTU size for the device.
5791 * Description: A driver entry point to change MTU size for the device.
5792 * Before changing the MTU the device must be stopped.
5793 * Return value:
5794 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5795 * file on failure.
5796 */
5797
ac1f60db 5798static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5799{
5800 nic_t *sp = dev->priv;
5801
5802 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5803 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5804 dev->name);
5805 return -EPERM;
5806 }
5807
1da177e4 5808 dev->mtu = new_mtu;
d8892c6e 5809 if (netif_running(dev)) {
e6a8fee2 5810 s2io_card_down(sp);
5811 netif_stop_queue(dev);
5812 if (s2io_card_up(sp)) {
5813 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5814 __FUNCTION__);
5815 }
5816 if (netif_queue_stopped(dev))
5817 netif_wake_queue(dev);
5818 } else { /* Device is down */
5819 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5820 u64 val64 = new_mtu;
5821
5822 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5823 }
5824
5825 return 0;
5826}
5827
5828/**
5829 * s2io_tasklet - Bottom half of the ISR.
5830 * @dev_addr : address of the device structure in dma_addr_t format.
5831 * Description:
5832 * This is the tasklet or the bottom half of the ISR. This is
20346722 5833 * an extension of the ISR which is scheduled by the scheduler to be run
5834 * when the load on the CPU is low. All low priority tasks of the ISR can
20346722 5835 * be pushed into the tasklet. For now the tasklet is used only to
5836 * replenish the Rx buffers in the Rx buffer descriptors.
5837 * Return value:
5838 * void.
5839 */
5840
5841static void s2io_tasklet(unsigned long dev_addr)
5842{
5843 struct net_device *dev = (struct net_device *) dev_addr;
5844 nic_t *sp = dev->priv;
5845 int i, ret;
5846 mac_info_t *mac_control;
5847 struct config_param *config;
5848
5849 mac_control = &sp->mac_control;
5850 config = &sp->config;
5851
5852 if (!TASKLET_IN_USE) {
5853 for (i = 0; i < config->rx_ring_num; i++) {
5854 ret = fill_rx_buffers(sp, i);
5855 if (ret == -ENOMEM) {
5856 DBG_PRINT(ERR_DBG, "%s: Out of ",
5857 dev->name);
5858 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5859 break;
5860 } else if (ret == -EFILL) {
5861 DBG_PRINT(ERR_DBG,
5862 "%s: Rx Ring %d is full\n",
5863 dev->name, i);
5864 break;
5865 }
5866 }
5867 clear_bit(0, (&sp->tasklet_status));
5868 }
5869}
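/*
 * Illustrative sketch: the tasklet above is armed once with
 * tasklet_init() (see s2io_card_up() below); deferred Rx replenish
 * work is then requested from interrupt context roughly like this
 * ("example_request_refill" is a hypothetical helper):
 */
static void example_request_refill(nic_t *sp)
{
	/* runs s2io_tasklet() later, outside hard-IRQ context */
	tasklet_schedule(&sp->task);
}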
5870
5871/**
5872 * s2io_set_link - Set the link status
5873 * @data: long pointer to device private structure
5874 * Description: Sets the link status for the adapter
5875 */
5876
5877static void s2io_set_link(unsigned long data)
5878{
5879 nic_t *nic = (nic_t *) data;
5880 struct net_device *dev = nic->dev;
5881 XENA_dev_config_t __iomem *bar0 = nic->bar0;
5882 register u64 val64;
5883 u16 subid;
5884
5885 if (test_and_set_bit(0, &(nic->link_state))) {
5886 /* The card is being reset, no point doing anything */
5887 return;
5888 }
5889
5890 subid = nic->pdev->subsystem_device;
5891 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5892 /*
5893 * Allow a small delay for the NICs self initiated
5894 * cleanup to complete.
5895 */
5896 msleep(100);
5897 }
5898
5899 val64 = readq(&bar0->adapter_status);
20346722 5900 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1da177e4
LT
5901 if (LINK_IS_UP(val64)) {
5902 val64 = readq(&bar0->adapter_control);
5903 val64 |= ADAPTER_CNTL_EN;
5904 writeq(val64, &bar0->adapter_control);
5905 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5906 subid)) {
5907 val64 = readq(&bar0->gpio_control);
5908 val64 |= GPIO_CTRL_GPIO_0;
5909 writeq(val64, &bar0->gpio_control);
5910 val64 = readq(&bar0->gpio_control);
5911 } else {
5912 val64 |= ADAPTER_LED_ON;
5913 writeq(val64, &bar0->adapter_control);
5914 }
5915 if (s2io_link_fault_indication(nic) ==
5916 MAC_RMAC_ERR_TIMER) {
5917 val64 = readq(&bar0->adapter_status);
5918 if (!LINK_IS_UP(val64)) {
5919 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5920 DBG_PRINT(ERR_DBG, " Link down ");
5921 DBG_PRINT(ERR_DBG, "after ");
5922 DBG_PRINT(ERR_DBG, "enabling ");
5923 DBG_PRINT(ERR_DBG, "device\n");
5924 }
5925 }
5926 if (nic->device_enabled_once == FALSE) {
5927 nic->device_enabled_once = TRUE;
5928 }
5929 s2io_link(nic, LINK_UP);
5930 } else {
5931 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5932 subid)) {
5933 val64 = readq(&bar0->gpio_control);
5934 val64 &= ~GPIO_CTRL_GPIO_0;
5935 writeq(val64, &bar0->gpio_control);
5936 val64 = readq(&bar0->gpio_control);
5937 }
5938 s2io_link(nic, LINK_DOWN);
5939 }
5940 } else { /* NIC is not Quiescent. */
5941 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5942 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5943 netif_stop_queue(dev);
5944 }
5945 clear_bit(0, &(nic->link_state));
5946}
5947
5948static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5949 struct sk_buff **skb, u64 *temp0, u64 *temp1,
5950 u64 *temp2, int size)
5951{
5952 struct net_device *dev = sp->dev;
5953 struct sk_buff *frag_list;
5954
5955 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
5956 /* allocate skb */
5957 if (*skb) {
5958 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
5959 /*
5960 * As Rx frame are not going to be processed,
5961 * using same mapped address for the Rxd
5962 * buffer pointer
5963 */
5964 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0;
5965 } else {
5966 *skb = dev_alloc_skb(size);
5967 if (!(*skb)) {
5968 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
5969 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
5970 return -ENOMEM ;
5971 }
5972 /* storing the mapped addr in a temp variable
5973 * such it will be used for next rxd whose
5974 * Host Control is NULL
5975 */
5976 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 =
5977 pci_map_single( sp->pdev, (*skb)->data,
5978 size - NET_IP_ALIGN,
5979 PCI_DMA_FROMDEVICE);
5980 rxdp->Host_Control = (unsigned long) (*skb);
5981 }
5982 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
5983 /* Two buffer Mode */
5984 if (*skb) {
5985 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
5986 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
5987 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
5988 } else {
5989 *skb = dev_alloc_skb(size);
5990 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
5991 pci_map_single(sp->pdev, (*skb)->data,
5992 dev->mtu + 4,
5993 PCI_DMA_FROMDEVICE);
5994 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
5995 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
5996 PCI_DMA_FROMDEVICE);
5997 rxdp->Host_Control = (unsigned long) (*skb);
5998
5999 /* Buffer-1 will be dummy buffer not used */
6000 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
6001 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6002 PCI_DMA_FROMDEVICE);
6003 }
6004 } else if (rxdp->Host_Control == 0) {
6005 /* Three buffer mode */
6006 if (*skb) {
6007 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
6008 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
6009 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
6010 } else {
6011 *skb = dev_alloc_skb(size);
6012
6013 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
6014 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6015 PCI_DMA_FROMDEVICE);
6016 /* Buffer-1 receives L3/L4 headers */
6017 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
6018 pci_map_single( sp->pdev, (*skb)->data,
6019 l3l4hdr_size + 4,
6020 PCI_DMA_FROMDEVICE);
6021 /*
6022 * skb_shinfo(skb)->frag_list will have L4
6023 * data payload
6024 */
6025 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6026 ALIGN_SIZE);
6027 if (skb_shinfo(*skb)->frag_list == NULL) {
6028 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb "
6029 "failed\n", dev->name);
6030 return -ENOMEM ;
6031 }
6032 frag_list = skb_shinfo(*skb)->frag_list;
6033 frag_list->next = NULL;
6034 /*
6035 * Buffer-2 receives L4 data payload
6036 */
6037 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
6038 pci_map_single( sp->pdev, frag_list->data,
6039 dev->mtu, PCI_DMA_FROMDEVICE);
6040 }
6041 }
6042 return 0;
6043}
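/*
 * Summary of the RxD buffer layout handled above:
 *	RXD_MODE_1 : Buffer0 carries the whole frame.
 *	RXD_MODE_3B: Buffer0 = small header area (ba->ba_0),
 *		     Buffer1 = unused dummy (ba->ba_1),
 *		     Buffer2 = full frame (mtu + 4).
 *	3-buffer   : Buffer0 = ba->ba_0, Buffer1 = L3/L4 headers in the
 *		     skb, Buffer2 = L4 payload in a frag_list skb.
 */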
6044static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
6045{
6046 struct net_device *dev = sp->dev;
6047 if (sp->rxd_mode == RXD_MODE_1) {
6048 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6049 } else if (sp->rxd_mode == RXD_MODE_3B) {
6050 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6051 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6052 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6053 } else {
6054 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6055 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6056 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6057 }
6058}
6059
6060static int rxd_owner_bit_reset(nic_t *sp)
6061{
6062 int i, j, k, blk_cnt = 0, size;
6063 mac_info_t * mac_control = &sp->mac_control;
6064 struct config_param *config = &sp->config;
6065 struct net_device *dev = sp->dev;
6066 RxD_t *rxdp = NULL;
6067 struct sk_buff *skb = NULL;
6068 buffAdd_t *ba = NULL;
6069 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6070
6071 /* Calculate the size based on ring mode */
6072 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6073 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6074 if (sp->rxd_mode == RXD_MODE_1)
6075 size += NET_IP_ALIGN;
6076 else if (sp->rxd_mode == RXD_MODE_3B)
6077 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6078 else
6079 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6080
6081 for (i = 0; i < config->rx_ring_num; i++) {
6082 blk_cnt = config->rx_cfg[i].num_rxd /
6083 (rxd_count[sp->rxd_mode] +1);
6084
6085 for (j = 0; j < blk_cnt; j++) {
6086 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6087 rxdp = mac_control->rings[i].
6088 rx_blocks[j].rxds[k].virt_addr;
6089 if(sp->rxd_mode >= RXD_MODE_3A)
6090 ba = &mac_control->rings[i].ba[j][k];
6091 set_rxd_buffer_pointer(sp, rxdp, ba,
6092 &skb,(u64 *)&temp0_64,
6093 (u64 *)&temp1_64,
6094 (u64 *)&temp2_64, size);
6095
6096 set_rxd_buffer_size(sp, rxdp, size);
6097 wmb();
6098 /* flip the Ownership bit to Hardware */
6099 rxdp->Control_1 |= RXD_OWN_XENA;
6100 }
6101 }
6102 }
6103 return 0;
6104
6105}
6106
e6a8fee2 6107static int s2io_add_isr(nic_t * sp)
1da177e4 6108{
e6a8fee2 6109 int ret = 0;
c92ca04b 6110 struct net_device *dev = sp->dev;
e6a8fee2 6111 int err = 0;
1da177e4 6112
6113 if (sp->intr_type == MSI)
6114 ret = s2io_enable_msi(sp);
6115 else if (sp->intr_type == MSI_X)
6116 ret = s2io_enable_msi_x(sp);
6117 if (ret) {
6118 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6119 sp->intr_type = INTA;
20346722 6120 }
1da177e4 6121
6122 /* Store the values of the MSIX table in the nic_t structure */
6123 store_xmsi_data(sp);
c92ca04b 6124
6125 /* After proper initialization of H/W, register ISR */
6126 if (sp->intr_type == MSI) {
6127 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6128 IRQF_SHARED, sp->name, dev);
6129 if (err) {
6130 pci_disable_msi(sp->pdev);
6131 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6132 dev->name);
6133 return -1;
6134 }
6135 }
6136 if (sp->intr_type == MSI_X) {
6137 int i;
c92ca04b 6138
6139 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6140 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6141 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6142 dev->name, i);
6143 err = request_irq(sp->entries[i].vector,
6144 s2io_msix_fifo_handle, 0, sp->desc[i],
6145 sp->s2io_entries[i].arg);
6146 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
6147 (unsigned long long)sp->msix_info[i].addr);
6148 } else {
6149 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6150 dev->name, i);
6151 err = request_irq(sp->entries[i].vector,
6152 s2io_msix_ring_handle, 0, sp->desc[i],
6153 sp->s2io_entries[i].arg);
6154 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
6155 (unsigned long long)sp->msix_info[i].addr);
c92ca04b 6156 }
6157 if (err) {
6158 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6159 "failed\n", dev->name, i);
6160 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6161 return -1;
6162 }
6163 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6164 }
6165 }
6166 if (sp->intr_type == INTA) {
6167 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6168 sp->name, dev);
6169 if (err) {
6170 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6171 dev->name);
6172 return -1;
6173 }
6174 }
6175 return 0;
6176}
6177static void s2io_rem_isr(nic_t * sp)
6178{
6179 int cnt = 0;
6180 struct net_device *dev = sp->dev;
6181
6182 if (sp->intr_type == MSI_X) {
6183 int i;
6184 u16 msi_control;
6185
6186 for (i=1; (sp->s2io_entries[i].in_use ==
6187 MSIX_REGISTERED_SUCCESS); i++) {
6188 int vector = sp->entries[i].vector;
6189 void *arg = sp->s2io_entries[i].arg;
6190
6191 free_irq(vector, arg);
6192 }
6193 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6194 msi_control &= 0xFFFE; /* Disable MSI */
6195 pci_write_config_word(sp->pdev, 0x42, msi_control);
6196
6197 pci_disable_msix(sp->pdev);
6198 } else {
6199 free_irq(sp->pdev->irq, dev);
6200 if (sp->intr_type == MSI) {
6201 u16 val;
6202
6203 pci_disable_msi(sp->pdev);
6204 pci_read_config_word(sp->pdev, 0x4c, &val);
6205 val ^= 0x1;
6206 pci_write_config_word(sp->pdev, 0x4c, val);
6207 }
6208 }
6209 /* Waiting till all Interrupt handlers are complete */
6210 cnt = 0;
6211 do {
6212 msleep(10);
6213 if (!atomic_read(&sp->isr_cnt))
6214 break;
6215 cnt++;
6216 } while(cnt < 5);
6217}
6218
6219static void s2io_card_down(nic_t * sp)
6220{
6221 int cnt = 0;
6222 XENA_dev_config_t __iomem *bar0 = sp->bar0;
6223 unsigned long flags;
6224 register u64 val64 = 0;
6225
6226 del_timer_sync(&sp->alarm_timer);
6227 /* If s2io_set_link task is executing, wait till it completes. */
6228 while (test_and_set_bit(0, &(sp->link_state))) {
6229 msleep(50);
6230 }
6231 atomic_set(&sp->card_state, CARD_DOWN);
6232
6233 /* disable Tx and Rx traffic on the NIC */
6234 stop_nic(sp);
6235
6236 s2io_rem_isr(sp);
6237
6238 /* Kill tasklet. */
6239 tasklet_kill(&sp->task);
6240
6241 /* Check if the device is Quiescent and then Reset the NIC */
6242 do {
6243 /* As per the HW requirement we need to replenish the
6244 * receive buffers to avoid a ring bump. Since there is
6245 * no intention of processing the Rx frame at this point, we are
6246 * just setting the ownership bit of the RxDs in each Rx
6247 * ring to HW and setting the appropriate buffer size
6248 * based on the ring mode.
6249 */
6250 rxd_owner_bit_reset(sp);
6251
1da177e4 6252 val64 = readq(&bar0->adapter_status);
20346722 6253 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
6254 break;
6255 }
6256
6257 msleep(50);
6258 cnt++;
6259 if (cnt == 10) {
6260 DBG_PRINT(ERR_DBG,
6261 "s2io_close: Device not Quiescent ");
6262 DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
6263 (unsigned long long) val64);
6264 break;
6265 }
6266 } while (1);
6267 s2io_reset(sp);
6268
6269 spin_lock_irqsave(&sp->tx_lock, flags);
6270 /* Free all Tx buffers */
1da177e4 6271 free_tx_buffers(sp);
6272 spin_unlock_irqrestore(&sp->tx_lock, flags);
6273
6274 /* Free all Rx buffers */
6275 spin_lock_irqsave(&sp->rx_lock, flags);
1da177e4 6276 free_rx_buffers(sp);
7ba013ac 6277 spin_unlock_irqrestore(&sp->rx_lock, flags);
1da177e4 6278
6279 clear_bit(0, &(sp->link_state));
6280}
6281
6282static int s2io_card_up(nic_t * sp)
6283{
cc6e7c44 6284 int i, ret = 0;
6285 mac_info_t *mac_control;
6286 struct config_param *config;
6287 struct net_device *dev = (struct net_device *) sp->dev;
e6a8fee2 6288 u16 interruptible;
6289
6290 /* Initialize the H/W I/O registers */
6291 if (init_nic(sp) != 0) {
6292 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6293 dev->name);
e6a8fee2 6294 s2io_reset(sp);
6295 return -ENODEV;
6296 }
6297
6298 /*
6299 * Initializing the Rx buffers. Buffers are replenished for
6300 * each of the configured Rx rings.
6301 */
6302 mac_control = &sp->mac_control;
6303 config = &sp->config;
6304
6305 for (i = 0; i < config->rx_ring_num; i++) {
6306 if ((ret = fill_rx_buffers(sp, i))) {
6307 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6308 dev->name);
6309 s2io_reset(sp);
6310 free_rx_buffers(sp);
6311 return -ENOMEM;
6312 }
6313 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6314 atomic_read(&sp->rx_bufs_left[i]));
6315 }
6316
6317 /* Setting its receive mode */
6318 s2io_set_multicast(dev);
6319
7d3d0439 6320 if (sp->lro) {
b41477f3 6321 /* Initialize max aggregatable pkts per session based on MTU */
6322 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6323 /* Check if we can use (if specified) the user provided value */
6324 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6325 sp->lro_max_aggr_per_sess = lro_max_pkts;
6326 }
6327
6328 /* Enable Rx Traffic and interrupts on the NIC */
6329 if (start_nic(sp)) {
6330 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
1da177e4 6331 s2io_reset(sp);
6332 free_rx_buffers(sp);
6333 return -ENODEV;
6334 }
6335
6336 /* Add interrupt service routine */
6337 if (s2io_add_isr(sp) != 0) {
6338 if (sp->intr_type == MSI_X)
6339 s2io_rem_isr(sp);
6340 s2io_reset(sp);
6341 free_rx_buffers(sp);
6342 return -ENODEV;
6343 }
6344
6345 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6346
6347 /* Enable tasklet for the device */
6348 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6349
6350 /* Enable select interrupts */
6351 if (sp->intr_type != INTA)
6352 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6353 else {
6354 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6355 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6356 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6357 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6358 }
6359
6360
6361 atomic_set(&sp->card_state, CARD_UP);
6362 return 0;
6363}
6364
20346722 6365/**
6366 * s2io_restart_nic - Resets the NIC.
6367 * @data : long pointer to the device private structure
6368 * Description:
6369 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 6370 * function after 0.5 secs to reset the NIC. The idea is to reduce
6371 * the run time of the watch dog routine which is run holding a
6372 * spin lock.
6373 */
6374
6375static void s2io_restart_nic(unsigned long data)
6376{
6377 struct net_device *dev = (struct net_device *) data;
6378 nic_t *sp = dev->priv;
6379
e6a8fee2 6380 s2io_card_down(sp);
6381 if (s2io_card_up(sp)) {
6382 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6383 dev->name);
6384 }
6385 netif_wake_queue(dev);
6386 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6387 dev->name);
20346722 6388
6389}
6390
6391/**
6392 * s2io_tx_watchdog - Watchdog for transmit side.
6393 * @dev : Pointer to net device structure
6394 * Description:
6395 * This function is triggered if the Tx Queue is stopped
6396 * for a pre-defined amount of time when the Interface is still up.
6397 * If the Interface is jammed in such a situation, the hardware is
6398 * reset (by s2io_close) and restarted again (by s2io_open) to
6399 * overcome any problem that might have been caused in the hardware.
6400 * Return value:
6401 * void
6402 */
6403
6404static void s2io_tx_watchdog(struct net_device *dev)
6405{
6406 nic_t *sp = dev->priv;
6407
6408 if (netif_carrier_ok(dev)) {
6409 schedule_work(&sp->rst_timer_task);
bd1034f0 6410 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6411 }
6412}
6413
6414/**
6415 * rx_osm_handler - To perform some OS related operations on SKB.
6416 * @sp: private member of the device structure,pointer to s2io_nic structure.
6417 * @skb : the socket buffer pointer.
6418 * @len : length of the packet
6419 * @cksum : FCS checksum of the frame.
6420 * @ring_no : the ring from which this RxD was extracted.
20346722 6421 * Description:
b41477f3 6422 * This function is called by the Rx interrupt service routine to perform
6423 * some OS related operations on the SKB before passing it to the upper
6424 * layers. It mainly checks if the checksum is OK, if so adds it to the
6425 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6426 * to the upper layer. If the checksum is wrong, it increments the Rx
6427 * packet error count, frees the SKB and returns error.
6428 * Return value:
6429 * SUCCESS on success and -1 on failure.
6430 */
20346722 6431static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
1da177e4 6432{
20346722 6433 nic_t *sp = ring_data->nic;
1da177e4 6434 struct net_device *dev = (struct net_device *) sp->dev;
6435 struct sk_buff *skb = (struct sk_buff *)
6436 ((unsigned long) rxdp->Host_Control);
6437 int ring_no = ring_data->ring_no;
1da177e4 6438 u16 l3_csum, l4_csum;
863c11a9 6439 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7d3d0439 6440 lro_t *lro;
da6971d8 6441
20346722 6442 skb->dev = dev;
c92ca04b 6443
863c11a9 6444 if (err) {
6445 /* Check for parity error */
6446 if (err & 0x1) {
6447 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6448 }
6449
6450 /*
6451 * Drop the packet if bad transfer code. Exception being
6452 * 0x5, which could be due to unsupported IPv6 extension header.
6453 * In this case, we let stack handle the packet.
6454 * Note that in this case, since checksum will be incorrect,
6455 * stack will validate the same.
6456 */
6457 if (err && ((err >> 48) != 0x5)) {
6458 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6459 dev->name, err);
6460 sp->stats.rx_crc_errors++;
6461 dev_kfree_skb(skb);
6462 atomic_dec(&sp->rx_bufs_left[ring_no]);
6463 rxdp->Host_Control = 0;
6464 return 0;
6465 }
20346722 6466 }
1da177e4 6467
6468 /* Updating statistics */
6469 rxdp->Host_Control = 0;
6470 sp->rx_pkt_count++;
6471 sp->stats.rx_packets++;
6472 if (sp->rxd_mode == RXD_MODE_1) {
6473 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
20346722 6474
6475 sp->stats.rx_bytes += len;
6476 skb_put(skb, len);
6477
6478 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6479 int get_block = ring_data->rx_curr_get_info.block_index;
6480 int get_off = ring_data->rx_curr_get_info.offset;
6481 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6482 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6483 unsigned char *buff = skb_push(skb, buf0_len);
6484
6485 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
6486 sp->stats.rx_bytes += buf0_len + buf2_len;
6487 memcpy(buff, ba->ba_0, buf0_len);
6488
6489 if (sp->rxd_mode == RXD_MODE_3A) {
6490 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6491
6492 skb_put(skb, buf1_len);
6493 skb->len += buf2_len;
6494 skb->data_len += buf2_len;
6495 skb->truesize += buf2_len;
6496 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6497 sp->stats.rx_bytes += buf1_len;
6498
6499 } else
6500 skb_put(skb, buf2_len);
6501 }
20346722 6502
6503 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6504 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6505 (sp->rx_csum)) {
6506 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6507 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6508 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 6509 /*
6510 * NIC verifies if the Checksum of the received
6511 * frame is Ok or not and accordingly returns
6512 * a flag in the RxD.
6513 */
6514 skb->ip_summed = CHECKSUM_UNNECESSARY;
6515 if (sp->lro) {
6516 u32 tcp_len;
6517 u8 *tcp;
6518 int ret = 0;
6519
6520 ret = s2io_club_tcp_session(skb->data, &tcp,
6521 &tcp_len, &lro, rxdp, sp);
6522 switch (ret) {
6523 case 3: /* Begin anew */
6524 lro->parent = skb;
6525 goto aggregate;
6526 case 1: /* Aggregate */
6527 {
6528 lro_append_pkt(sp, lro,
6529 skb, tcp_len);
6530 goto aggregate;
6531 }
6532 case 4: /* Flush session */
6533 {
6534 lro_append_pkt(sp, lro,
6535 skb, tcp_len);
6536 queue_rx_frame(lro->parent);
6537 clear_lro_session(lro);
6538 sp->mac_control.stats_info->
6539 sw_stat.flush_max_pkts++;
6540 goto aggregate;
6541 }
6542 case 2: /* Flush both */
6543 lro->parent->data_len =
6544 lro->frags_len;
6545 sp->mac_control.stats_info->
6546 sw_stat.sending_both++;
6547 queue_rx_frame(lro->parent);
6548 clear_lro_session(lro);
6549 goto send_up;
6550 case 0: /* sessions exceeded */
6551 case -1: /* non-TCP or not
6552 * L2 aggregatable
6553 */
6554 case 5: /*
6555 * First pkt in session not
6556 * L3/L4 aggregatable
6557 */
6558 break;
6559 default:
6560 DBG_PRINT(ERR_DBG,
6561 "%s: Samadhana!!\n",
6562 __FUNCTION__);
6563 BUG();
6564 }
6565 }
1da177e4 6566 } else {
6567 /*
6568 * Packet with erroneous checksum, let the
6569 * upper layers deal with it.
6570 */
6571 skb->ip_summed = CHECKSUM_NONE;
6572 }
6573 } else {
6574 skb->ip_summed = CHECKSUM_NONE;
6575 }
6576
6577 if (!sp->lro) {
6578 skb->protocol = eth_type_trans(skb, dev);
1da177e4 6579#ifdef CONFIG_S2IO_NAPI
6580 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6581 /* Queueing the vlan frame to the upper layer */
6582 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6583 RXD_GET_VLAN_TAG(rxdp->Control_2));
6584 } else {
6585 netif_receive_skb(skb);
6586 }
1da177e4 6587#else
6588 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6589 /* Queueing the vlan frame to the upper layer */
6590 vlan_hwaccel_rx(skb, sp->vlgrp,
6591 RXD_GET_VLAN_TAG(rxdp->Control_2));
6592 } else {
6593 netif_rx(skb);
6594 }
1da177e4 6595#endif
6596 } else {
6597send_up:
6598 queue_rx_frame(skb);
6599 }
1da177e4 6600 dev->last_rx = jiffies;
7d3d0439 6601aggregate:
1da177e4 6602 atomic_dec(&sp->rx_bufs_left[ring_no]);
6603 return SUCCESS;
6604}
6605
6606/**
6607 * s2io_link - stops/starts the Tx queue.
6608 * @sp : private member of the device structure, which is a pointer to the
6609 * s2io_nic structure.
6610 * @link : indicates whether link is UP/DOWN.
6611 * Description:
6612 * This function stops/starts the Tx queue depending on whether the link
6613 * status of the NIC is down or up. This is called by the Alarm
6614 * interrupt handler whenever a link change interrupt comes up.
6615 * Return value:
6616 * void.
6617 */
6618
26df54bf 6619static void s2io_link(nic_t * sp, int link)
6620{
6621 struct net_device *dev = (struct net_device *) sp->dev;
6622
6623 if (link != sp->last_link_state) {
6624 if (link == LINK_DOWN) {
6625 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6626 netif_carrier_off(dev);
6627 } else {
6628 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6629 netif_carrier_on(dev);
6630 }
6631 }
6632 sp->last_link_state = link;
6633}
6634
6635/**
6636 * get_xena_rev_id - to identify revision ID of xena.
6637 * @pdev : PCI Dev structure
6638 * Description:
6639 * Function to identify the Revision ID of xena.
6640 * Return value:
6641 * returns the revision ID of the device.
6642 */
6643
26df54bf 6644static int get_xena_rev_id(struct pci_dev *pdev)
6645{
6646 u8 id = 0;
6648 pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
6649 return id;
6650}
6651
6652/**
6653 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6654 * @sp : private member of the device structure, which is a pointer to the
6655 * s2io_nic structure.
6656 * Description:
6657 * This function initializes a few of the PCI and PCI-X configuration registers
6658 * with recommended values.
6659 * Return value:
6660 * void
6661 */
6662
6663static void s2io_init_pci(nic_t * sp)
6664{
20346722 6665 u16 pci_cmd = 0, pcix_cmd = 0;
6666
6667 /* Enable Data Parity Error Recovery in PCI-X command register. */
6668 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6669 &(pcix_cmd));
1da177e4 6670 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6671 (pcix_cmd | 1));
1da177e4 6672 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6673 &(pcix_cmd));
6674
6675 /* Set the PErr Response bit in PCI command register. */
6676 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6677 pci_write_config_word(sp->pdev, PCI_COMMAND,
6678 (pci_cmd | PCI_COMMAND_PARITY));
6679 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6680}
6681
6682static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6683{
6684 if (tx_fifo_num > 8) {
6685 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6686 "supported\n");
6687 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6688 tx_fifo_num = 8;
6689 }
6690	if (rx_ring_num > 8) {
6691 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6692 "supported\n");
6693 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6694 rx_ring_num = 8;
6695 }
6696#ifdef CONFIG_S2IO_NAPI
6697 if (*dev_intr_type != INTA) {
6698 DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when "
6699 "MSI/MSI-X is enabled. Defaulting to INTA\n");
6700 *dev_intr_type = INTA;
6701 }
6702#endif
6703#ifndef CONFIG_PCI_MSI
6704 if (*dev_intr_type != INTA) {
6705		DBG_PRINT(ERR_DBG, "s2io: This kernel does not support "
6706 "MSI/MSI-X. Defaulting to INTA\n");
6707 *dev_intr_type = INTA;
6708 }
6709#else
6710 if (*dev_intr_type > MSI_X) {
6711 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6712 "Defaulting to INTA\n");
6713 *dev_intr_type = INTA;
6714 }
6715#endif
6716 if ((*dev_intr_type == MSI_X) &&
6717 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6718 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6719 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
6720 "Defaulting to INTA\n");
6721 *dev_intr_type = INTA;
6722 }
6723 if (rx_ring_mode > 3) {
6724 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6725 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6726 rx_ring_mode = 3;
6727 }
6728 return SUCCESS;
6729}
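/*
 * Usage sketch: the values validated above correspond to the module
 * parameters documented at the top of this file, e.g. (hypothetical
 * invocation)
 *
 *	modprobe s2io intr_type=2 rx_ring_num=4 rx_ring_mode=1
 *
 * would request MSI-X with four single-buffer receive rings, subject to
 * the clamping done by s2io_verify_parm().
 */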
6730
1da177e4 6731/**
20346722 6732 * s2io_init_nic - Initialization of the adapter .
1da177e4
LT
6733 * @pdev : structure containing the PCI related information of the device.
6734 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
6735 * Description:
6736 * The function initializes an adapter identified by the pci_dev structure.
20346722
K
6737 * All OS related initialization, including memory and device structure
6738 * setup and initialization of the device private variables, is done. Also the swapper
6739 * control register is initialized to enable read and write into the I/O
1da177e4
LT
6740 * registers of the device.
6741 * Return value:
6742 * returns 0 on success and negative on failure.
6743 */
6744
6745static int __devinit
6746s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6747{
6748 nic_t *sp;
6749 struct net_device *dev;
1da177e4
LT
6750 int i, j, ret;
6751 int dma_flag = FALSE;
6752 u32 mac_up, mac_down;
6753 u64 val64 = 0, tmp64 = 0;
6754 XENA_dev_config_t __iomem *bar0 = NULL;
6755 u16 subid;
6756 mac_info_t *mac_control;
6757 struct config_param *config;
541ae68f 6758 int mode;
cc6e7c44 6759 u8 dev_intr_type = intr_type;
1da177e4 6760
9dc737a7
AR
6761 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
6762 return ret;
1da177e4
LT
6763
6764 if ((ret = pci_enable_device(pdev))) {
6765 DBG_PRINT(ERR_DBG,
6766 "s2io_init_nic: pci_enable_device failed\n");
6767 return ret;
6768 }
6769
1e7f0bd8 6770 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1da177e4
LT
6771 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6772 dma_flag = TRUE;
1da177e4 6773 if (pci_set_consistent_dma_mask
1e7f0bd8 6774 (pdev, DMA_64BIT_MASK)) {
1da177e4
LT
6775 DBG_PRINT(ERR_DBG,
6776 "Unable to obtain 64bit DMA for \
6777 consistent allocations\n");
6778 pci_disable_device(pdev);
6779 return -ENOMEM;
6780 }
1e7f0bd8 6781 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
1da177e4
LT
6782 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
6783 } else {
6784 pci_disable_device(pdev);
6785 return -ENOMEM;
6786 }
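	/*
	 * DMA policy recap: prefer a 64-bit streaming mask (and insist on
	 * a matching 64-bit consistent mask, since the descriptor rings
	 * are coherent allocations), otherwise fall back to a 32-bit
	 * mask; if neither sticks, the device is unusable (-ENOMEM).
	 */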
cc6e7c44
RA
6787 if (dev_intr_type != MSI_X) {
6788 if (pci_request_regions(pdev, s2io_driver_name)) {
b41477f3
AR
6789 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6790 pci_disable_device(pdev);
cc6e7c44
RA
6791 return -ENODEV;
6792 }
6793	} else {
6795 if (!(request_mem_region(pci_resource_start(pdev, 0),
6796 pci_resource_len(pdev, 0), s2io_driver_name))) {
6797 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
6798 pci_disable_device(pdev);
6799 return -ENODEV;
6800 }
6801 if (!(request_mem_region(pci_resource_start(pdev, 2),
6802 pci_resource_len(pdev, 2), s2io_driver_name))) {
6803 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
6804 release_mem_region(pci_resource_start(pdev, 0),
6805 pci_resource_len(pdev, 0));
6806 pci_disable_device(pdev);
6807 return -ENODEV;
6808 }
1da177e4
LT
6809 }
6810
6811 dev = alloc_etherdev(sizeof(nic_t));
6812 if (dev == NULL) {
6813 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6814 pci_disable_device(pdev);
6815 pci_release_regions(pdev);
6816 return -ENODEV;
6817 }
6818
6819 pci_set_master(pdev);
6820 pci_set_drvdata(pdev, dev);
6821 SET_MODULE_OWNER(dev);
6822 SET_NETDEV_DEV(dev, &pdev->dev);
6823
6824 /* Private member variable initialized to s2io NIC structure */
6825 sp = dev->priv;
6826 memset(sp, 0, sizeof(nic_t));
6827 sp->dev = dev;
6828 sp->pdev = pdev;
1da177e4 6829 sp->high_dma_flag = dma_flag;
1da177e4 6830 sp->device_enabled_once = FALSE;
da6971d8
AR
6831 if (rx_ring_mode == 1)
6832 sp->rxd_mode = RXD_MODE_1;
6833 if (rx_ring_mode == 2)
6834 sp->rxd_mode = RXD_MODE_3B;
6835 if (rx_ring_mode == 3)
6836 sp->rxd_mode = RXD_MODE_3A;
6837
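	/*
	 * Mapping recap: module parameter 1 -> RXD_MODE_1 (1-buffer),
	 * 2 -> RXD_MODE_3B (2-buffer), 3 -> RXD_MODE_3A (3-buffer),
	 * matching the mode banner printed at probe time below.
	 */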
cc6e7c44 6838 sp->intr_type = dev_intr_type;
1da177e4 6839
541ae68f
K
6840 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
6841 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
6842 sp->device_type = XFRAME_II_DEVICE;
6843 else
6844 sp->device_type = XFRAME_I_DEVICE;
6845
7d3d0439 6846 sp->lro = lro;
cc6e7c44 6847
1da177e4
LT
6848 /* Initialize some PCI/PCI-X fields of the NIC. */
6849 s2io_init_pci(sp);
6850
20346722 6851 /*
1da177e4 6852 * Setting the device configuration parameters.
20346722
K
6853 * Most of these parameters can be specified by the user during
6854 * module insertion as they are module loadable parameters. If
6855 * these parameters are not specified during load time, they
1da177e4
LT
6856 * are initialized with default values.
6857 */
6858 mac_control = &sp->mac_control;
6859 config = &sp->config;
6860
6861 /* Tx side parameters. */
1da177e4
LT
6862 config->tx_fifo_num = tx_fifo_num;
6863 for (i = 0; i < MAX_TX_FIFOS; i++) {
6864 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
6865 config->tx_cfg[i].fifo_priority = i;
6866 }
6867
20346722
K
6868 /* mapping the QoS priority to the configured fifos */
6869 for (i = 0; i < MAX_TX_FIFOS; i++)
6870 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
6871
1da177e4
LT
6872 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
6873 for (i = 0; i < config->tx_fifo_num; i++) {
6874 config->tx_cfg[i].f_no_snoop =
6875 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
6876 if (config->tx_cfg[i].fifo_len < 65) {
6877 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
6878 break;
6879 }
6880 }
fed5eccd
AR
6881 /* + 2 because one Txd for skb->data and one Txd for UFO */
6882 config->max_txds = MAX_SKB_FRAGS + 2;
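	/*
	 * e.g. with 4K pages MAX_SKB_FRAGS is typically 18, giving
	 * max_txds = 20: one TxD per fragment plus the two extras noted
	 * above.
	 */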
1da177e4
LT
6883
6884 /* Rx side parameters. */
1da177e4
LT
6885 config->rx_ring_num = rx_ring_num;
6886 for (i = 0; i < MAX_RX_RINGS; i++) {
6887 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
da6971d8 6888 (rxd_count[sp->rxd_mode] + 1);
1da177e4
LT
6889 config->rx_cfg[i].ring_priority = i;
6890 }
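	/*
	 * rx_ring_sz[] is given in receive blocks; each block supplies
	 * rxd_count[rxd_mode] usable descriptors, and the "+ 1" covers the
	 * per-block control/link descriptor (the per-mode counts are
	 * defined with rxd_count[] in the header).
	 */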
6891
6892 for (i = 0; i < rx_ring_num; i++) {
6893 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
6894 config->rx_cfg[i].f_no_snoop =
6895 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
6896 }
6897
6898 /* Setting Mac Control parameters */
6899 mac_control->rmac_pause_time = rmac_pause_time;
6900 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
6901 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
6902
6903
6904 /* Initialize Ring buffer parameters. */
6905 for (i = 0; i < config->rx_ring_num; i++)
6906 atomic_set(&sp->rx_bufs_left[i], 0);
6907
7ba013ac
K
6908 /* Initialize the number of ISRs currently running */
6909 atomic_set(&sp->isr_cnt, 0);
6910
1da177e4
LT
6911 /* initialize the shared memory used by the NIC and the host */
6912 if (init_shared_mem(sp)) {
6913 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
b41477f3 6914 dev->name);
1da177e4
LT
6915 ret = -ENOMEM;
6916 goto mem_alloc_failed;
6917 }
6918
6919 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
6920 pci_resource_len(pdev, 0));
6921 if (!sp->bar0) {
6922 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
6923 dev->name);
6924 ret = -ENOMEM;
6925 goto bar0_remap_failed;
6926 }
6927
6928 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
6929 pci_resource_len(pdev, 2));
6930 if (!sp->bar1) {
6931 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
6932 dev->name);
6933 ret = -ENOMEM;
6934 goto bar1_remap_failed;
6935 }
6936
6937 dev->irq = pdev->irq;
6938 dev->base_addr = (unsigned long) sp->bar0;
6939
6940 /* Initializing the BAR1 address as the start of the FIFO pointer. */
6941 for (j = 0; j < MAX_TX_FIFOS; j++) {
6942 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
6943 (sp->bar1 + (j * 0x00020000));
6944 }
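	/*
	 * Each Tx FIFO's doorbell aperture occupies a fixed 0x20000
	 * (128 KB) stride in BAR1: FIFO 0 at bar1 + 0x0, FIFO 1 at
	 * bar1 + 0x20000, and so on up to MAX_TX_FIFOS.
	 */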
6945
6946 /* Driver entry points */
6947 dev->open = &s2io_open;
6948 dev->stop = &s2io_close;
6949 dev->hard_start_xmit = &s2io_xmit;
6950 dev->get_stats = &s2io_get_stats;
6951 dev->set_multicast_list = &s2io_set_multicast;
6952 dev->do_ioctl = &s2io_ioctl;
6953 dev->change_mtu = &s2io_change_mtu;
6954 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
be3a6b02
K
6955 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6956 dev->vlan_rx_register = s2io_vlan_rx_register;
6957 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
20346722 6958
1da177e4
LT
6959 /*
6960 * will use eth_mac_addr() for dev->set_mac_address
6961 * mac address will be set every time dev->open() is called
6962 */
20346722 6963#if defined(CONFIG_S2IO_NAPI)
1da177e4 6964 dev->poll = s2io_poll;
20346722 6965 dev->weight = 32;
1da177e4
LT
6966#endif
6967
612eff0e
BH
6968#ifdef CONFIG_NET_POLL_CONTROLLER
6969 dev->poll_controller = s2io_netpoll;
6970#endif
6971
1da177e4
LT
6972 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
6973 if (sp->high_dma_flag == TRUE)
6974 dev->features |= NETIF_F_HIGHDMA;
6975#ifdef NETIF_F_TSO
6976 dev->features |= NETIF_F_TSO;
f83ef8c0
HX
6977#endif
6978#ifdef NETIF_F_TSO6
6979 dev->features |= NETIF_F_TSO6;
1da177e4 6980#endif
fed5eccd
AR
6981 if (sp->device_type & XFRAME_II_DEVICE) {
6982 dev->features |= NETIF_F_UFO;
6983 dev->features |= NETIF_F_HW_CSUM;
6984 }
1da177e4
LT
6985
6986 dev->tx_timeout = &s2io_tx_watchdog;
6987 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
6988 INIT_WORK(&sp->rst_timer_task,
6989 (void (*)(void *)) s2io_restart_nic, dev);
6990 INIT_WORK(&sp->set_link_task,
6991 (void (*)(void *)) s2io_set_link, sp);
6992
e960fc5c 6993 pci_save_state(sp->pdev);
1da177e4
LT
6994
6995 /* Setting swapper control on the NIC, for proper reset operation */
6996 if (s2io_set_swapper(sp)) {
6997 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
6998 dev->name);
6999 ret = -EAGAIN;
7000 goto set_swap_failed;
7001 }
7002
541ae68f
K
7003	/* Verify if the Herc works in the slot it's placed into */
7004 if (sp->device_type & XFRAME_II_DEVICE) {
7005 mode = s2io_verify_pci_mode(sp);
7006 if (mode < 0) {
7007 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7008 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7009 ret = -EBADSLT;
7010 goto set_swap_failed;
7011 }
7012 }
7013
7014 /* Not needed for Herc */
7015 if (sp->device_type & XFRAME_I_DEVICE) {
7016 /*
7017 * Fix for all "FFs" MAC address problems observed on
7018 * Alpha platforms
7019 */
7020 fix_mac_address(sp);
7021 s2io_reset(sp);
7022 }
1da177e4
LT
7023
7024 /*
1da177e4
LT
7025 * MAC address initialization.
7026 * For now only one mac address will be read and used.
7027 */
7028 bar0 = sp->bar0;
7029 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7030 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7031 writeq(val64, &bar0->rmac_addr_cmd_mem);
c92ca04b
AR
7032 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7033 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
1da177e4
LT
7034 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7035 mac_down = (u32) tmp64;
7036 mac_up = (u32) (tmp64 >> 32);
7037
7038	memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
7039
7040 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7041 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7042 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7043 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7044 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7045 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7046
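	/*
	 * Worked example: rmac_addr_data0_mem = 0x0011223344550000ULL
	 * gives mac_up = 0x00112233 and mac_down = 0x44550000, i.e. the
	 * station address 00:11:22:33:44:55 (the register's top six
	 * bytes, most-significant first).
	 */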
1da177e4
LT
7047 /* Set the factory defined MAC address initially */
7048 dev->addr_len = ETH_ALEN;
7049 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7050
b41477f3
AR
7051	/* Reset the NIC and bring it to a known state */
7052 s2io_reset(sp);
7053
1da177e4 7054 /*
20346722 7055 * Initialize the tasklet status and link state flags
541ae68f 7056 * and the card state parameter
1da177e4
LT
7057 */
7058 atomic_set(&(sp->card_state), 0);
7059 sp->tasklet_status = 0;
7060 sp->link_state = 0;
7061
1da177e4
LT
7062 /* Initialize spinlocks */
7063 spin_lock_init(&sp->tx_lock);
7064#ifndef CONFIG_S2IO_NAPI
7065 spin_lock_init(&sp->put_lock);
7066#endif
7ba013ac 7067 spin_lock_init(&sp->rx_lock);
1da177e4 7068
20346722
K
7069 /*
7070 * SXE-002: Configure link and activity LED to init state
7071 * on driver load.
1da177e4
LT
7072 */
7073 subid = sp->pdev->subsystem_device;
7074 if ((subid & 0xFF) >= 0x07) {
7075 val64 = readq(&bar0->gpio_control);
7076 val64 |= 0x0000800000000000ULL;
7077 writeq(val64, &bar0->gpio_control);
7078 val64 = 0x0411040400000000ULL;
7079 writeq(val64, (void __iomem *) bar0 + 0x2700);
7080 val64 = readq(&bar0->gpio_control);
7081 }
7082
7083 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7084
7085 if (register_netdev(dev)) {
7086 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7087 ret = -ENODEV;
7088 goto register_failed;
7089 }
9dc737a7 7090 s2io_vpd_read(sp);
9dc737a7 7091 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
b41477f3
AR
7092 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7093 sp->product_name, get_xena_rev_id(sp->pdev));
7094 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7095 s2io_driver_version);
9dc737a7
AR
7096 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7097 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
541ae68f
K
7098 sp->def_mac_addr[0].mac_addr[0],
7099 sp->def_mac_addr[0].mac_addr[1],
7100 sp->def_mac_addr[0].mac_addr[2],
7101 sp->def_mac_addr[0].mac_addr[3],
7102 sp->def_mac_addr[0].mac_addr[4],
7103 sp->def_mac_addr[0].mac_addr[5]);
9dc737a7 7104 if (sp->device_type & XFRAME_II_DEVICE) {
0b1f7ebe 7105 mode = s2io_print_pci_mode(sp);
541ae68f 7106 if (mode < 0) {
9dc737a7 7107 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
541ae68f 7108 ret = -EBADSLT;
9dc737a7 7109 unregister_netdev(dev);
541ae68f
K
7110 goto set_swap_failed;
7111 }
541ae68f 7112 }
9dc737a7
AR
7113 switch(sp->rxd_mode) {
7114 case RXD_MODE_1:
7115 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7116 dev->name);
7117 break;
7118 case RXD_MODE_3B:
7119 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7120 dev->name);
7121 break;
7122 case RXD_MODE_3A:
7123 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7124 dev->name);
7125 break;
7126 }
7127#ifdef CONFIG_S2IO_NAPI
7128 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7129#endif
7130 switch(sp->intr_type) {
7131 case INTA:
7132 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7133 break;
7134 case MSI:
7135 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7136 break;
7137 case MSI_X:
7138 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7139 break;
7140 }
7d3d0439
RA
7141 if (sp->lro)
7142 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
9dc737a7 7143 dev->name);
7d3d0439 7144
7ba013ac 7145 /* Initialize device name */
9dc737a7 7146 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7ba013ac 7147
b6e3f982
K
7148 /* Initialize bimodal Interrupts */
7149 sp->config.bimodal = bimodal;
7150 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7151 sp->config.bimodal = 0;
7152 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7153 dev->name);
7154 }
7155
20346722
K
7156 /*
7157 * Mark the link state as off at this point; when the link change
7158 * interrupt comes, the state will be automatically changed to
1da177e4
LT
7159 * the right state.
7160 */
7161 netif_carrier_off(dev);
1da177e4
LT
7162
7163 return 0;
7164
7165 register_failed:
7166 set_swap_failed:
7167 iounmap(sp->bar1);
7168 bar1_remap_failed:
7169 iounmap(sp->bar0);
7170 bar0_remap_failed:
7171 mem_alloc_failed:
7172 free_shared_mem(sp);
7173 pci_disable_device(pdev);
cc6e7c44
RA
7174 if (dev_intr_type != MSI_X)
7175 pci_release_regions(pdev);
7176 else {
7177 release_mem_region(pci_resource_start(pdev, 0),
7178 pci_resource_len(pdev, 0));
7179 release_mem_region(pci_resource_start(pdev, 2),
7180 pci_resource_len(pdev, 2));
7181 }
1da177e4
LT
7182 pci_set_drvdata(pdev, NULL);
7183 free_netdev(dev);
7184
7185 return ret;
7186}
7187
7188/**
20346722 7189 * s2io_rem_nic - Free the PCI device
1da177e4 7190 * @pdev: structure containing the PCI related information of the device.
20346722 7191 * Description: This function is called by the PCI subsystem to release a
1da177e4 7192 * PCI device and free up all resources held by the device. This could
20346722 7193 * be in response to a Hot plug event or when the driver is to be removed
1da177e4
LT
7194 * from memory.
7195 */
7196
7197static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7198{
7199 struct net_device *dev =
7200 (struct net_device *) pci_get_drvdata(pdev);
7201 nic_t *sp;
7202
7203 if (dev == NULL) {
7204 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7205 return;
7206 }
7207
7208 sp = dev->priv;
7209 unregister_netdev(dev);
7210
7211 free_shared_mem(sp);
7212 iounmap(sp->bar0);
7213 iounmap(sp->bar1);
7214 pci_disable_device(pdev);
cc6e7c44
RA
7215 if (sp->intr_type != MSI_X)
7216 pci_release_regions(pdev);
7217 else {
7218 release_mem_region(pci_resource_start(pdev, 0),
7219 pci_resource_len(pdev, 0));
7220 release_mem_region(pci_resource_start(pdev, 2),
7221 pci_resource_len(pdev, 2));
7222 }
1da177e4 7223 pci_set_drvdata(pdev, NULL);
1da177e4
LT
7224 free_netdev(dev);
7225}
7226
7227/**
7228 * s2io_starter - Entry point for the driver
7229 * Description: This function is the entry point for the driver. It registers
7230 * the driver with the PCI subsystem.
7231 */
7232
7233int __init s2io_starter(void)
7234{
7235 return pci_module_init(&s2io_driver);
7236}
7237
7238/**
20346722 7239 * s2io_closer - Cleanup routine for the driver
1da177e4
LT
7240 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
7241 */
7242
26df54bf 7243static void s2io_closer(void)
1da177e4
LT
7244{
7245 pci_unregister_driver(&s2io_driver);
7246 DBG_PRINT(INIT_DBG, "cleanup done\n");
7247}
7248
7249module_init(s2io_starter);
7250module_exit(s2io_closer);
7d3d0439
RA
7251
7252static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7253 struct tcphdr **tcp, RxD_t *rxdp)
7254{
7255 int ip_off;
7256 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7257
7258 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7259 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7260 __FUNCTION__);
7261 return -1;
7262 }
7263
7264 /* TODO:
7265 * By default the VLAN field in the MAC is stripped by the card, if this
7266 * feature is turned off in rx_pa_cfg register, then the ip_off field
7267 * has to be shifted by a further 2 bytes
7268 */
7269 switch (l2_type) {
7270 case 0: /* DIX type */
7271 case 4: /* DIX type with VLAN */
7272 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7273 break;
7274 /* LLC, SNAP etc are considered non-mergeable */
7275 default:
7276 return -1;
7277 }
7278
7279 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7280 ip_len = (u8)((*ip)->ihl);
7281 ip_len <<= 2;
7282 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7283
7284 return 0;
7285}
7286
7287static int check_for_socket_match(lro_t *lro, struct iphdr *ip,
7288 struct tcphdr *tcp)
7289{
7290 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7291 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7292 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7293 return -1;
7294 return 0;
7295}
7296
7297static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7298{
7299 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7300}
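/*
 * Worked example: tot_len = 1500 with a bare 20-byte IP header (ihl = 5)
 * and a 32-byte TCP header (doff = 8, timestamps present) yields
 * 1500 - 20 - 32 = 1448 payload bytes.
 */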
7301
7302static void initiate_new_session(lro_t *lro, u8 *l2h,
7303 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7304{
7305 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7306 lro->l2h = l2h;
7307 lro->iph = ip;
7308 lro->tcph = tcp;
7309 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7310	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;
7311	lro->sg_num = 1;
7312 lro->total_len = ntohs(ip->tot_len);
7313 lro->frags_len = 0;
7314 /*
7315 * check if we saw TCP timestamp. Other consistency checks have
7316 * already been done.
7317 */
7318 if (tcp->doff == 8) {
7319 u32 *ptr;
7320 ptr = (u32 *)(tcp+1);
7321 lro->saw_ts = 1;
7322 lro->cur_tsval = *(ptr+1);
7323 lro->cur_tsecr = *(ptr+2);
7324 }
7325 lro->in_use = 1;
7326}
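/*
 * Option layout assumed above when doff == 8 (and enforced by
 * verify_l3_l4_lro_capable() below): the 12 bytes after the TCP header
 * are NOP, NOP, TIMESTAMP kind/length, then the 32-bit tsval and tsecr -
 * hence the reads at ptr+1 and ptr+2. Both values are kept in network
 * byte order within the lro_t.
 */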
7327
7328static void update_L3L4_header(nic_t *sp, lro_t *lro)
7329{
7330 struct iphdr *ip = lro->iph;
7331 struct tcphdr *tcp = lro->tcph;
7332 u16 nchk;
7333 StatInfo_t *statinfo = sp->mac_control.stats_info;
7334 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7335
7336 /* Update L3 header */
7337 ip->tot_len = htons(lro->total_len);
7338 ip->check = 0;
7339 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7340 ip->check = nchk;
7341
7342 /* Update L4 header */
7343 tcp->ack_seq = lro->tcp_ack;
7344 tcp->window = lro->window;
7345
7346 /* Update tsecr field if this session has timestamps enabled */
7347 if (lro->saw_ts) {
7348 u32 *ptr = (u32 *)(tcp + 1);
7349 *(ptr+2) = lro->cur_tsecr;
7350 }
7351
7352 /* Update counters required for calculation of
7353 * average no. of packets aggregated.
7354 */
7355 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7356 statinfo->sw_stat.num_aggregations++;
7357}
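/*
 * Only the IP header checksum is recomputed here (over ihl 32-bit words
 * via ip_fast_csum()); the TCP checksum of the merged frame is not
 * rebuilt in software - the receive path relies on the adapter's L4
 * checksum verification of the individual frames.
 */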
7358
7359static void aggregate_new_rx(lro_t *lro, struct iphdr *ip,
7360 struct tcphdr *tcp, u32 l4_pyld)
7361{
7362 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7363 lro->total_len += l4_pyld;
7364 lro->frags_len += l4_pyld;
7365 lro->tcp_next_seq += l4_pyld;
7366 lro->sg_num++;
7367
7368	/* Update ack seq no. and window advertisement (from this pkt) in LRO object */
7369 lro->tcp_ack = tcp->ack_seq;
7370 lro->window = tcp->window;
7371
7372 if (lro->saw_ts) {
7373 u32 *ptr;
7374 /* Update tsecr and tsval from this packet */
7375 ptr = (u32 *) (tcp + 1);
7376 lro->cur_tsval = *(ptr + 1);
7377 lro->cur_tsecr = *(ptr + 2);
7378 }
7379}
7380
7381static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
7382 struct tcphdr *tcp, u32 tcp_pyld_len)
7383{
7d3d0439
RA
7384 u8 *ptr;
7385
79dc1901
AM
7386 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7387
7d3d0439
RA
7388 if (!tcp_pyld_len) {
7389 /* Runt frame or a pure ack */
7390 return -1;
7391 }
7392
7393 if (ip->ihl != 5) /* IP has options */
7394 return -1;
7395
75c30b13
AR
7396 /* If we see CE codepoint in IP header, packet is not mergeable */
7397 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7398 return -1;
7399
7400 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7d3d0439 7401 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
75c30b13 7402 tcp->ece || tcp->cwr || !tcp->ack) {
7d3d0439
RA
7403 /*
7404 * Currently recognize only the ack control word and
7405 * any other control field being set would result in
7406 * flushing the LRO session
7407 */
7408 return -1;
7409 }
7410
7411 /*
7412 * Allow only one TCP timestamp option. Don't aggregate if
7413 * any other options are detected.
7414 */
7415 if (tcp->doff != 5 && tcp->doff != 8)
7416 return -1;
7417
7418 if (tcp->doff == 8) {
7419 ptr = (u8 *)(tcp + 1);
7420 while (*ptr == TCPOPT_NOP)
7421 ptr++;
7422 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7423 return -1;
7424
7425 /* Ensure timestamp value increases monotonically */
7426 if (l_lro)
7427			if (ntohl(l_lro->cur_tsval) > ntohl(*((u32 *)(ptr+2))))
7428 return -1;
7429
7430 /* timestamp echo reply should be non-zero */
7431 if (*((u32 *)(ptr+6)) == 0)
7432 return -1;
7433 }
7434
7435 return 0;
7436}
7437
7438static int
7439s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
7440 RxD_t *rxdp, nic_t *sp)
7441{
7442 struct iphdr *ip;
7443 struct tcphdr *tcph;
7444 int ret = 0, i;
7445
7446 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
7447 rxdp))) {
7448 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
7449 ip->saddr, ip->daddr);
7450 } else {
7451 return ret;
7452 }
7453
7454 tcph = (struct tcphdr *)*tcp;
7455 *tcp_len = get_l4_pyld_length(ip, tcph);
7456 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7457 lro_t *l_lro = &sp->lro0_n[i];
7458 if (l_lro->in_use) {
7459 if (check_for_socket_match(l_lro, ip, tcph))
7460 continue;
7461 /* Sock pair matched */
7462 *lro = l_lro;
7463
7464 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
7465 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
7466 "0x%x, actual 0x%x\n", __FUNCTION__,
7467 (*lro)->tcp_next_seq,
7468 ntohl(tcph->seq));
7469
7470 sp->mac_control.stats_info->
7471 sw_stat.outof_sequence_pkts++;
7472 ret = 2;
7473 break;
7474 }
7475
7476 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
7477 ret = 1; /* Aggregate */
7478 else
7479 ret = 2; /* Flush both */
7480 break;
7481 }
7482 }
7483
7484 if (ret == 0) {
7485 /* Before searching for available LRO objects,
7486 * check if the pkt is L3/L4 aggregatable. If not
7487 * don't create new LRO session. Just send this
7488 * packet up.
7489 */
7490 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
7491 return 5;
7492 }
7493
7494 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7495 lro_t *l_lro = &sp->lro0_n[i];
7496 if (!(l_lro->in_use)) {
7497 *lro = l_lro;
7498 ret = 3; /* Begin anew */
7499 break;
7500 }
7501 }
7502 }
7503
7504 if (ret == 0) { /* sessions exceeded */
7505 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
7506 __FUNCTION__);
7507 *lro = NULL;
7508 return ret;
7509 }
7510
7511 switch (ret) {
7512 case 3:
7513 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
7514 break;
7515 case 2:
7516 update_L3L4_header(sp, *lro);
7517 break;
7518 case 1:
7519 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
7520 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
7521 update_L3L4_header(sp, *lro);
7522 ret = 4; /* Flush the LRO */
7523 }
7524 break;
7525 default:
7526		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n",
7527 __FUNCTION__);
7528 break;
7529 }
7530
7531 return ret;
7532}
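/*
 * Return-value contract of s2io_club_tcp_session(), as consumed by the
 * Rx completion path: 0 = all sessions busy, send frame up;
 * 1 = aggregated into an existing session; 2 = unmergeable hit on a
 * session (out-of-order or failed L3/L4 checks) - flush that session;
 * 3 = new session initiated; 4 = aggregated and session reached
 * lro_max_aggr_per_sess - flush it; 5 = frame itself not aggregatable,
 * send it up without touching any session.
 */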
7533
7534static void clear_lro_session(lro_t *lro)
7535{
7536	memset(lro, 0, sizeof(lro_t));
7539}
7540
7541static void queue_rx_frame(struct sk_buff *skb)
7542{
7543 struct net_device *dev = skb->dev;
7544
7545 skb->protocol = eth_type_trans(skb, dev);
7546#ifdef CONFIG_S2IO_NAPI
7547 netif_receive_skb(skb);
7548#else
7549 netif_rx(skb);
7550#endif
7551}
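/*
 * netif_receive_skb() hands the frame to the stack synchronously from
 * NAPI poll (softirq) context, while netif_rx() queues it to the
 * per-CPU backlog for hard-interrupt callers - matching the
 * CONFIG_S2IO_NAPI split used throughout this driver.
 */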
7552
7553static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
7554 u32 tcp_len)
7555{
75c30b13 7556 struct sk_buff *first = lro->parent;
7d3d0439
RA
7557
7558 first->len += tcp_len;
7559 first->data_len = lro->frags_len;
7560 skb_pull(skb, (skb->len - tcp_len));
75c30b13
AR
7561 if (skb_shinfo(first)->frag_list)
7562 lro->last_frag->next = skb;
7d3d0439
RA
7563 else
7564 skb_shinfo(first)->frag_list = skb;
75c30b13 7565 lro->last_frag = skb;
7d3d0439
RA
7566 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7567 return;
7568}
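/*
 * Bookkeeping sketch for the append above: the parent skb retains the
 * one rebuilt L2/L3/L4 header; each appended skb is first skb_pull()ed
 * down to bare TCP payload, then chained on
 * skb_shinfo(first)->frag_list, with first->len and first->data_len
 * grown to account for the non-linear bytes.
 */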