drivers/net/s2io.c
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
1da177e4 3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722 14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4 18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all the code parts that were
1da177e4 22 * deprecated and also for style-related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4 24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4 27 * The module loadable parameters that are supported by the driver and a brief
 28 * explanation of all the variables:
20346722 29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver.
776bd20f 31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
1da177e4 32 * is also an array of size 8.
 33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
20346722 34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 35 * Tx descriptors that can be associated with each corresponding FIFO.
1da177e4 36 ************************************************************************/
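/*
 * Illustrative usage (not part of the original source): assuming the
 * variables above are exported as module parameters further down in
 * this file, a load-time invocation could look like the following;
 * the values shown are hypothetical.
 *
 *	modprobe s2io tx_fifo_num=2 tx_fifo_len=512,512 \
 *		rx_ring_num=2 rx_ring_sz=128,128
 */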
37
38#include <linux/config.h>
39#include <linux/module.h>
40#include <linux/types.h>
41#include <linux/errno.h>
42#include <linux/ioport.h>
43#include <linux/pci.h>
1e7f0bd8 44#include <linux/dma-mapping.h>
1da177e4 45#include <linux/kernel.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/skbuff.h>
49#include <linux/init.h>
50#include <linux/delay.h>
51#include <linux/stddef.h>
52#include <linux/ioctl.h>
53#include <linux/timex.h>
54#include <linux/sched.h>
55#include <linux/ethtool.h>
56#include <linux/version.h>
57#include <linux/workqueue.h>
be3a6b02 58#include <linux/if_vlan.h>
1da177e4 59
1da177e4 60#include <asm/system.h>
61#include <asm/uaccess.h>
20346722 62#include <asm/io.h>
1da177e4 63
64/* local include */
65#include "s2io.h"
66#include "s2io-regs.h"
67
68/* S2io Driver name & version. */
20346722 69static char s2io_driver_name[] = "Neterion";
776bd20f 70static char s2io_driver_version[] = "Version 2.0.8.1";
1da177e4 71
5e25b9dd 72static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73{
74 int ret;
75
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
78
79 return ret;
80}
81
20346722 82/*
1da177e4 83 * Cards with the following subsystem_ids have a link state indication
 84 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 85 * The macro below identifies these cards given the subsystem_id.
86 */
541ae68f 87#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
88 (dev_type == XFRAME_I_DEVICE) ? \
89 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
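/*
 * Illustrative only (not from the original source): an Xframe-I adapter
 * with subsystem id 0x600C makes this macro evaluate to 1, while any
 * Xframe-II adapter yields 0 regardless of subid.
 */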
1da177e4
LT
91
92#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
93 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
94#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
95#define PANIC 1
96#define LOW 2
97static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
98{
99 int level = 0;
20346722 100 mac_info_t *mac_control;
101
102 mac_control = &sp->mac_control;
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
1da177e4 104 level = LOW;
fe113638 105 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
1da177e4 106 level = PANIC;
107 }
108 }
109
110 return level;
111}
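/*
 * Worked example (assumed values, not from the original source): with
 * pkt_cnt == 4096 and rxb_size == 500 the difference exceeds 16, so the
 * function returns LOW; once rxb_size drops to MAX_RXDS_PER_BLOCK or
 * below (assumed here to be 127), it returns PANIC instead.
 */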
112
113/* Ethtool related variables and Macros. */
114static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115 "Register test\t(offline)",
116 "Eeprom test\t(offline)",
117 "Link test\t(online)",
118 "RLDRAM test\t(offline)",
119 "BIST Test\t(offline)"
120};
121
122static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
123 {"tmac_frms"},
124 {"tmac_data_octets"},
125 {"tmac_drop_frms"},
126 {"tmac_mcst_frms"},
127 {"tmac_bcst_frms"},
128 {"tmac_pause_ctrl_frms"},
129 {"tmac_any_err_frms"},
130 {"tmac_vld_ip_octets"},
131 {"tmac_vld_ip"},
132 {"tmac_drop_ip"},
133 {"tmac_icmp"},
134 {"tmac_rst_tcp"},
135 {"tmac_tcp"},
136 {"tmac_udp"},
137 {"rmac_vld_frms"},
138 {"rmac_data_octets"},
139 {"rmac_fcs_err_frms"},
140 {"rmac_drop_frms"},
141 {"rmac_vld_mcst_frms"},
142 {"rmac_vld_bcst_frms"},
143 {"rmac_in_rng_len_err_frms"},
144 {"rmac_long_frms"},
145 {"rmac_pause_ctrl_frms"},
146 {"rmac_discarded_frms"},
147 {"rmac_usized_frms"},
148 {"rmac_osized_frms"},
149 {"rmac_frag_frms"},
150 {"rmac_jabber_frms"},
151 {"rmac_ip"},
152 {"rmac_ip_octets"},
153 {"rmac_hdr_err_ip"},
154 {"rmac_drop_ip"},
155 {"rmac_icmp"},
156 {"rmac_tcp"},
157 {"rmac_udp"},
158 {"rmac_err_drp_udp"},
159 {"rmac_pause_cnt"},
160 {"rmac_accepted_ip"},
161 {"rmac_err_tcp"},
7ba013ac 162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
1da177e4 165};
166
167#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
168#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
169
170#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
171#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
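/*
 * Illustrative only: with the 42 entries in ethtool_stats_keys above,
 * S2IO_STAT_LEN evaluates to 42 and S2IO_STAT_STRINGS_LEN to
 * 42 * ETH_GSTRING_LEN bytes; S2IO_TEST_LEN is likewise 5 for the five
 * self-test strings in s2io_gstrings.
 */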
172
25fff88e 173#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
174 init_timer(&timer); \
175 timer.function = handle; \
176 timer.data = (unsigned long) arg; \
177 mod_timer(&timer, (jiffies + exp)) \
178
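/*
 * Illustrative use of S2IO_TIMER_CONF (not from the original source):
 * the arguments are assumed to be a struct timer_list, a handler
 * function, its argument and an expiry delta in jiffies, e.g.
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 *
 * The handler and field names above are hypothetical.
 */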
be3a6b02 179/* Register the VLAN group */
180static void s2io_vlan_rx_register(struct net_device *dev,
181 struct vlan_group *grp)
182{
183 nic_t *nic = dev->priv;
184 unsigned long flags;
185
186 spin_lock_irqsave(&nic->tx_lock, flags);
187 nic->vlgrp = grp;
188 spin_unlock_irqrestore(&nic->tx_lock, flags);
189}
190
191/* Unregister the vlan */
192static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193{
194 nic_t *nic = dev->priv;
195 unsigned long flags;
196
197 spin_lock_irqsave(&nic->tx_lock, flags);
198 if (nic->vlgrp)
199 nic->vlgrp->vlan_devices[vid] = NULL;
200 spin_unlock_irqrestore(&nic->tx_lock, flags);
201}
202
20346722 203/*
1da177e4 204 * Constants to be programmed into the Xena's registers, to configure
205 * the XAUI.
206 */
207
208#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
209#define END_SIGN 0x0
210
541ae68f 211static u64 herc_act_dtx_cfg[] = {
212 /* Set address */
e960fc5c 213 0x8000051536750000ULL, 0x80000515367500E0ULL,
541ae68f 214 /* Write data */
e960fc5c 215 0x8000051536750004ULL, 0x80000515367500E4ULL,
541ae68f 216 /* Set address */
217 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
218 /* Write data */
219 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
220 /* Set address */
e960fc5c 221 0x801205150D440000ULL, 0x801205150D4400E0ULL,
222 /* Write data */
223 0x801205150D440004ULL, 0x801205150D4400E4ULL,
224 /* Set address */
541ae68f 225 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
226 /* Write data */
227 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
228 /* Done */
229 END_SIGN
230};
231
232static u64 xena_mdio_cfg[] = {
1da177e4 233 /* Reset PMA PLL */
234 0xC001010000000000ULL, 0xC0010100000000E0ULL,
235 0xC0010100008000E4ULL,
236 /* Remove Reset from PMA PLL */
237 0xC001010000000000ULL, 0xC0010100000000E0ULL,
238 0xC0010100000000E4ULL,
239 END_SIGN
240};
241
541ae68f 242static u64 xena_dtx_cfg[] = {
1da177e4 243 0x8000051500000000ULL, 0x80000515000000E0ULL,
244 0x80000515D93500E4ULL, 0x8001051500000000ULL,
245 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
246 0x8002051500000000ULL, 0x80020515000000E0ULL,
247 0x80020515F21000E4ULL,
248 /* Set PADLOOPBACKN */
249 0x8002051500000000ULL, 0x80020515000000E0ULL,
250 0x80020515B20000E4ULL, 0x8003051500000000ULL,
251 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
252 0x8004051500000000ULL, 0x80040515000000E0ULL,
253 0x80040515B20000E4ULL, 0x8005051500000000ULL,
254 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
255 SWITCH_SIGN,
256 /* Remove PADLOOPBACKN */
257 0x8002051500000000ULL, 0x80020515000000E0ULL,
258 0x80020515F20000E4ULL, 0x8003051500000000ULL,
259 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
260 0x8004051500000000ULL, 0x80040515000000E0ULL,
261 0x80040515F20000E4ULL, 0x8005051500000000ULL,
262 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
263 END_SIGN
264};
265
20346722 266/*
1da177e4 267 * Constants for fixing the MAC address problem seen mostly on
268 * Alpha machines.
269 */
270static u64 fix_mac[] = {
271 0x0060000000000000ULL, 0x0060600000000000ULL,
272 0x0040600000000000ULL, 0x0000600000000000ULL,
273 0x0020600000000000ULL, 0x0060600000000000ULL,
274 0x0020600000000000ULL, 0x0060600000000000ULL,
275 0x0020600000000000ULL, 0x0060600000000000ULL,
276 0x0020600000000000ULL, 0x0060600000000000ULL,
277 0x0020600000000000ULL, 0x0060600000000000ULL,
278 0x0020600000000000ULL, 0x0060600000000000ULL,
279 0x0020600000000000ULL, 0x0060600000000000ULL,
280 0x0020600000000000ULL, 0x0060600000000000ULL,
281 0x0020600000000000ULL, 0x0060600000000000ULL,
282 0x0020600000000000ULL, 0x0060600000000000ULL,
283 0x0020600000000000ULL, 0x0000600000000000ULL,
284 0x0040600000000000ULL, 0x0060600000000000ULL,
285 END_SIGN
286};
287
288/* Module Loadable parameters. */
289static unsigned int tx_fifo_num = 1;
290static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
291 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
292static unsigned int rx_ring_num = 1;
293static unsigned int rx_ring_sz[MAX_RX_RINGS] =
294 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
20346722 295static unsigned int rts_frm_len[MAX_RX_RINGS] =
296 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
5e25b9dd 297static unsigned int use_continuous_tx_intrs = 1;
1da177e4 298static unsigned int rmac_pause_time = 65535;
299static unsigned int mc_pause_threshold_q0q3 = 187;
300static unsigned int mc_pause_threshold_q4q7 = 187;
301static unsigned int shared_splits;
302static unsigned int tmac_util_period = 5;
303static unsigned int rmac_util_period = 5;
b6e3f982 304static unsigned int bimodal = 0;
1da177e4 305#ifndef CONFIG_S2IO_NAPI
306static unsigned int indicate_max_pkts;
307#endif
303bcb4b 308/* Frequency of Rx desc syncs expressed as power of 2 */
309static unsigned int rxsync_frequency = 3;
1da177e4 310
20346722 311/*
1da177e4 312 * S2IO device table.
20346722 313 * This table lists all the devices that this driver supports.
1da177e4 314 */
315static struct pci_device_id s2io_tbl[] __devinitdata = {
316 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
317 PCI_ANY_ID, PCI_ANY_ID},
318 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
319 PCI_ANY_ID, PCI_ANY_ID},
320 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
20346722 321 PCI_ANY_ID, PCI_ANY_ID},
322 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
323 PCI_ANY_ID, PCI_ANY_ID},
1da177e4 324 {0,}
325};
326
327MODULE_DEVICE_TABLE(pci, s2io_tbl);
328
329static struct pci_driver s2io_driver = {
330 .name = "S2IO",
331 .id_table = s2io_tbl,
332 .probe = s2io_init_nic,
333 .remove = __devexit_p(s2io_rem_nic),
334};
335
336/* A simplifier macro used both by init and free shared_mem Fns(). */
337#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
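/*
 * Worked example (assumed values, not from the original source): with
 * lst_size == 256 bytes and PAGE_SIZE == 4096, lst_per_page is 16, so a
 * FIFO of 100 TxDLs needs TXD_MEM_PAGE_CNT(100, 16) == 7 pages.
 */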
338
339/**
340 * init_shared_mem - Allocation and Initialization of Memory
341 * @nic: Device private variable.
20346722 342 * Description: The function allocates all the memory areas shared
 343 * between the NIC and the driver. This includes Tx descriptors,
1da177e4 344 * Rx descriptors and the statistics block.
345 */
346
347static int init_shared_mem(struct s2io_nic *nic)
348{
349 u32 size;
350 void *tmp_v_addr, *tmp_v_addr_next;
351 dma_addr_t tmp_p_addr, tmp_p_addr_next;
352 RxD_block_t *pre_rxd_blk = NULL;
20346722 353 int i, j, blk_cnt, rx_sz, tx_sz;
1da177e4 354 int lst_size, lst_per_page;
355 struct net_device *dev = nic->dev;
356#ifdef CONFIG_2BUFF_MODE
8ae418cf 357 unsigned long tmp;
1da177e4 358 buffAdd_t *ba;
359#endif
360
361 mac_info_t *mac_control;
362 struct config_param *config;
363
364 mac_control = &nic->mac_control;
365 config = &nic->config;
366
367
 368 /* Allocation and initialization of TXDLs in FIFOs */
369 size = 0;
370 for (i = 0; i < config->tx_fifo_num; i++) {
371 size += config->tx_cfg[i].fifo_len;
372 }
373 if (size > MAX_AVAILABLE_TXDS) {
0b1f7ebe 374 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
375 __FUNCTION__);
376 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
1da177e4 377 return FAILURE;
378 }
379
380 lst_size = (sizeof(TxD_t) * config->max_txds);
20346722 381 tx_sz = lst_size * size;
1da177e4
LT
382 lst_per_page = PAGE_SIZE / lst_size;
383
384 for (i = 0; i < config->tx_fifo_num; i++) {
385 int fifo_len = config->tx_cfg[i].fifo_len;
386 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
20346722
K
387 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
388 GFP_KERNEL);
389 if (!mac_control->fifos[i].list_info) {
1da177e4
LT
390 DBG_PRINT(ERR_DBG,
391 "Malloc failed for list_info\n");
392 return -ENOMEM;
393 }
20346722 394 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
1da177e4
LT
395 }
396 for (i = 0; i < config->tx_fifo_num; i++) {
397 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
398 lst_per_page);
20346722
K
399 mac_control->fifos[i].tx_curr_put_info.offset = 0;
400 mac_control->fifos[i].tx_curr_put_info.fifo_len =
1da177e4 401 config->tx_cfg[i].fifo_len - 1;
20346722
K
402 mac_control->fifos[i].tx_curr_get_info.offset = 0;
403 mac_control->fifos[i].tx_curr_get_info.fifo_len =
1da177e4 404 config->tx_cfg[i].fifo_len - 1;
20346722
K
405 mac_control->fifos[i].fifo_no = i;
406 mac_control->fifos[i].nic = nic;
776bd20f 407 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;
20346722 408
1da177e4
LT
409 for (j = 0; j < page_num; j++) {
410 int k = 0;
411 dma_addr_t tmp_p;
412 void *tmp_v;
413 tmp_v = pci_alloc_consistent(nic->pdev,
414 PAGE_SIZE, &tmp_p);
415 if (!tmp_v) {
416 DBG_PRINT(ERR_DBG,
417 "pci_alloc_consistent ");
418 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
419 return -ENOMEM;
420 }
776bd20f 421 /* If we got a zero DMA address(can happen on
422 * certain platforms like PPC), reallocate.
423 * Store virtual address of page we don't want,
424 * to be freed later.
425 */
426 if (!tmp_p) {
427 mac_control->zerodma_virt_addr = tmp_v;
428 DBG_PRINT(INIT_DBG,
429 "%s: Zero DMA address for TxDL. ", dev->name);
430 DBG_PRINT(INIT_DBG,
431 "Virtual address %llx\n", (u64)tmp_v);
432 tmp_v = pci_alloc_consistent(nic->pdev,
433 PAGE_SIZE, &tmp_p);
434 if (!tmp_v) {
435 DBG_PRINT(ERR_DBG,
436 "pci_alloc_consistent ");
437 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
438 return -ENOMEM;
439 }
440 }
1da177e4
LT
441 while (k < lst_per_page) {
442 int l = (j * lst_per_page) + k;
443 if (l == config->tx_cfg[i].fifo_len)
20346722
K
444 break;
445 mac_control->fifos[i].list_info[l].list_virt_addr =
1da177e4 446 tmp_v + (k * lst_size);
20346722 447 mac_control->fifos[i].list_info[l].list_phy_addr =
1da177e4
LT
448 tmp_p + (k * lst_size);
449 k++;
450 }
451 }
452 }
1da177e4
LT
453
454 /* Allocation and initialization of RXDs in Rings */
455 size = 0;
456 for (i = 0; i < config->rx_ring_num; i++) {
457 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
458 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
459 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
460 i);
461 DBG_PRINT(ERR_DBG, "RxDs per Block");
462 return FAILURE;
463 }
464 size += config->rx_cfg[i].num_rxd;
20346722 465 mac_control->rings[i].block_count =
1da177e4 466 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
20346722
K
467 mac_control->rings[i].pkt_cnt =
468 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
1da177e4 469 }
20346722
K
470 size = (size * (sizeof(RxD_t)));
471 rx_sz = size;
1da177e4
LT
472
473 for (i = 0; i < config->rx_ring_num; i++) {
20346722
K
474 mac_control->rings[i].rx_curr_get_info.block_index = 0;
475 mac_control->rings[i].rx_curr_get_info.offset = 0;
476 mac_control->rings[i].rx_curr_get_info.ring_len =
1da177e4 477 config->rx_cfg[i].num_rxd - 1;
20346722
K
478 mac_control->rings[i].rx_curr_put_info.block_index = 0;
479 mac_control->rings[i].rx_curr_put_info.offset = 0;
480 mac_control->rings[i].rx_curr_put_info.ring_len =
1da177e4 481 config->rx_cfg[i].num_rxd - 1;
20346722
K
482 mac_control->rings[i].nic = nic;
483 mac_control->rings[i].ring_no = i;
484
1da177e4
LT
485 blk_cnt =
486 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
487 /* Allocating all the Rx blocks */
488 for (j = 0; j < blk_cnt; j++) {
489#ifndef CONFIG_2BUFF_MODE
490 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
491#else
492 size = SIZE_OF_BLOCK;
493#endif
494 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
495 &tmp_p_addr);
496 if (tmp_v_addr == NULL) {
497 /*
20346722
K
498 * In case of failure, free_shared_mem()
499 * is called, which should free any
500 * memory that was alloced till the
1da177e4
LT
501 * failure happened.
502 */
20346722 503 mac_control->rings[i].rx_blocks[j].block_virt_addr =
1da177e4
LT
504 tmp_v_addr;
505 return -ENOMEM;
506 }
507 memset(tmp_v_addr, 0, size);
20346722
K
508 mac_control->rings[i].rx_blocks[j].block_virt_addr =
509 tmp_v_addr;
510 mac_control->rings[i].rx_blocks[j].block_dma_addr =
511 tmp_p_addr;
1da177e4
LT
512 }
513 /* Interlinking all Rx Blocks */
514 for (j = 0; j < blk_cnt; j++) {
20346722
K
515 tmp_v_addr =
516 mac_control->rings[i].rx_blocks[j].block_virt_addr;
1da177e4 517 tmp_v_addr_next =
20346722 518 mac_control->rings[i].rx_blocks[(j + 1) %
1da177e4 519 blk_cnt].block_virt_addr;
20346722
K
520 tmp_p_addr =
521 mac_control->rings[i].rx_blocks[j].block_dma_addr;
1da177e4 522 tmp_p_addr_next =
20346722 523 mac_control->rings[i].rx_blocks[(j + 1) %
1da177e4
LT
524 blk_cnt].block_dma_addr;
525
526 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
20346722 527 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
1da177e4
LT
528 * marker.
529 */
530#ifndef CONFIG_2BUFF_MODE
531 pre_rxd_blk->reserved_2_pNext_RxD_block =
532 (unsigned long) tmp_v_addr_next;
533#endif
534 pre_rxd_blk->pNext_RxD_Blk_physical =
535 (u64) tmp_p_addr_next;
536 }
537 }
538
539#ifdef CONFIG_2BUFF_MODE
20346722 540 /*
1da177e4
LT
541 * Allocation of Storages for buffer addresses in 2BUFF mode
542 * and the buffers as well.
543 */
544 for (i = 0; i < config->rx_ring_num; i++) {
545 blk_cnt =
546 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
20346722 547 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
1da177e4 548 GFP_KERNEL);
20346722 549 if (!mac_control->rings[i].ba)
1da177e4
LT
550 return -ENOMEM;
551 for (j = 0; j < blk_cnt; j++) {
552 int k = 0;
20346722 553 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
1da177e4
LT
554 (MAX_RXDS_PER_BLOCK + 1)),
555 GFP_KERNEL);
20346722 556 if (!mac_control->rings[i].ba[j])
1da177e4
LT
557 return -ENOMEM;
558 while (k != MAX_RXDS_PER_BLOCK) {
20346722 559 ba = &mac_control->rings[i].ba[j][k];
1da177e4 560
20346722 561 ba->ba_0_org = (void *) kmalloc
1da177e4
LT
562 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
563 if (!ba->ba_0_org)
564 return -ENOMEM;
8ae418cf 565 tmp = (unsigned long) ba->ba_0_org;
1da177e4 566 tmp += ALIGN_SIZE;
8ae418cf 567 tmp &= ~((unsigned long) ALIGN_SIZE);
1da177e4
LT
568 ba->ba_0 = (void *) tmp;
569
20346722 570 ba->ba_1_org = (void *) kmalloc
1da177e4
LT
571 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
572 if (!ba->ba_1_org)
573 return -ENOMEM;
8ae418cf 574 tmp = (unsigned long) ba->ba_1_org;
1da177e4 575 tmp += ALIGN_SIZE;
8ae418cf 576 tmp &= ~((unsigned long) ALIGN_SIZE);
1da177e4
LT
577 ba->ba_1 = (void *) tmp;
578 k++;
579 }
580 }
581 }
582#endif
583
584 /* Allocation and initialization of Statistics block */
585 size = sizeof(StatInfo_t);
586 mac_control->stats_mem = pci_alloc_consistent
587 (nic->pdev, size, &mac_control->stats_mem_phy);
588
589 if (!mac_control->stats_mem) {
20346722
K
590 /*
591 * In case of failure, free_shared_mem() is called, which
592 * should free any memory that was alloced till the
1da177e4
LT
593 * failure happened.
594 */
595 return -ENOMEM;
596 }
597 mac_control->stats_mem_sz = size;
598
599 tmp_v_addr = mac_control->stats_mem;
600 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
601 memset(tmp_v_addr, 0, size);
1da177e4
LT
602 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
603 (unsigned long long) tmp_p_addr);
604
605 return SUCCESS;
606}
607
20346722 608/**
 609 * free_shared_mem - Free the allocated Memory
1da177e4 610 * @nic: Device private variable.
 611 * Description: This function frees all memory locations allocated by
 612 * the init_shared_mem() function and returns them to the kernel.
613 */
614
615static void free_shared_mem(struct s2io_nic *nic)
616{
617 int i, j, blk_cnt, size;
618 void *tmp_v_addr;
619 dma_addr_t tmp_p_addr;
620 mac_info_t *mac_control;
621 struct config_param *config;
622 int lst_size, lst_per_page;
776bd20f 623 struct net_device *dev = nic->dev;
1da177e4
LT
624
625 if (!nic)
626 return;
627
628 mac_control = &nic->mac_control;
629 config = &nic->config;
630
631 lst_size = (sizeof(TxD_t) * config->max_txds);
632 lst_per_page = PAGE_SIZE / lst_size;
633
634 for (i = 0; i < config->tx_fifo_num; i++) {
635 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
636 lst_per_page);
637 for (j = 0; j < page_num; j++) {
638 int mem_blks = (j * lst_per_page);
776bd20f 639 if (!mac_control->fifos[i].list_info)
640 return;
641 if (!mac_control->fifos[i].list_info[mem_blks].
642 list_virt_addr)
1da177e4
LT
643 break;
644 pci_free_consistent(nic->pdev, PAGE_SIZE,
20346722
K
645 mac_control->fifos[i].
646 list_info[mem_blks].
1da177e4 647 list_virt_addr,
20346722
K
648 mac_control->fifos[i].
649 list_info[mem_blks].
1da177e4
LT
650 list_phy_addr);
651 }
776bd20f 652 /* If we got a zero DMA address during allocation,
653 * free the page now
654 */
655 if (mac_control->zerodma_virt_addr) {
656 pci_free_consistent(nic->pdev, PAGE_SIZE,
657 mac_control->zerodma_virt_addr,
658 (dma_addr_t)0);
659 DBG_PRINT(INIT_DBG,
660 "%s: Freeing TxDL with zero DMA addr. ", dev->name);
661 DBG_PRINT(INIT_DBG, "Virtual address %llx\n",
662 (u64)(mac_control->zerodma_virt_addr));
663 }
20346722 664 kfree(mac_control->fifos[i].list_info);
1da177e4
LT
665 }
666
667#ifndef CONFIG_2BUFF_MODE
668 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
669#else
670 size = SIZE_OF_BLOCK;
671#endif
672 for (i = 0; i < config->rx_ring_num; i++) {
20346722 673 blk_cnt = mac_control->rings[i].block_count;
1da177e4 674 for (j = 0; j < blk_cnt; j++) {
20346722
K
675 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
676 block_virt_addr;
677 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
678 block_dma_addr;
1da177e4
LT
679 if (tmp_v_addr == NULL)
680 break;
681 pci_free_consistent(nic->pdev, size,
682 tmp_v_addr, tmp_p_addr);
683 }
684 }
685
686#ifdef CONFIG_2BUFF_MODE
687 /* Freeing buffer storage addresses in 2BUFF mode. */
688 for (i = 0; i < config->rx_ring_num; i++) {
689 blk_cnt =
690 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
1da177e4
LT
691 for (j = 0; j < blk_cnt; j++) {
692 int k = 0;
20346722
K
693 if (!mac_control->rings[i].ba[j])
694 continue;
1da177e4 695 while (k != MAX_RXDS_PER_BLOCK) {
20346722 696 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
1da177e4
LT
697 kfree(ba->ba_0_org);
698 kfree(ba->ba_1_org);
699 k++;
700 }
20346722 701 kfree(mac_control->rings[i].ba[j]);
1da177e4 702 }
20346722
K
703 if (mac_control->rings[i].ba)
704 kfree(mac_control->rings[i].ba);
1da177e4 705 }
1da177e4
LT
706#endif
707
708 if (mac_control->stats_mem) {
709 pci_free_consistent(nic->pdev,
710 mac_control->stats_mem_sz,
711 mac_control->stats_mem,
712 mac_control->stats_mem_phy);
713 }
714}
715
541ae68f 716/**
 717 * s2io_verify_pci_mode - Verify that the adapter reports a known PCI/PCI-X mode
718 */
719
720static int s2io_verify_pci_mode(nic_t *nic)
721{
509a2671 722 XENA_dev_config_t __iomem *bar0 = nic->bar0;
541ae68f
K
723 register u64 val64 = 0;
724 int mode;
725
726 val64 = readq(&bar0->pci_mode);
727 mode = (u8)GET_PCI_MODE(val64);
728
729 if ( val64 & PCI_MODE_UNKNOWN_MODE)
730 return -1; /* Unknown PCI mode */
731 return mode;
732}
733
734
735/**
 736 * s2io_print_pci_mode - Print the PCI/PCI-X bus width and speed in use
737 */
738static int s2io_print_pci_mode(nic_t *nic)
739{
509a2671 740 XENA_dev_config_t __iomem *bar0 = nic->bar0;
541ae68f
K
741 register u64 val64 = 0;
742 int mode;
743 struct config_param *config = &nic->config;
744
745 val64 = readq(&bar0->pci_mode);
746 mode = (u8)GET_PCI_MODE(val64);
747
748 if ( val64 & PCI_MODE_UNKNOWN_MODE)
749 return -1; /* Unknown PCI mode */
750
751 if (val64 & PCI_MODE_32_BITS) {
752 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
753 } else {
754 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
755 }
756
757 switch(mode) {
758 case PCI_MODE_PCI_33:
759 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
760 config->bus_speed = 33;
761 break;
762 case PCI_MODE_PCI_66:
763 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
764 config->bus_speed = 133;
765 break;
766 case PCI_MODE_PCIX_M1_66:
767 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
768 config->bus_speed = 133; /* Herc doubles the clock rate */
769 break;
770 case PCI_MODE_PCIX_M1_100:
771 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
772 config->bus_speed = 200;
773 break;
774 case PCI_MODE_PCIX_M1_133:
775 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
776 config->bus_speed = 266;
777 break;
778 case PCI_MODE_PCIX_M2_66:
779 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
780 config->bus_speed = 133;
781 break;
782 case PCI_MODE_PCIX_M2_100:
783 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
784 config->bus_speed = 200;
785 break;
786 case PCI_MODE_PCIX_M2_133:
787 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
788 config->bus_speed = 266;
789 break;
790 default:
791 return -1; /* Unsupported bus speed */
792 }
793
794 return mode;
795}
796
20346722 797/**
 798 * init_nic - Initialization of hardware
1da177e4 799 * @nic: device private variable
20346722 800 * Description: The function sequentially configures every block
 801 * of the H/W from their reset values.
 802 * Return Value: SUCCESS on success and
1da177e4 803 * '-1' on failure (endian settings incorrect).
804 */
805
806static int init_nic(struct s2io_nic *nic)
807{
808 XENA_dev_config_t __iomem *bar0 = nic->bar0;
809 struct net_device *dev = nic->dev;
810 register u64 val64 = 0;
811 void __iomem *add;
812 u32 time;
813 int i, j;
814 mac_info_t *mac_control;
815 struct config_param *config;
816 int mdio_cnt = 0, dtx_cnt = 0;
817 unsigned long long mem_share;
20346722 818 int mem_size;
1da177e4
LT
819
820 mac_control = &nic->mac_control;
821 config = &nic->config;
822
5e25b9dd 823 /* Set the swapper control on the card */
20346722 824 if(s2io_set_swapper(nic)) {
1da177e4 825 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
826 return -1;
827 }
828
541ae68f 829 /*
830 * Herc requires EOI to be removed from reset before XGXS, so..
831 */
832 if (nic->device_type & XFRAME_II_DEVICE) {
833 val64 = 0xA500000000ULL;
834 writeq(val64, &bar0->sw_reset);
835 msleep(500);
836 val64 = readq(&bar0->sw_reset);
837 }
838
1da177e4 839 /* Remove XGXS from reset state */
840 val64 = 0;
841 writeq(val64, &bar0->sw_reset);
1da177e4 842 msleep(500);
20346722 843 val64 = readq(&bar0->sw_reset);
1da177e4
LT
844
845 /* Enable Receiving broadcasts */
846 add = &bar0->mac_cfg;
847 val64 = readq(&bar0->mac_cfg);
848 val64 |= MAC_RMAC_BCAST_ENABLE;
849 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
850 writel((u32) val64, add);
851 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
852 writel((u32) (val64 >> 32), (add + 4));
853
854 /* Read registers in all blocks */
855 val64 = readq(&bar0->mac_int_mask);
856 val64 = readq(&bar0->mc_int_mask);
857 val64 = readq(&bar0->xgxs_int_mask);
858
859 /* Set MTU */
860 val64 = dev->mtu;
861 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
862
20346722 863 /*
 864 * Configuring the XAUI Interface of Xena.
1da177e4 865 * ***************************************
20346722 866 * To configure the Xena's XAUI, one has to write a series
 867 * of 64 bit values into two registers in a particular
 868 * sequence. Hence a macro 'SWITCH_SIGN' has been defined,
 869 * which is placed in the array of configuration values
541ae68f 870 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
20346722 871 * to switch writing from one register to another. We continue
1da177e4 872 * writing these values until we encounter the 'END_SIGN' macro.
20346722 873 * For example, after making a series of 21 writes into the
 874 * dtx_control register, 'SWITCH_SIGN' appears and hence we
1da177e4 875 * start writing into mdio_control until we encounter END_SIGN.
876 */
541ae68f
K
877 if (nic->device_type & XFRAME_II_DEVICE) {
878 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
303bcb4b 879 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 880 &bar0->dtx_control, UF);
541ae68f
K
881 if (dtx_cnt & 0x1)
882 msleep(1); /* Necessary!! */
1da177e4
LT
883 dtx_cnt++;
884 }
541ae68f
K
885 } else {
886 while (1) {
887 dtx_cfg:
888 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
889 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
890 dtx_cnt++;
891 goto mdio_cfg;
892 }
893 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
894 &bar0->dtx_control, UF);
895 val64 = readq(&bar0->dtx_control);
896 dtx_cnt++;
897 }
898 mdio_cfg:
899 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
900 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
901 mdio_cnt++;
902 goto dtx_cfg;
903 }
904 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
905 &bar0->mdio_control, UF);
906 val64 = readq(&bar0->mdio_control);
1da177e4 907 mdio_cnt++;
541ae68f
K
908 }
909 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
910 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
911 break;
912 } else {
1da177e4
LT
913 goto dtx_cfg;
914 }
1da177e4
LT
915 }
916 }
917
918 /* Tx DMA Initialization */
919 val64 = 0;
920 writeq(val64, &bar0->tx_fifo_partition_0);
921 writeq(val64, &bar0->tx_fifo_partition_1);
922 writeq(val64, &bar0->tx_fifo_partition_2);
923 writeq(val64, &bar0->tx_fifo_partition_3);
924
925
926 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
927 val64 |=
928 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
929 13) | vBIT(config->tx_cfg[i].fifo_priority,
930 ((i * 32) + 5), 3);
931
932 if (i == (config->tx_fifo_num - 1)) {
933 if (i % 2 == 0)
934 i++;
935 }
936
937 switch (i) {
938 case 1:
939 writeq(val64, &bar0->tx_fifo_partition_0);
940 val64 = 0;
941 break;
942 case 3:
943 writeq(val64, &bar0->tx_fifo_partition_1);
944 val64 = 0;
945 break;
946 case 5:
947 writeq(val64, &bar0->tx_fifo_partition_2);
948 val64 = 0;
949 break;
950 case 7:
951 writeq(val64, &bar0->tx_fifo_partition_3);
952 break;
953 }
954 }
955
956 /* Enable Tx FIFO partition 0. */
957 val64 = readq(&bar0->tx_fifo_partition_0);
958 val64 |= BIT(0); /* To enable the FIFO partition. */
959 writeq(val64, &bar0->tx_fifo_partition_0);
960
5e25b9dd
K
961 /*
962 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
963 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
964 */
541ae68f
K
965 if ((nic->device_type == XFRAME_I_DEVICE) &&
966 (get_xena_rev_id(nic->pdev) < 4))
5e25b9dd
K
967 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
968
1da177e4
LT
969 val64 = readq(&bar0->tx_fifo_partition_0);
970 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
971 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
972
20346722
K
973 /*
974 * Initialization of Tx_PA_CONFIG register to ignore packet
1da177e4
LT
975 * integrity checking.
976 */
977 val64 = readq(&bar0->tx_pa_cfg);
978 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
979 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
980 writeq(val64, &bar0->tx_pa_cfg);
981
 982 /* Rx DMA initialization. */
983 val64 = 0;
984 for (i = 0; i < config->rx_ring_num; i++) {
985 val64 |=
986 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
987 3);
988 }
989 writeq(val64, &bar0->rx_queue_priority);
990
20346722
K
991 /*
992 * Allocating equal share of memory to all the
1da177e4
LT
993 * configured Rings.
994 */
995 val64 = 0;
541ae68f
K
996 if (nic->device_type & XFRAME_II_DEVICE)
997 mem_size = 32;
998 else
999 mem_size = 64;
1000
1da177e4
LT
1001 for (i = 0; i < config->rx_ring_num; i++) {
1002 switch (i) {
1003 case 0:
20346722
K
1004 mem_share = (mem_size / config->rx_ring_num +
1005 mem_size % config->rx_ring_num);
1da177e4
LT
1006 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1007 continue;
1008 case 1:
20346722 1009 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1010 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1011 continue;
1012 case 2:
20346722 1013 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1014 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1015 continue;
1016 case 3:
20346722 1017 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1018 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1019 continue;
1020 case 4:
20346722 1021 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1022 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1023 continue;
1024 case 5:
20346722 1025 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1026 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1027 continue;
1028 case 6:
20346722 1029 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1030 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1031 continue;
1032 case 7:
20346722 1033 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1034 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1035 continue;
1036 }
1037 }
1038 writeq(val64, &bar0->rx_queue_cfg);
1039
20346722 1040 /*
5e25b9dd
K
1041 * Filling Tx round robin registers
1042 * as per the number of FIFOs
1da177e4 1043 */
5e25b9dd
K
1044 switch (config->tx_fifo_num) {
1045 case 1:
1046 val64 = 0x0000000000000000ULL;
1047 writeq(val64, &bar0->tx_w_round_robin_0);
1048 writeq(val64, &bar0->tx_w_round_robin_1);
1049 writeq(val64, &bar0->tx_w_round_robin_2);
1050 writeq(val64, &bar0->tx_w_round_robin_3);
1051 writeq(val64, &bar0->tx_w_round_robin_4);
1052 break;
1053 case 2:
1054 val64 = 0x0000010000010000ULL;
1055 writeq(val64, &bar0->tx_w_round_robin_0);
1056 val64 = 0x0100000100000100ULL;
1057 writeq(val64, &bar0->tx_w_round_robin_1);
1058 val64 = 0x0001000001000001ULL;
1059 writeq(val64, &bar0->tx_w_round_robin_2);
1060 val64 = 0x0000010000010000ULL;
1061 writeq(val64, &bar0->tx_w_round_robin_3);
1062 val64 = 0x0100000000000000ULL;
1063 writeq(val64, &bar0->tx_w_round_robin_4);
1064 break;
1065 case 3:
1066 val64 = 0x0001000102000001ULL;
1067 writeq(val64, &bar0->tx_w_round_robin_0);
1068 val64 = 0x0001020000010001ULL;
1069 writeq(val64, &bar0->tx_w_round_robin_1);
1070 val64 = 0x0200000100010200ULL;
1071 writeq(val64, &bar0->tx_w_round_robin_2);
1072 val64 = 0x0001000102000001ULL;
1073 writeq(val64, &bar0->tx_w_round_robin_3);
1074 val64 = 0x0001020000000000ULL;
1075 writeq(val64, &bar0->tx_w_round_robin_4);
1076 break;
1077 case 4:
1078 val64 = 0x0001020300010200ULL;
1079 writeq(val64, &bar0->tx_w_round_robin_0);
1080 val64 = 0x0100000102030001ULL;
1081 writeq(val64, &bar0->tx_w_round_robin_1);
1082 val64 = 0x0200010000010203ULL;
1083 writeq(val64, &bar0->tx_w_round_robin_2);
1084 val64 = 0x0001020001000001ULL;
1085 writeq(val64, &bar0->tx_w_round_robin_3);
1086 val64 = 0x0203000100000000ULL;
1087 writeq(val64, &bar0->tx_w_round_robin_4);
1088 break;
1089 case 5:
1090 val64 = 0x0001000203000102ULL;
1091 writeq(val64, &bar0->tx_w_round_robin_0);
1092 val64 = 0x0001020001030004ULL;
1093 writeq(val64, &bar0->tx_w_round_robin_1);
1094 val64 = 0x0001000203000102ULL;
1095 writeq(val64, &bar0->tx_w_round_robin_2);
1096 val64 = 0x0001020001030004ULL;
1097 writeq(val64, &bar0->tx_w_round_robin_3);
1098 val64 = 0x0001000000000000ULL;
1099 writeq(val64, &bar0->tx_w_round_robin_4);
1100 break;
1101 case 6:
1102 val64 = 0x0001020304000102ULL;
1103 writeq(val64, &bar0->tx_w_round_robin_0);
1104 val64 = 0x0304050001020001ULL;
1105 writeq(val64, &bar0->tx_w_round_robin_1);
1106 val64 = 0x0203000100000102ULL;
1107 writeq(val64, &bar0->tx_w_round_robin_2);
1108 val64 = 0x0304000102030405ULL;
1109 writeq(val64, &bar0->tx_w_round_robin_3);
1110 val64 = 0x0001000200000000ULL;
1111 writeq(val64, &bar0->tx_w_round_robin_4);
1112 break;
1113 case 7:
1114 val64 = 0x0001020001020300ULL;
1115 writeq(val64, &bar0->tx_w_round_robin_0);
1116 val64 = 0x0102030400010203ULL;
1117 writeq(val64, &bar0->tx_w_round_robin_1);
1118 val64 = 0x0405060001020001ULL;
1119 writeq(val64, &bar0->tx_w_round_robin_2);
1120 val64 = 0x0304050000010200ULL;
1121 writeq(val64, &bar0->tx_w_round_robin_3);
1122 val64 = 0x0102030000000000ULL;
1123 writeq(val64, &bar0->tx_w_round_robin_4);
1124 break;
1125 case 8:
1126 val64 = 0x0001020300040105ULL;
1127 writeq(val64, &bar0->tx_w_round_robin_0);
1128 val64 = 0x0200030106000204ULL;
1129 writeq(val64, &bar0->tx_w_round_robin_1);
1130 val64 = 0x0103000502010007ULL;
1131 writeq(val64, &bar0->tx_w_round_robin_2);
1132 val64 = 0x0304010002060500ULL;
1133 writeq(val64, &bar0->tx_w_round_robin_3);
1134 val64 = 0x0103020400000000ULL;
1135 writeq(val64, &bar0->tx_w_round_robin_4);
1136 break;
1137 }
1138
1139 /* Filling the Rx round robin registers as per the
1140 * number of Rings and steering based on QoS.
1141 */
1142 switch (config->rx_ring_num) {
1143 case 1:
1144 val64 = 0x8080808080808080ULL;
1145 writeq(val64, &bar0->rts_qos_steering);
1146 break;
1147 case 2:
1148 val64 = 0x0000010000010000ULL;
1149 writeq(val64, &bar0->rx_w_round_robin_0);
1150 val64 = 0x0100000100000100ULL;
1151 writeq(val64, &bar0->rx_w_round_robin_1);
1152 val64 = 0x0001000001000001ULL;
1153 writeq(val64, &bar0->rx_w_round_robin_2);
1154 val64 = 0x0000010000010000ULL;
1155 writeq(val64, &bar0->rx_w_round_robin_3);
1156 val64 = 0x0100000000000000ULL;
1157 writeq(val64, &bar0->rx_w_round_robin_4);
1158
1159 val64 = 0x8080808040404040ULL;
1160 writeq(val64, &bar0->rts_qos_steering);
1161 break;
1162 case 3:
1163 val64 = 0x0001000102000001ULL;
1164 writeq(val64, &bar0->rx_w_round_robin_0);
1165 val64 = 0x0001020000010001ULL;
1166 writeq(val64, &bar0->rx_w_round_robin_1);
1167 val64 = 0x0200000100010200ULL;
1168 writeq(val64, &bar0->rx_w_round_robin_2);
1169 val64 = 0x0001000102000001ULL;
1170 writeq(val64, &bar0->rx_w_round_robin_3);
1171 val64 = 0x0001020000000000ULL;
1172 writeq(val64, &bar0->rx_w_round_robin_4);
1173
1174 val64 = 0x8080804040402020ULL;
1175 writeq(val64, &bar0->rts_qos_steering);
1176 break;
1177 case 4:
1178 val64 = 0x0001020300010200ULL;
1179 writeq(val64, &bar0->rx_w_round_robin_0);
1180 val64 = 0x0100000102030001ULL;
1181 writeq(val64, &bar0->rx_w_round_robin_1);
1182 val64 = 0x0200010000010203ULL;
1183 writeq(val64, &bar0->rx_w_round_robin_2);
1184 val64 = 0x0001020001000001ULL;
1185 writeq(val64, &bar0->rx_w_round_robin_3);
1186 val64 = 0x0203000100000000ULL;
1187 writeq(val64, &bar0->rx_w_round_robin_4);
1188
1189 val64 = 0x8080404020201010ULL;
1190 writeq(val64, &bar0->rts_qos_steering);
1191 break;
1192 case 5:
1193 val64 = 0x0001000203000102ULL;
1194 writeq(val64, &bar0->rx_w_round_robin_0);
1195 val64 = 0x0001020001030004ULL;
1196 writeq(val64, &bar0->rx_w_round_robin_1);
1197 val64 = 0x0001000203000102ULL;
1198 writeq(val64, &bar0->rx_w_round_robin_2);
1199 val64 = 0x0001020001030004ULL;
1200 writeq(val64, &bar0->rx_w_round_robin_3);
1201 val64 = 0x0001000000000000ULL;
1202 writeq(val64, &bar0->rx_w_round_robin_4);
1203
1204 val64 = 0x8080404020201008ULL;
1205 writeq(val64, &bar0->rts_qos_steering);
1206 break;
1207 case 6:
1208 val64 = 0x0001020304000102ULL;
1209 writeq(val64, &bar0->rx_w_round_robin_0);
1210 val64 = 0x0304050001020001ULL;
1211 writeq(val64, &bar0->rx_w_round_robin_1);
1212 val64 = 0x0203000100000102ULL;
1213 writeq(val64, &bar0->rx_w_round_robin_2);
1214 val64 = 0x0304000102030405ULL;
1215 writeq(val64, &bar0->rx_w_round_robin_3);
1216 val64 = 0x0001000200000000ULL;
1217 writeq(val64, &bar0->rx_w_round_robin_4);
1218
1219 val64 = 0x8080404020100804ULL;
1220 writeq(val64, &bar0->rts_qos_steering);
1221 break;
1222 case 7:
1223 val64 = 0x0001020001020300ULL;
1224 writeq(val64, &bar0->rx_w_round_robin_0);
1225 val64 = 0x0102030400010203ULL;
1226 writeq(val64, &bar0->rx_w_round_robin_1);
1227 val64 = 0x0405060001020001ULL;
1228 writeq(val64, &bar0->rx_w_round_robin_2);
1229 val64 = 0x0304050000010200ULL;
1230 writeq(val64, &bar0->rx_w_round_robin_3);
1231 val64 = 0x0102030000000000ULL;
1232 writeq(val64, &bar0->rx_w_round_robin_4);
1233
1234 val64 = 0x8080402010080402ULL;
1235 writeq(val64, &bar0->rts_qos_steering);
1236 break;
1237 case 8:
1238 val64 = 0x0001020300040105ULL;
1239 writeq(val64, &bar0->rx_w_round_robin_0);
1240 val64 = 0x0200030106000204ULL;
1241 writeq(val64, &bar0->rx_w_round_robin_1);
1242 val64 = 0x0103000502010007ULL;
1243 writeq(val64, &bar0->rx_w_round_robin_2);
1244 val64 = 0x0304010002060500ULL;
1245 writeq(val64, &bar0->rx_w_round_robin_3);
1246 val64 = 0x0103020400000000ULL;
1247 writeq(val64, &bar0->rx_w_round_robin_4);
1248
1249 val64 = 0x8040201008040201ULL;
1250 writeq(val64, &bar0->rts_qos_steering);
1251 break;
1252 }
1da177e4
LT
1253
1254 /* UDP Fix */
1255 val64 = 0;
20346722 1256 for (i = 0; i < 8; i++)
1da177e4
LT
1257 writeq(val64, &bar0->rts_frm_len_n[i]);
1258
5e25b9dd
K
1259 /* Set the default rts frame length for the rings configured */
1260 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1261 for (i = 0 ; i < config->rx_ring_num ; i++)
1262 writeq(val64, &bar0->rts_frm_len_n[i]);
1263
1264 /* Set the frame length for the configured rings
1265 * desired by the user
1266 */
1267 for (i = 0; i < config->rx_ring_num; i++) {
 1268 /* If rts_frm_len[i] == 0 then it is assumed that the user has
 1269 * not specified frame length steering.
1270 * If the user provides the frame length then program
1271 * the rts_frm_len register for those values or else
1272 * leave it as it is.
1273 */
1274 if (rts_frm_len[i] != 0) {
1275 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1276 &bar0->rts_frm_len_n[i]);
1277 }
1278 }
1da177e4 1279
20346722 1280 /* Program statistics memory */
1da177e4 1281 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1282
541ae68f
K
1283 if (nic->device_type == XFRAME_II_DEVICE) {
1284 val64 = STAT_BC(0x320);
1285 writeq(val64, &bar0->stat_byte_cnt);
1286 }
1287
20346722 1288 /*
1da177e4
LT
1289 * Initializing the sampling rate for the device to calculate the
1290 * bandwidth utilization.
1291 */
1292 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1293 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1294 writeq(val64, &bar0->mac_link_util);
1295
1296
20346722
K
1297 /*
1298 * Initializing the Transmit and Receive Traffic Interrupt
1da177e4
LT
1299 * Scheme.
1300 */
20346722
K
1301 /*
1302 * TTI Initialization. Default Tx timer gets us about
1da177e4
LT
1303 * 250 interrupts per sec. Continuous interrupts are enabled
1304 * by default.
1305 */
541ae68f
K
1306 if (nic->device_type == XFRAME_II_DEVICE) {
1307 int count = (nic->config.bus_speed * 125)/2;
1308 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1309 } else {
1310
1311 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1312 }
1313 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1da177e4 1314 TTI_DATA1_MEM_TX_URNG_B(0x10) |
5e25b9dd 1315 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
541ae68f
K
1316 if (use_continuous_tx_intrs)
1317 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1da177e4
LT
1318 writeq(val64, &bar0->tti_data1_mem);
1319
1320 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1321 TTI_DATA2_MEM_TX_UFC_B(0x20) |
5e25b9dd 1322 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1da177e4
LT
1323 writeq(val64, &bar0->tti_data2_mem);
1324
1325 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1326 writeq(val64, &bar0->tti_command_mem);
1327
20346722 1328 /*
1da177e4
LT
1329 * Once the operation completes, the Strobe bit of the command
1330 * register will be reset. We poll for this particular condition
1331 * We wait for a maximum of 500ms for the operation to complete,
1332 * if it's not complete by then we return error.
1333 */
1334 time = 0;
1335 while (TRUE) {
1336 val64 = readq(&bar0->tti_command_mem);
1337 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1338 break;
1339 }
1340 if (time > 10) {
1341 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1342 dev->name);
1343 return -1;
1344 }
1345 msleep(50);
1346 time++;
1347 }
1348
b6e3f982
K
1349 if (nic->config.bimodal) {
1350 int k = 0;
1351 for (k = 0; k < config->rx_ring_num; k++) {
1352 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1353 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1354 writeq(val64, &bar0->tti_command_mem);
541ae68f 1355
541ae68f 1356 /*
b6e3f982
K
1357 * Once the operation completes, the Strobe bit of the command
1358 * register will be reset. We poll for this particular condition
1359 * We wait for a maximum of 500ms for the operation to complete,
1360 * if it's not complete by then we return error.
1361 */
1362 time = 0;
1363 while (TRUE) {
1364 val64 = readq(&bar0->tti_command_mem);
1365 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1366 break;
1367 }
1368 if (time > 10) {
1369 DBG_PRINT(ERR_DBG,
1370 "%s: TTI init Failed\n",
1371 dev->name);
1372 return -1;
1373 }
1374 time++;
1375 msleep(50);
1376 }
1377 }
541ae68f 1378 } else {
1da177e4 1379
b6e3f982
K
1380 /* RTI Initialization */
1381 if (nic->device_type == XFRAME_II_DEVICE) {
1382 /*
 1383 * Programmed to generate approximately 500 interrupts
 1384 * per second
1385 */
1386 int count = (nic->config.bus_speed * 125)/4;
1387 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1388 } else {
1389 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1390 }
1391 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1392 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1393 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1da177e4 1394
b6e3f982 1395 writeq(val64, &bar0->rti_data1_mem);
1da177e4 1396
b6e3f982
K
1397 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1398 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1399 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1400 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1401
b6e3f982
K
1402 for (i = 0; i < config->rx_ring_num; i++) {
1403 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1404 | RTI_CMD_MEM_OFFSET(i);
1405 writeq(val64, &bar0->rti_command_mem);
1406
1407 /*
1408 * Once the operation completes, the Strobe bit of the
1409 * command register will be reset. We poll for this
1410 * particular condition. We wait for a maximum of 500ms
1411 * for the operation to complete, if it's not complete
1412 * by then we return error.
1413 */
1414 time = 0;
1415 while (TRUE) {
1416 val64 = readq(&bar0->rti_command_mem);
1417 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1418 break;
1419 }
1420 if (time > 10) {
1421 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1422 dev->name);
1423 return -1;
1424 }
1425 time++;
1426 msleep(50);
1427 }
1da177e4 1428 }
1da177e4
LT
1429 }
1430
20346722
K
1431 /*
1432 * Initializing proper values as Pause threshold into all
1da177e4
LT
1433 * the 8 Queues on Rx side.
1434 */
1435 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1436 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1437
1438 /* Disable RMAC PAD STRIPPING */
509a2671 1439 add = &bar0->mac_cfg;
1da177e4
LT
1440 val64 = readq(&bar0->mac_cfg);
1441 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1442 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1443 writel((u32) (val64), add);
1444 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1445 writel((u32) (val64 >> 32), (add + 4));
1446 val64 = readq(&bar0->mac_cfg);
1447
20346722
K
1448 /*
1449 * Set the time value to be inserted in the pause frame
1da177e4
LT
1450 * generated by xena.
1451 */
1452 val64 = readq(&bar0->rmac_pause_cfg);
1453 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1454 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1455 writeq(val64, &bar0->rmac_pause_cfg);
1456
20346722 1457 /*
1da177e4
LT
1458 * Set the Threshold Limit for Generating the pause frame
1459 * If the amount of data in any Queue exceeds ratio of
1460 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1461 * pause frame is generated
1462 */
1463 val64 = 0;
1464 for (i = 0; i < 4; i++) {
1465 val64 |=
1466 (((u64) 0xFF00 | nic->mac_control.
1467 mc_pause_threshold_q0q3)
1468 << (i * 2 * 8));
1469 }
1470 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1471
1472 val64 = 0;
1473 for (i = 0; i < 4; i++) {
1474 val64 |=
1475 (((u64) 0xFF00 | nic->mac_control.
1476 mc_pause_threshold_q4q7)
1477 << (i * 2 * 8));
1478 }
1479 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1480
20346722
K
1481 /*
1482 * TxDMA will stop Read request if the number of read split has
1da177e4
LT
1483 * exceeded the limit pointed by shared_splits
1484 */
1485 val64 = readq(&bar0->pic_control);
1486 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1487 writeq(val64, &bar0->pic_control);
1488
541ae68f
K
1489 /*
1490 * Programming the Herc to split every write transaction
1491 * that does not start on an ADB to reduce disconnects.
1492 */
1493 if (nic->device_type == XFRAME_II_DEVICE) {
1494 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1495 writeq(val64, &bar0->wreq_split_mask);
1496 }
1497
a371a07d 1498 /* Setting Link stability period to 64 ms */
1499 if (nic->device_type == XFRAME_II_DEVICE) {
1500 val64 = MISC_LINK_STABILITY_PRD(3);
1501 writeq(val64, &bar0->misc_control);
1502 }
1503
1da177e4
LT
1504 return SUCCESS;
1505}
a371a07d
K
1506#define LINK_UP_DOWN_INTERRUPT 1
1507#define MAC_RMAC_ERR_TIMER 2
1508
1509#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1510#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1511#else
1512int s2io_link_fault_indication(nic_t *nic)
1513{
1514 if (nic->device_type == XFRAME_II_DEVICE)
1515 return LINK_UP_DOWN_INTERRUPT;
1516 else
1517 return MAC_RMAC_ERR_TIMER;
1518}
1519#endif
1da177e4 1520
20346722 1521/**
 1522 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1da177e4 1523 * @nic: device private variable,
 1524 * @mask: A mask indicating which Intr block must be modified and,
 1525 * @flag: A flag indicating whether to enable or disable the Intrs.
 1526 * Description: This function will either disable or enable the interrupts
20346722 1527 * depending on the flag argument. The mask argument can be used to
1528 * enable/disable any Intr block.
1da177e4
LT
1529 * Return Value: NONE.
1530 */
1531
1532static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1533{
1534 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1535 register u64 val64 = 0, temp64 = 0;
1536
1537 /* Top level interrupt classification */
1538 /* PIC Interrupts */
1539 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1540 /* Enable PIC Intrs in the general intr mask register */
1541 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1542 if (flag == ENABLE_INTRS) {
1543 temp64 = readq(&bar0->general_int_mask);
1544 temp64 &= ~((u64) val64);
1545 writeq(temp64, &bar0->general_int_mask);
20346722 1546 /*
a371a07d 1547 * If this is a Hercules adapter, enable the GPIO
 1548 * interrupt; otherwise keep all PCIX, Flash, MDIO, IIC
20346722 1549 * and GPIO interrupts disabled for now.
 1550 * TODO
1da177e4 1551 */
a371a07d
K
1552 if (s2io_link_fault_indication(nic) ==
1553 LINK_UP_DOWN_INTERRUPT ) {
1554 temp64 = readq(&bar0->pic_int_mask);
1555 temp64 &= ~((u64) PIC_INT_GPIO);
1556 writeq(temp64, &bar0->pic_int_mask);
1557 temp64 = readq(&bar0->gpio_int_mask);
1558 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1559 writeq(temp64, &bar0->gpio_int_mask);
1560 } else {
1561 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1562 }
20346722 1563 /*
1da177e4
LT
1564 * No MSI Support is available presently, so TTI and
1565 * RTI interrupts are also disabled.
1566 */
1567 } else if (flag == DISABLE_INTRS) {
20346722
K
1568 /*
1569 * Disable PIC Intrs in the general
1570 * intr mask register
1da177e4
LT
1571 */
1572 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1573 temp64 = readq(&bar0->general_int_mask);
1574 val64 |= temp64;
1575 writeq(val64, &bar0->general_int_mask);
1576 }
1577 }
1578
1579 /* DMA Interrupts */
1580 /* Enabling/Disabling Tx DMA interrupts */
1581 if (mask & TX_DMA_INTR) {
1582 /* Enable TxDMA Intrs in the general intr mask register */
1583 val64 = TXDMA_INT_M;
1584 if (flag == ENABLE_INTRS) {
1585 temp64 = readq(&bar0->general_int_mask);
1586 temp64 &= ~((u64) val64);
1587 writeq(temp64, &bar0->general_int_mask);
20346722
K
1588 /*
1589 * Keep all interrupts other than PFC interrupt
1da177e4
LT
1590 * and PCC interrupt disabled in DMA level.
1591 */
1592 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1593 TXDMA_PCC_INT_M);
1594 writeq(val64, &bar0->txdma_int_mask);
20346722
K
1595 /*
1596 * Enable only the MISC error 1 interrupt in PFC block
1da177e4
LT
1597 */
1598 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1599 writeq(val64, &bar0->pfc_err_mask);
20346722
K
1600 /*
1601 * Enable only the FB_ECC error interrupt in PCC block
1da177e4
LT
1602 */
1603 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1604 writeq(val64, &bar0->pcc_err_mask);
1605 } else if (flag == DISABLE_INTRS) {
20346722
K
1606 /*
1607 * Disable TxDMA Intrs in the general intr mask
1608 * register
1da177e4
LT
1609 */
1610 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1611 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1612 temp64 = readq(&bar0->general_int_mask);
1613 val64 |= temp64;
1614 writeq(val64, &bar0->general_int_mask);
1615 }
1616 }
1617
1618 /* Enabling/Disabling Rx DMA interrupts */
1619 if (mask & RX_DMA_INTR) {
1620 /* Enable RxDMA Intrs in the general intr mask register */
1621 val64 = RXDMA_INT_M;
1622 if (flag == ENABLE_INTRS) {
1623 temp64 = readq(&bar0->general_int_mask);
1624 temp64 &= ~((u64) val64);
1625 writeq(temp64, &bar0->general_int_mask);
20346722
K
1626 /*
1627 * All RxDMA block interrupts are disabled for now
1628 * TODO
1da177e4
LT
1629 */
1630 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1631 } else if (flag == DISABLE_INTRS) {
20346722
K
1632 /*
1633 * Disable RxDMA Intrs in the general intr mask
1634 * register
1da177e4
LT
1635 */
1636 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1637 temp64 = readq(&bar0->general_int_mask);
1638 val64 |= temp64;
1639 writeq(val64, &bar0->general_int_mask);
1640 }
1641 }
1642
1643 /* MAC Interrupts */
1644 /* Enabling/Disabling MAC interrupts */
1645 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1646 val64 = TXMAC_INT_M | RXMAC_INT_M;
1647 if (flag == ENABLE_INTRS) {
1648 temp64 = readq(&bar0->general_int_mask);
1649 temp64 &= ~((u64) val64);
1650 writeq(temp64, &bar0->general_int_mask);
20346722
K
1651 /*
1652 * All MAC block error interrupts are disabled for now
1da177e4
LT
1653 * TODO
1654 */
1da177e4 1655 } else if (flag == DISABLE_INTRS) {
20346722
K
1656 /*
1657 * Disable MAC Intrs in the general intr mask register
1da177e4
LT
1658 */
1659 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1660 writeq(DISABLE_ALL_INTRS,
1661 &bar0->mac_rmac_err_mask);
1662
1663 temp64 = readq(&bar0->general_int_mask);
1664 val64 |= temp64;
1665 writeq(val64, &bar0->general_int_mask);
1666 }
1667 }
1668
1669 /* XGXS Interrupts */
1670 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1671 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1672 if (flag == ENABLE_INTRS) {
1673 temp64 = readq(&bar0->general_int_mask);
1674 temp64 &= ~((u64) val64);
1675 writeq(temp64, &bar0->general_int_mask);
20346722 1676 /*
1da177e4 1677 * All XGXS block error interrupts are disabled for now
20346722 1678 * TODO
1da177e4
LT
1679 */
1680 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1681 } else if (flag == DISABLE_INTRS) {
20346722
K
1682 /*
1683 * Disable MC Intrs in the general intr mask register
1da177e4
LT
1684 */
1685 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1686 temp64 = readq(&bar0->general_int_mask);
1687 val64 |= temp64;
1688 writeq(val64, &bar0->general_int_mask);
1689 }
1690 }
1691
1692 /* Memory Controller(MC) interrupts */
1693 if (mask & MC_INTR) {
1694 val64 = MC_INT_M;
1695 if (flag == ENABLE_INTRS) {
1696 temp64 = readq(&bar0->general_int_mask);
1697 temp64 &= ~((u64) val64);
1698 writeq(temp64, &bar0->general_int_mask);
20346722 1699 /*
5e25b9dd 1700 * Enable all MC Intrs.
1da177e4 1701 */
5e25b9dd
K
1702 writeq(0x0, &bar0->mc_int_mask);
1703 writeq(0x0, &bar0->mc_err_mask);
1da177e4
LT
1704 } else if (flag == DISABLE_INTRS) {
1705 /*
1706 * Disable MC Intrs in the general intr mask register
1707 */
1708 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1709 temp64 = readq(&bar0->general_int_mask);
1710 val64 |= temp64;
1711 writeq(val64, &bar0->general_int_mask);
1712 }
1713 }
1714
1715
1716 /* Tx traffic interrupts */
1717 if (mask & TX_TRAFFIC_INTR) {
1718 val64 = TXTRAFFIC_INT_M;
1719 if (flag == ENABLE_INTRS) {
1720 temp64 = readq(&bar0->general_int_mask);
1721 temp64 &= ~((u64) val64);
1722 writeq(temp64, &bar0->general_int_mask);
20346722 1723 /*
1da177e4 1724 * Enable all the Tx side interrupts
20346722 1725 * writing 0 Enables all 64 TX interrupt levels
1da177e4
LT
1726 */
1727 writeq(0x0, &bar0->tx_traffic_mask);
1728 } else if (flag == DISABLE_INTRS) {
20346722
K
1729 /*
1730 * Disable Tx Traffic Intrs in the general intr mask
1da177e4
LT
1731 * register.
1732 */
1733 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1734 temp64 = readq(&bar0->general_int_mask);
1735 val64 |= temp64;
1736 writeq(val64, &bar0->general_int_mask);
1737 }
1738 }
1739
1740 /* Rx traffic interrupts */
1741 if (mask & RX_TRAFFIC_INTR) {
1742 val64 = RXTRAFFIC_INT_M;
1743 if (flag == ENABLE_INTRS) {
1744 temp64 = readq(&bar0->general_int_mask);
1745 temp64 &= ~((u64) val64);
1746 writeq(temp64, &bar0->general_int_mask);
1747 /* writing 0 Enables all 8 RX interrupt levels */
1748 writeq(0x0, &bar0->rx_traffic_mask);
1749 } else if (flag == DISABLE_INTRS) {
20346722
K
1750 /*
1751 * Disable Rx Traffic Intrs in the general intr mask
1da177e4
LT
1752 * register.
1753 */
1754 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1755 temp64 = readq(&bar0->general_int_mask);
1756 val64 |= temp64;
1757 writeq(val64, &bar0->general_int_mask);
1758 }
1759 }
1760}
1761
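/*
 * check_prc_pcc_state - Checks the PCC idle and PRC quiescent status bits.
 * @val64 : value read from the adapter status register.
 * @flag : indicates if the adapter enable bit was ever written once before.
 * @rev_id : Xframe I revision id, selects which PCC idle bit is checked.
 * @herc : set when the adapter is an Xframe II device.
 * Description: Helper for verify_xena_quiescence(). Returns 1 when the
 * RMAC PCC idle and RC PRC quiescent bits match the state expected for
 * the given flag, 0 otherwise.
 */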
541ae68f 1762static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
20346722
K
1763{
1764 int ret = 0;
1765
1766 if (flag == FALSE) {
541ae68f 1767 if ((!herc && (rev_id >= 4)) || herc) {
5e25b9dd
K
1768 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1769 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1770 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1771 ret = 1;
1772 }
541ae68f 1773 }else {
5e25b9dd
K
1774 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1775 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1776 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1777 ret = 1;
1778 }
20346722
K
1779 }
1780 } else {
541ae68f 1781 if ((!herc && (rev_id >= 4)) || herc) {
5e25b9dd
K
1782 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1783 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1784 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1785 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1786 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1787 ret = 1;
1788 }
1789 } else {
1790 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1791 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1792 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1793 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1794 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1795 ret = 1;
1796 }
20346722
K
1797 }
1798 }
1799
1800 return ret;
1801}
1802/**
1803 * verify_xena_quiescence - Checks whether the H/W is ready
1da177e4
LT
1804 * @val64 : Value read from adapter status register.
1805 * @flag : indicates if the adapter enable bit was ever written once
1806 * before.
1807 * Description: Returns whether the H/W is ready to go or not. Depending
20346722 1808 * on whether the adapter enable bit was written or not, the comparison
1da177e4
LT
1809 * differs and the calling function passes the input argument flag to
1810 * indicate this.
20346722 1811 * Return: 1 if Xena is quiescent
1da177e4
LT
 1812 * 0 if Xena is not quiescent
1813 */
1814
20346722 1815static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1da177e4 1816{
541ae68f 1817 int ret = 0, herc;
1da177e4 1818 u64 tmp64 = ~((u64) val64);
5e25b9dd 1819 int rev_id = get_xena_rev_id(sp->pdev);
1da177e4 1820
541ae68f 1821 herc = (sp->device_type == XFRAME_II_DEVICE);
1da177e4
LT
1822 if (!
1823 (tmp64 &
1824 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1825 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1826 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1827 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1828 ADAPTER_STATUS_P_PLL_LOCK))) {
541ae68f 1829 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1da177e4
LT
1830 }
1831
1832 return ret;
1833}
1834
1835/**
1836 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 1837 * @sp: Pointer to device specific structure
20346722 1838 * Description :
1da177e4
LT
1839 * New procedure to clear mac address reading problems on Alpha platforms
1840 *
1841 */
1842
20346722 1843void fix_mac_address(nic_t * sp)
1da177e4
LT
1844{
1845 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1846 u64 val64;
1847 int i = 0;
1848
1849 while (fix_mac[i] != END_SIGN) {
1850 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 1851 udelay(10);
1da177e4
LT
1852 val64 = readq(&bar0->gpio_control);
1853 }
1854}
1855
1856/**
20346722 1857 * start_nic - Turns the device on
1da177e4 1858 * @nic : device private variable.
20346722
K
1859 * Description:
1860 * This function actually turns the device on. Before this function is
 1861 * called, all registers are configured from their reset states
1862 * and shared memory is allocated but the NIC is still quiescent. On
1da177e4
LT
1863 * calling this function, the device interrupts are cleared and the NIC is
1864 * literally switched on by writing into the adapter control register.
20346722 1865 * Return Value:
1da177e4
LT
1866 * SUCCESS on success and -1 on failure.
1867 */
1868
1869static int start_nic(struct s2io_nic *nic)
1870{
1871 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1872 struct net_device *dev = nic->dev;
1873 register u64 val64 = 0;
20346722
K
1874 u16 interruptible;
1875 u16 subid, i;
1da177e4
LT
1876 mac_info_t *mac_control;
1877 struct config_param *config;
1878
1879 mac_control = &nic->mac_control;
1880 config = &nic->config;
1881
1882 /* PRC Initialization and configuration */
1883 for (i = 0; i < config->rx_ring_num; i++) {
20346722 1884 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1da177e4
LT
1885 &bar0->prc_rxd0_n[i]);
1886
1887 val64 = readq(&bar0->prc_ctrl_n[i]);
b6e3f982
K
1888 if (nic->config.bimodal)
1889 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1da177e4
LT
1890#ifndef CONFIG_2BUFF_MODE
1891 val64 |= PRC_CTRL_RC_ENABLED;
1892#else
1893 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1894#endif
1895 writeq(val64, &bar0->prc_ctrl_n[i]);
1896 }
1897
1898#ifdef CONFIG_2BUFF_MODE
1899 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1900 val64 = readq(&bar0->rx_pa_cfg);
1901 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1902 writeq(val64, &bar0->rx_pa_cfg);
1903#endif
1904
20346722 1905 /*
1da177e4
LT
1906 * Enabling MC-RLDRAM. After enabling the device, we timeout
1907 * for around 100ms, which is approximately the time required
1908 * for the device to be ready for operation.
1909 */
1910 val64 = readq(&bar0->mc_rldram_mrs);
1911 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1912 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1913 val64 = readq(&bar0->mc_rldram_mrs);
1914
20346722 1915 msleep(100); /* Delay by around 100 ms. */
1da177e4
LT
1916
1917 /* Enabling ECC Protection. */
1918 val64 = readq(&bar0->adapter_control);
1919 val64 &= ~ADAPTER_ECC_EN;
1920 writeq(val64, &bar0->adapter_control);
1921
20346722
K
1922 /*
1923 * Clearing any possible Link state change interrupts that
1da177e4
LT
1924 * could have popped up just before Enabling the card.
1925 */
1926 val64 = readq(&bar0->mac_rmac_err_reg);
1927 if (val64)
1928 writeq(val64, &bar0->mac_rmac_err_reg);
1929
20346722
K
1930 /*
1931 * Verify if the device is ready to be enabled, if so enable
1da177e4
LT
1932 * it.
1933 */
1934 val64 = readq(&bar0->adapter_status);
20346722 1935 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1da177e4
LT
1936 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1937 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1938 (unsigned long long) val64);
1939 return FAILURE;
1940 }
1941
1942 /* Enable select interrupts */
e960fc5c 1943 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
a371a07d
K
1944 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1945 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1946
1da177e4
LT
1947 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1948
20346722 1949 /*
1da177e4 1950 * With some switches, link might be already up at this point.
20346722
K
1951 * Because of this weird behavior, when we enable laser,
1952 * we may not get link. We need to handle this. We cannot
1953 * figure out which switch is misbehaving. So we are forced to
1954 * make a global change.
1da177e4
LT
1955 */
1956
1957 /* Enabling Laser. */
1958 val64 = readq(&bar0->adapter_control);
1959 val64 |= ADAPTER_EOI_TX_ON;
1960 writeq(val64, &bar0->adapter_control);
1961
1962 /* SXE-002: Initialize link and activity LED */
1963 subid = nic->pdev->subsystem_device;
541ae68f
K
1964 if (((subid & 0xFF) >= 0x07) &&
1965 (nic->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
1966 val64 = readq(&bar0->gpio_control);
1967 val64 |= 0x0000800000000000ULL;
1968 writeq(val64, &bar0->gpio_control);
1969 val64 = 0x0411040400000000ULL;
509a2671 1970 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
1971 }
1972
20346722
K
1973 /*
1974 * Don't see link state interrupts on certain switches, so
1da177e4
LT
1975 * directly scheduling a link state task from here.
1976 */
1977 schedule_work(&nic->set_link_task);
1978
1da177e4
LT
1979 return SUCCESS;
1980}
1981
20346722
K
1982/**
1983 * free_tx_buffers - Free all queued Tx buffers
1da177e4 1984 * @nic : device private variable.
20346722 1985 * Description:
1da177e4 1986 * Free all queued Tx buffers.
20346722 1987 * Return Value: void
1da177e4
LT
1988*/
1989
1990static void free_tx_buffers(struct s2io_nic *nic)
1991{
1992 struct net_device *dev = nic->dev;
1993 struct sk_buff *skb;
1994 TxD_t *txdp;
1995 int i, j;
1996 mac_info_t *mac_control;
1997 struct config_param *config;
1ddc50d4 1998 int cnt = 0, frg_cnt;
1da177e4
LT
1999
2000 mac_control = &nic->mac_control;
2001 config = &nic->config;
2002
2003 for (i = 0; i < config->tx_fifo_num; i++) {
2004 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
20346722 2005 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1da177e4
LT
2006 list_virt_addr;
2007 skb =
2008 (struct sk_buff *) ((unsigned long) txdp->
2009 Host_Control);
2010 if (skb == NULL) {
1ddc50d4
K
2011 memset(txdp, 0, sizeof(TxD_t) *
2012 config->max_txds);
1da177e4
LT
2013 continue;
2014 }
1ddc50d4
K
2015 frg_cnt = skb_shinfo(skb)->nr_frags;
2016 pci_unmap_single(nic->pdev, (dma_addr_t)
2017 txdp->Buffer_Pointer,
2018 skb->len - skb->data_len,
2019 PCI_DMA_TODEVICE);
2020 if (frg_cnt) {
2021 TxD_t *temp;
2022 temp = txdp;
2023 txdp++;
2024 for (j = 0; j < frg_cnt; j++, txdp++) {
2025 skb_frag_t *frag =
2026 &skb_shinfo(skb)->frags[j];
2027 pci_unmap_page(nic->pdev,
2028 (dma_addr_t)
2029 txdp->
2030 Buffer_Pointer,
2031 frag->size,
2032 PCI_DMA_TODEVICE);
2033 }
2034 txdp = temp;
2035 }
1da177e4 2036 dev_kfree_skb(skb);
1ddc50d4 2037 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1da177e4
LT
2038 cnt++;
2039 }
2040 DBG_PRINT(INTR_DBG,
2041 "%s:forcibly freeing %d skbs on FIFO%d\n",
2042 dev->name, cnt, i);
20346722
K
2043 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2044 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1da177e4
LT
2045 }
2046}
2047
20346722
K
2048/**
2049 * stop_nic - To stop the nic
1da177e4 2050 * @nic ; device private variable.
20346722
K
2051 * Description:
2052 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2053 * function does. This function is called to stop the device.
2054 * Return Value:
2055 * void.
2056 */
2057
2058static void stop_nic(struct s2io_nic *nic)
2059{
2060 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2061 register u64 val64 = 0;
2062 u16 interruptible, i;
2063 mac_info_t *mac_control;
2064 struct config_param *config;
2065
2066 mac_control = &nic->mac_control;
2067 config = &nic->config;
2068
2069 /* Disable all interrupts */
e960fc5c 2070 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
a371a07d
K
2071 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2072 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1da177e4
LT
2073 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2074
2075 /* Disable PRCs */
2076 for (i = 0; i < config->rx_ring_num; i++) {
2077 val64 = readq(&bar0->prc_ctrl_n[i]);
2078 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2079 writeq(val64, &bar0->prc_ctrl_n[i]);
2080 }
2081}
2082
20346722
K
2083/**
2084 * fill_rx_buffers - Allocates the Rx side skbs
1da177e4 2085 * @nic: device private variable
20346722
K
2086 * @ring_no: ring number
2087 * Description:
1da177e4
LT
2088 * The function allocates Rx side skbs and puts the physical
2089 * address of these buffers into the RxD buffer pointers, so that the NIC
2090 * can DMA the received frame into these locations.
2091 * The NIC supports 3 receive modes, viz
2092 * 1. single buffer,
2093 * 2. three buffer and
 2094 * 3. five buffer modes.
20346722
K
2095 * Each mode defines how many fragments the received frame will be split
2096 * up into by the NIC. The frame is split into L3 header, L4 Header,
1da177e4
LT
2097 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2098 * is split into 3 fragments. As of now only single buffer mode is
2099 * supported.
2100 * Return Value:
2101 * SUCCESS on success or an appropriate -ve value on failure.
2102 */
2103
20346722 2104int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1da177e4
LT
2105{
2106 struct net_device *dev = nic->dev;
2107 struct sk_buff *skb;
2108 RxD_t *rxdp;
2109 int off, off1, size, block_no, block_no1;
2110 int offset, offset1;
2111 u32 alloc_tab = 0;
20346722 2112 u32 alloc_cnt;
1da177e4
LT
2113 mac_info_t *mac_control;
2114 struct config_param *config;
2115#ifdef CONFIG_2BUFF_MODE
2116 RxD_t *rxdpnext;
2117 int nextblk;
20346722 2118 u64 tmp;
1da177e4
LT
2119 buffAdd_t *ba;
2120 dma_addr_t rxdpphys;
2121#endif
2122#ifndef CONFIG_S2IO_NAPI
2123 unsigned long flags;
2124#endif
303bcb4b 2125 RxD_t *first_rxdp = NULL;
1da177e4
LT
2126
2127 mac_control = &nic->mac_control;
2128 config = &nic->config;
20346722
K
2129 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2130 atomic_read(&nic->rx_bufs_left[ring_no]);
1da177e4
LT
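 /*
  * Worst-case single buffer frame size: the device MTU plus the
  * Ethernet II/802.3, 802.2 LLC and SNAP header lengths.
  */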
2131 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2132 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2133
2134 while (alloc_tab < alloc_cnt) {
20346722 2135 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2136 block_index;
20346722 2137 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1da177e4 2138 block_index;
20346722
K
2139 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2140 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1da177e4
LT
2141#ifndef CONFIG_2BUFF_MODE
2142 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2143 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2144#else
2145 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2146 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2147#endif
2148
20346722 2149 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2150 block_virt_addr + off;
2151 if ((offset == offset1) && (rxdp->Host_Control)) {
2152 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2153 DBG_PRINT(INTR_DBG, " info equated\n");
2154 goto end;
2155 }
2156#ifndef CONFIG_2BUFF_MODE
2157 if (rxdp->Control_1 == END_OF_BLOCK) {
20346722 2158 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2159 block_index++;
20346722
K
2160 mac_control->rings[ring_no].rx_curr_put_info.
2161 block_index %= mac_control->rings[ring_no].block_count;
2162 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2163 block_index;
1da177e4
LT
2164 off++;
2165 off %= (MAX_RXDS_PER_BLOCK + 1);
20346722 2166 mac_control->rings[ring_no].rx_curr_put_info.offset =
1da177e4
LT
2167 off;
2168 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2169 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2170 dev->name, rxdp);
2171 }
2172#ifndef CONFIG_S2IO_NAPI
2173 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2174 mac_control->rings[ring_no].put_pos =
1da177e4
LT
2175 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2176 spin_unlock_irqrestore(&nic->put_lock, flags);
2177#endif
2178#else
2179 if (rxdp->Host_Control == END_OF_BLOCK) {
20346722 2180 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2181 block_index++;
20346722
K
2182 mac_control->rings[ring_no].rx_curr_put_info.block_index
2183 %= mac_control->rings[ring_no].block_count;
2184 block_no = mac_control->rings[ring_no].rx_curr_put_info
2185 .block_index;
1da177e4
LT
2186 off = 0;
2187 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2188 dev->name, block_no,
2189 (unsigned long long) rxdp->Control_1);
20346722 2190 mac_control->rings[ring_no].rx_curr_put_info.offset =
1da177e4 2191 off;
20346722 2192 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2193 block_virt_addr;
2194 }
2195#ifndef CONFIG_S2IO_NAPI
2196 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2197 mac_control->rings[ring_no].put_pos = (block_no *
1da177e4
LT
2198 (MAX_RXDS_PER_BLOCK + 1)) + off;
2199 spin_unlock_irqrestore(&nic->put_lock, flags);
2200#endif
2201#endif
2202
2203#ifndef CONFIG_2BUFF_MODE
2204 if (rxdp->Control_1 & RXD_OWN_XENA)
2205#else
2206 if (rxdp->Control_2 & BIT(0))
2207#endif
2208 {
20346722 2209 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4
LT
2210 offset = off;
2211 goto end;
2212 }
2213#ifdef CONFIG_2BUFF_MODE
20346722
K
2214 /*
2215 * RxDs Spanning cache lines will be replenished only
2216 * if the succeeding RxD is also owned by Host. It
2217 * will always be the ((8*i)+3) and ((8*i)+6)
2218 * descriptors for the 48 byte descriptor. The offending
1da177e4
LT
 2219 * descriptor is of course the 3rd descriptor.
2220 */
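 /*
  * With a 48 byte RxD and a 128 byte cache line, (off * 48) % 128
  * exceeds 80 only when off % 8 is 2 or 5, i.e. the 3rd and 6th
  * descriptors of each group of eight mentioned above.
  */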
20346722 2221 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2222 block_dma_addr + (off * sizeof(RxD_t));
2223 if (((u64) (rxdpphys)) % 128 > 80) {
20346722 2224 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2225 block_virt_addr + (off + 1);
2226 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2227 nextblk = (block_no + 1) %
20346722
K
2228 (mac_control->rings[ring_no].block_count);
2229 rxdpnext = mac_control->rings[ring_no].rx_blocks
1da177e4
LT
2230 [nextblk].block_virt_addr;
2231 }
2232 if (rxdpnext->Control_2 & BIT(0))
2233 goto end;
2234 }
2235#endif
2236
2237#ifndef CONFIG_2BUFF_MODE
2238 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2239#else
2240 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2241#endif
2242 if (!skb) {
2243 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2244 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
303bcb4b
K
2245 if (first_rxdp) {
2246 wmb();
2247 first_rxdp->Control_1 |= RXD_OWN_XENA;
2248 }
1da177e4
LT
2249 return -ENOMEM;
2250 }
2251#ifndef CONFIG_2BUFF_MODE
2252 skb_reserve(skb, NET_IP_ALIGN);
2253 memset(rxdp, 0, sizeof(RxD_t));
2254 rxdp->Buffer0_ptr = pci_map_single
2255 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2256 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2257 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2258 rxdp->Host_Control = (unsigned long) (skb);
303bcb4b
K
2259 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2260 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4
LT
2261 off++;
2262 off %= (MAX_RXDS_PER_BLOCK + 1);
20346722 2263 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1da177e4 2264#else
20346722 2265 ba = &mac_control->rings[ring_no].ba[block_no][off];
1da177e4 2266 skb_reserve(skb, BUF0_LEN);
689be439
DM
2267 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2268 if (tmp)
2269 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1da177e4
LT
2270
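 /*
  * 2 buffer mode mapping: Buffer2 points at the skb data area
  * (dev->mtu + BUF0_LEN + 4 bytes), Buffer0 at the ba_0 scratch
  * buffer of BUF0_LEN bytes, and Buffer1 at ba_1 with a dummy
  * one byte size.
  */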
2271 memset(rxdp, 0, sizeof(RxD_t));
2272 rxdp->Buffer2_ptr = pci_map_single
2273 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2274 PCI_DMA_FROMDEVICE);
2275 rxdp->Buffer0_ptr =
2276 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2277 PCI_DMA_FROMDEVICE);
2278 rxdp->Buffer1_ptr =
2279 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2280 PCI_DMA_FROMDEVICE);
2281
2282 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2283 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2284 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2285 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2286 rxdp->Host_Control = (u64) ((unsigned long) (skb));
303bcb4b
K
2287 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2288 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4 2289 off++;
20346722 2290 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1da177e4 2291#endif
5e25b9dd 2292 rxdp->Control_2 |= SET_RXD_MARKER;
20346722 2293
303bcb4b
K
2294 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2295 if (first_rxdp) {
2296 wmb();
2297 first_rxdp->Control_1 |= RXD_OWN_XENA;
2298 }
2299 first_rxdp = rxdp;
2300 }
1da177e4
LT
2301 atomic_inc(&nic->rx_bufs_left[ring_no]);
2302 alloc_tab++;
2303 }
2304
2305 end:
303bcb4b
K
2306 /* Transfer ownership of first descriptor to adapter just before
2307 * exiting. Before that, use memory barrier so that ownership
2308 * and other fields are seen by adapter correctly.
2309 */
2310 if (first_rxdp) {
2311 wmb();
2312 first_rxdp->Control_1 |= RXD_OWN_XENA;
2313 }
2314
1da177e4
LT
2315 return SUCCESS;
2316}
2317
2318/**
20346722 2319 * free_rx_buffers - Frees all Rx buffers
1da177e4 2320 * @sp: device private variable.
20346722 2321 * Description:
1da177e4
LT
2322 * This function will free all Rx buffers allocated by host.
2323 * Return Value:
2324 * NONE.
2325 */
2326
2327static void free_rx_buffers(struct s2io_nic *sp)
2328{
2329 struct net_device *dev = sp->dev;
2330 int i, j, blk = 0, off, buf_cnt = 0;
2331 RxD_t *rxdp;
2332 struct sk_buff *skb;
2333 mac_info_t *mac_control;
2334 struct config_param *config;
2335#ifdef CONFIG_2BUFF_MODE
2336 buffAdd_t *ba;
2337#endif
2338
2339 mac_control = &sp->mac_control;
2340 config = &sp->config;
2341
2342 for (i = 0; i < config->rx_ring_num; i++) {
2343 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2344 off = j % (MAX_RXDS_PER_BLOCK + 1);
20346722
K
2345 rxdp = mac_control->rings[i].rx_blocks[blk].
2346 block_virt_addr + off;
1da177e4
LT
2347
2348#ifndef CONFIG_2BUFF_MODE
2349 if (rxdp->Control_1 == END_OF_BLOCK) {
2350 rxdp =
2351 (RxD_t *) ((unsigned long) rxdp->
2352 Control_2);
2353 j++;
2354 blk++;
2355 }
2356#else
2357 if (rxdp->Host_Control == END_OF_BLOCK) {
2358 blk++;
2359 continue;
2360 }
2361#endif
2362
2363 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2364 memset(rxdp, 0, sizeof(RxD_t));
2365 continue;
2366 }
2367
2368 skb =
2369 (struct sk_buff *) ((unsigned long) rxdp->
2370 Host_Control);
2371 if (skb) {
2372#ifndef CONFIG_2BUFF_MODE
2373 pci_unmap_single(sp->pdev, (dma_addr_t)
2374 rxdp->Buffer0_ptr,
2375 dev->mtu +
2376 HEADER_ETHERNET_II_802_3_SIZE
2377 + HEADER_802_2_SIZE +
2378 HEADER_SNAP_SIZE,
2379 PCI_DMA_FROMDEVICE);
2380#else
20346722 2381 ba = &mac_control->rings[i].ba[blk][off];
1da177e4
LT
2382 pci_unmap_single(sp->pdev, (dma_addr_t)
2383 rxdp->Buffer0_ptr,
2384 BUF0_LEN,
2385 PCI_DMA_FROMDEVICE);
2386 pci_unmap_single(sp->pdev, (dma_addr_t)
2387 rxdp->Buffer1_ptr,
2388 BUF1_LEN,
2389 PCI_DMA_FROMDEVICE);
2390 pci_unmap_single(sp->pdev, (dma_addr_t)
2391 rxdp->Buffer2_ptr,
2392 dev->mtu + BUF0_LEN + 4,
2393 PCI_DMA_FROMDEVICE);
2394#endif
2395 dev_kfree_skb(skb);
2396 atomic_dec(&sp->rx_bufs_left[i]);
2397 buf_cnt++;
2398 }
2399 memset(rxdp, 0, sizeof(RxD_t));
2400 }
20346722
K
2401 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2402 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2403 mac_control->rings[i].rx_curr_put_info.offset = 0;
2404 mac_control->rings[i].rx_curr_get_info.offset = 0;
1da177e4
LT
2405 atomic_set(&sp->rx_bufs_left[i], 0);
2406 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2407 dev->name, buf_cnt, i);
2408 }
2409}
2410
2411/**
2412 * s2io_poll - Rx interrupt handler for NAPI support
2413 * @dev : pointer to the device structure.
20346722 2414 * @budget : The number of packets that were budgeted to be processed
1da177e4
LT
 2415 * during one pass through the 'poll' function.
2416 * Description:
2417 * Comes into picture only if NAPI support has been incorporated. It does
 2418 * the same thing that rx_intr_handler does, but not in an interrupt context.
 2419 * Also, it will process only a given number of packets.
2420 * Return value:
2421 * 0 on success and 1 if there are No Rx packets to be processed.
2422 */
2423
20346722 2424#if defined(CONFIG_S2IO_NAPI)
1da177e4
LT
2425static int s2io_poll(struct net_device *dev, int *budget)
2426{
2427 nic_t *nic = dev->priv;
20346722 2428 int pkt_cnt = 0, org_pkts_to_process;
1da177e4
LT
2429 mac_info_t *mac_control;
2430 struct config_param *config;
509a2671 2431 XENA_dev_config_t __iomem *bar0 = nic->bar0;
20346722
K
2432 u64 val64;
2433 int i;
1da177e4 2434
7ba013ac 2435 atomic_inc(&nic->isr_cnt);
1da177e4
LT
2436 mac_control = &nic->mac_control;
2437 config = &nic->config;
2438
20346722
K
2439 nic->pkts_to_process = *budget;
2440 if (nic->pkts_to_process > dev->quota)
2441 nic->pkts_to_process = dev->quota;
2442 org_pkts_to_process = nic->pkts_to_process;
1da177e4
LT
2443
2444 val64 = readq(&bar0->rx_traffic_int);
2445 writeq(val64, &bar0->rx_traffic_int);
2446
2447 for (i = 0; i < config->rx_ring_num; i++) {
20346722
K
2448 rx_intr_handler(&mac_control->rings[i]);
2449 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2450 if (!nic->pkts_to_process) {
2451 /* Quota for the current iteration has been met */
2452 goto no_rx;
1da177e4 2453 }
1da177e4
LT
2454 }
2455 if (!pkt_cnt)
2456 pkt_cnt = 1;
2457
2458 dev->quota -= pkt_cnt;
2459 *budget -= pkt_cnt;
2460 netif_rx_complete(dev);
2461
2462 for (i = 0; i < config->rx_ring_num; i++) {
2463 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2464 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2465 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2466 break;
2467 }
2468 }
2469 /* Re enable the Rx interrupts. */
2470 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
7ba013ac 2471 atomic_dec(&nic->isr_cnt);
1da177e4
LT
2472 return 0;
2473
20346722 2474no_rx:
1da177e4
LT
2475 dev->quota -= pkt_cnt;
2476 *budget -= pkt_cnt;
2477
2478 for (i = 0; i < config->rx_ring_num; i++) {
2479 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2480 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2481 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2482 break;
2483 }
2484 }
7ba013ac 2485 atomic_dec(&nic->isr_cnt);
1da177e4
LT
2486 return 1;
2487}
20346722
K
2488#endif
2489
2490/**
1da177e4
LT
2491 * rx_intr_handler - Rx interrupt handler
2492 * @nic: device private variable.
20346722
K
2493 * Description:
2494 * If the interrupt is because of a received frame or if the
1da177e4 2495 * receive ring contains fresh as yet un-processed frames,this function is
20346722
K
 2496 * called. It picks out the RxD at which the last Rx processing had
2497 * stopped and sends the skb to the OSM's Rx handler and then increments
1da177e4
LT
2498 * the offset.
2499 * Return Value:
2500 * NONE.
2501 */
20346722 2502static void rx_intr_handler(ring_info_t *ring_data)
1da177e4 2503{
20346722 2504 nic_t *nic = ring_data->nic;
1da177e4 2505 struct net_device *dev = (struct net_device *) nic->dev;
20346722 2506 int get_block, get_offset, put_block, put_offset, ring_bufs;
1da177e4
LT
2507 rx_curr_get_info_t get_info, put_info;
2508 RxD_t *rxdp;
2509 struct sk_buff *skb;
20346722
K
2510#ifndef CONFIG_S2IO_NAPI
2511 int pkt_cnt = 0;
1da177e4 2512#endif
7ba013ac
K
2513 spin_lock(&nic->rx_lock);
2514 if (atomic_read(&nic->card_state) == CARD_DOWN) {
776bd20f 2515 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
7ba013ac
K
2516 __FUNCTION__, dev->name);
2517 spin_unlock(&nic->rx_lock);
776bd20f 2518 return;
7ba013ac
K
2519 }
2520
20346722
K
2521 get_info = ring_data->rx_curr_get_info;
2522 get_block = get_info.block_index;
2523 put_info = ring_data->rx_curr_put_info;
2524 put_block = put_info.block_index;
2525 ring_bufs = get_info.ring_len+1;
2526 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
1da177e4 2527 get_info.offset;
20346722
K
2528 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2529 get_info.offset;
2530#ifndef CONFIG_S2IO_NAPI
2531 spin_lock(&nic->put_lock);
2532 put_offset = ring_data->put_pos;
2533 spin_unlock(&nic->put_lock);
2534#else
2535 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2536 put_info.offset;
2537#endif
5e25b9dd
K
2538 while (RXD_IS_UP2DT(rxdp) &&
2539 (((get_offset + 1) % ring_bufs) != put_offset)) {
20346722
K
2540 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2541 if (skb == NULL) {
2542 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2543 dev->name);
2544 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
7ba013ac 2545 spin_unlock(&nic->rx_lock);
20346722 2546 return;
1da177e4 2547 }
20346722
K
2548#ifndef CONFIG_2BUFF_MODE
2549 pci_unmap_single(nic->pdev, (dma_addr_t)
2550 rxdp->Buffer0_ptr,
2551 dev->mtu +
2552 HEADER_ETHERNET_II_802_3_SIZE +
2553 HEADER_802_2_SIZE +
2554 HEADER_SNAP_SIZE,
2555 PCI_DMA_FROMDEVICE);
1da177e4 2556#else
20346722
K
2557 pci_unmap_single(nic->pdev, (dma_addr_t)
2558 rxdp->Buffer0_ptr,
2559 BUF0_LEN, PCI_DMA_FROMDEVICE);
2560 pci_unmap_single(nic->pdev, (dma_addr_t)
2561 rxdp->Buffer1_ptr,
2562 BUF1_LEN, PCI_DMA_FROMDEVICE);
2563 pci_unmap_single(nic->pdev, (dma_addr_t)
2564 rxdp->Buffer2_ptr,
2565 dev->mtu + BUF0_LEN + 4,
2566 PCI_DMA_FROMDEVICE);
2567#endif
2568 rx_osm_handler(ring_data, rxdp);
2569 get_info.offset++;
2570 ring_data->rx_curr_get_info.offset =
1da177e4 2571 get_info.offset;
20346722
K
2572 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2573 get_info.offset;
2574 if (get_info.offset &&
2575 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2576 get_info.offset = 0;
2577 ring_data->rx_curr_get_info.offset
2578 = get_info.offset;
2579 get_block++;
2580 get_block %= ring_data->block_count;
2581 ring_data->rx_curr_get_info.block_index
2582 = get_block;
2583 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2584 }
1da177e4 2585
20346722 2586 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1da177e4 2587 get_info.offset;
20346722
K
2588#ifdef CONFIG_S2IO_NAPI
2589 nic->pkts_to_process -= 1;
2590 if (!nic->pkts_to_process)
2591 break;
2592#else
2593 pkt_cnt++;
1da177e4
LT
2594 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2595 break;
20346722 2596#endif
1da177e4 2597 }
7ba013ac 2598 spin_unlock(&nic->rx_lock);
1da177e4 2599}
20346722
K
2600
2601/**
1da177e4
LT
2602 * tx_intr_handler - Transmit interrupt handler
2603 * @nic : device private variable
20346722
K
2604 * Description:
2605 * If an interrupt was raised to indicate DMA complete of the
2606 * Tx packet, this function is called. It identifies the last TxD
2607 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
 2608 * been DMA'ed into the NIC's internal memory.
2609 * Return Value:
2610 * NONE
2611 */
2612
20346722 2613static void tx_intr_handler(fifo_info_t *fifo_data)
1da177e4 2614{
20346722 2615 nic_t *nic = fifo_data->nic;
1da177e4
LT
2616 struct net_device *dev = (struct net_device *) nic->dev;
2617 tx_curr_get_info_t get_info, put_info;
2618 struct sk_buff *skb;
2619 TxD_t *txdlp;
1da177e4 2620 u16 j, frg_cnt;
1da177e4 2621
20346722
K
2622 get_info = fifo_data->tx_curr_get_info;
2623 put_info = fifo_data->tx_curr_put_info;
2624 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2625 list_virt_addr;
2626 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2627 (get_info.offset != put_info.offset) &&
2628 (txdlp->Host_Control)) {
2629 /* Check for TxD errors */
2630 if (txdlp->Control_1 & TXD_T_CODE) {
2631 unsigned long long err;
2632 err = txdlp->Control_1 & TXD_T_CODE;
776bd20f 2633 if ((err >> 48) == 0xA) {
2634 DBG_PRINT(TX_DBG, "TxD returned due \
2635 to loss of link\n");
2636 }
2637 else {
2638 DBG_PRINT(ERR_DBG, "***TxD error \
2639 %llx\n", err);
2640 }
20346722 2641 }
1da177e4 2642
20346722
K
2643 skb = (struct sk_buff *) ((unsigned long)
2644 txdlp->Host_Control);
2645 if (skb == NULL) {
2646 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2647 __FUNCTION__);
2648 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2649 return;
2650 }
2651
2652 frg_cnt = skb_shinfo(skb)->nr_frags;
2653 nic->tx_pkt_count++;
2654
2655 pci_unmap_single(nic->pdev, (dma_addr_t)
2656 txdlp->Buffer_Pointer,
2657 skb->len - skb->data_len,
2658 PCI_DMA_TODEVICE);
2659 if (frg_cnt) {
2660 TxD_t *temp;
2661 temp = txdlp;
2662 txdlp++;
2663 for (j = 0; j < frg_cnt; j++, txdlp++) {
2664 skb_frag_t *frag =
2665 &skb_shinfo(skb)->frags[j];
0b1f7ebe
K
2666 if (!txdlp->Buffer_Pointer)
2667 break;
20346722
K
2668 pci_unmap_page(nic->pdev,
2669 (dma_addr_t)
2670 txdlp->
2671 Buffer_Pointer,
2672 frag->size,
2673 PCI_DMA_TODEVICE);
1da177e4 2674 }
20346722 2675 txdlp = temp;
1da177e4 2676 }
20346722
K
2677 memset(txdlp, 0,
2678 (sizeof(TxD_t) * fifo_data->max_txds));
2679
2680 /* Updating the statistics block */
20346722
K
2681 nic->stats.tx_bytes += skb->len;
2682 dev_kfree_skb_irq(skb);
2683
2684 get_info.offset++;
2685 get_info.offset %= get_info.fifo_len + 1;
2686 txdlp = (TxD_t *) fifo_data->list_info
2687 [get_info.offset].list_virt_addr;
2688 fifo_data->tx_curr_get_info.offset =
2689 get_info.offset;
1da177e4
LT
2690 }
2691
2692 spin_lock(&nic->tx_lock);
2693 if (netif_queue_stopped(dev))
2694 netif_wake_queue(dev);
2695 spin_unlock(&nic->tx_lock);
2696}
2697
20346722 2698/**
1da177e4
LT
 2699 * alarm_intr_handler - Alarm Interrupt handler
2700 * @nic: device private variable
20346722 2701 * Description: If the interrupt was neither because of an Rx packet nor Tx
1da177e4 2702 * complete, this function is called. If the interrupt was to indicate
20346722
K
 2703 * a loss of link, the OSM link status handler is invoked. For any other
 2704 * alarm interrupt, the block that raised the interrupt is displayed
1da177e4
LT
2705 * and a H/W reset is issued.
2706 * Return Value:
2707 * NONE
2708*/
2709
2710static void alarm_intr_handler(struct s2io_nic *nic)
2711{
2712 struct net_device *dev = (struct net_device *) nic->dev;
2713 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2714 register u64 val64 = 0, err_reg = 0;
2715
2716 /* Handling link status change error Intr */
a371a07d
K
2717 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2718 err_reg = readq(&bar0->mac_rmac_err_reg);
2719 writeq(err_reg, &bar0->mac_rmac_err_reg);
2720 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2721 schedule_work(&nic->set_link_task);
2722 }
1da177e4
LT
2723 }
2724
5e25b9dd
K
2725 /* Handling Ecc errors */
2726 val64 = readq(&bar0->mc_err_reg);
2727 writeq(val64, &bar0->mc_err_reg);
2728 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2729 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
7ba013ac
K
2730 nic->mac_control.stats_info->sw_stat.
2731 double_ecc_errs++;
776bd20f 2732 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
5e25b9dd 2733 dev->name);
776bd20f 2734 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
e960fc5c 2735 if (nic->device_type != XFRAME_II_DEVICE) {
776bd20f 2736 /* Reset XframeI only if critical error */
2737 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
2738 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
2739 netif_stop_queue(dev);
2740 schedule_work(&nic->rst_timer_task);
2741 }
e960fc5c 2742 }
5e25b9dd 2743 } else {
7ba013ac
K
2744 nic->mac_control.stats_info->sw_stat.
2745 single_ecc_errs++;
5e25b9dd
K
2746 }
2747 }
2748
1da177e4
LT
2749 /* In case of a serious error, the device will be Reset. */
2750 val64 = readq(&bar0->serr_source);
2751 if (val64 & SERR_SOURCE_ANY) {
2752 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
776bd20f 2753 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
2754 (unsigned long long)val64);
1da177e4
LT
2755 netif_stop_queue(dev);
2756 schedule_work(&nic->rst_timer_task);
2757 }
2758
2759 /*
2760 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2761 * Error occurs, the adapter will be recycled by disabling the
20346722 2762 * adapter enable bit and enabling it again after the device
1da177e4
LT
2763 * becomes Quiescent.
2764 */
2765 val64 = readq(&bar0->pcc_err_reg);
2766 writeq(val64, &bar0->pcc_err_reg);
2767 if (val64 & PCC_FB_ECC_DB_ERR) {
2768 u64 ac = readq(&bar0->adapter_control);
2769 ac &= ~(ADAPTER_CNTL_EN);
2770 writeq(ac, &bar0->adapter_control);
2771 ac = readq(&bar0->adapter_control);
2772 schedule_work(&nic->set_link_task);
2773 }
2774
2775 /* Other type of interrupts are not being handled now, TODO */
2776}
2777
20346722 2778/**
1da177e4 2779 * wait_for_cmd_complete - waits for a command to complete.
20346722 2780 * @sp : private member of the device structure, which is a pointer to the
1da177e4 2781 * s2io_nic structure.
20346722
K
 2782 * Description: Function that waits for a command written to the RMAC
 2783 * ADDR/DATA registers to complete and returns either success or
2784 * error depending on whether the command was complete or not.
1da177e4
LT
2785 * Return value:
2786 * SUCCESS on success and FAILURE on failure.
2787 */
2788
20346722 2789int wait_for_cmd_complete(nic_t * sp)
1da177e4
LT
2790{
2791 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2792 int ret = FAILURE, cnt = 0;
2793 u64 val64;
2794
2795 while (TRUE) {
2796 val64 = readq(&bar0->rmac_addr_cmd_mem);
2797 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2798 ret = SUCCESS;
2799 break;
2800 }
2801 msleep(50);
2802 if (cnt++ > 10)
2803 break;
2804 }
2805
2806 return ret;
2807}
2808
20346722
K
2809/**
2810 * s2io_reset - Resets the card.
1da177e4
LT
2811 * @sp : private member of the device structure.
2812 * Description: Function to Reset the card. This function then also
20346722 2813 * restores the previously saved PCI configuration space registers as
1da177e4
LT
2814 * the card reset also resets the configuration space.
2815 * Return value:
2816 * void.
2817 */
2818
20346722 2819void s2io_reset(nic_t * sp)
1da177e4
LT
2820{
2821 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2822 u64 val64;
5e25b9dd 2823 u16 subid, pci_cmd;
1da177e4 2824
0b1f7ebe 2825 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
e960fc5c 2826 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
0b1f7ebe 2827
1da177e4
LT
2828 val64 = SW_RESET_ALL;
2829 writeq(val64, &bar0->sw_reset);
2830
20346722
K
2831 /*
2832 * At this stage, if the PCI write is indeed completed, the
2833 * card is reset and so is the PCI Config space of the device.
2834 * So a read cannot be issued at this stage on any of the
1da177e4
LT
2835 * registers to ensure the write into "sw_reset" register
2836 * has gone through.
2837 * Question: Is there any system call that will explicitly force
2838 * all the write commands still pending on the bus to be pushed
2839 * through?
 2840 * As of now I am just giving a 250ms delay and hoping that the
2841 * PCI write to sw_reset register is done by this time.
2842 */
2843 msleep(250);
2844
e960fc5c 2845 /* Restore the PCI state saved during initialization. */
2846 pci_restore_state(sp->pdev);
2847 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
0b1f7ebe 2848 pci_cmd);
1da177e4
LT
2849 s2io_init_pci(sp);
2850
2851 msleep(250);
2852
20346722
K
2853 /* Set swapper to enable I/O register access */
2854 s2io_set_swapper(sp);
2855
5e25b9dd 2856 /* Clear certain PCI/PCI-X fields after reset */
303bcb4b
K
2857 if (sp->device_type == XFRAME_II_DEVICE) {
2858 /* Clear parity err detect bit */
2859 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
5e25b9dd 2860
303bcb4b
K
2861 /* Clearing PCIX Ecc status register */
2862 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
5e25b9dd 2863
303bcb4b
K
2864 /* Clearing PCI_STATUS error reflected here */
2865 writeq(BIT(62), &bar0->txpic_int_reg);
2866 }
5e25b9dd 2867
20346722
K
2868 /* Reset device statistics maintained by OS */
2869 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2870
1da177e4
LT
2871 /* SXE-002: Configure link and activity LED to turn it off */
2872 subid = sp->pdev->subsystem_device;
541ae68f
K
2873 if (((subid & 0xFF) >= 0x07) &&
2874 (sp->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
2875 val64 = readq(&bar0->gpio_control);
2876 val64 |= 0x0000800000000000ULL;
2877 writeq(val64, &bar0->gpio_control);
2878 val64 = 0x0411040400000000ULL;
509a2671 2879 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
2880 }
2881
541ae68f
K
2882 /*
2883 * Clear spurious ECC interrupts that would have occured on
2884 * XFRAME II cards after reset.
2885 */
2886 if (sp->device_type == XFRAME_II_DEVICE) {
2887 val64 = readq(&bar0->pcc_err_reg);
2888 writeq(val64, &bar0->pcc_err_reg);
2889 }
2890
1da177e4
LT
2891 sp->device_enabled_once = FALSE;
2892}
2893
2894/**
20346722
K
 2895 * s2io_set_swapper - to set the swapper control on the card
2896 * @sp : private member of the device structure,
1da177e4 2897 * pointer to the s2io_nic structure.
20346722 2898 * Description: Function to set the swapper control on the card
1da177e4
LT
2899 * correctly depending on the 'endianness' of the system.
2900 * Return value:
2901 * SUCCESS on success and FAILURE on failure.
2902 */
2903
20346722 2904int s2io_set_swapper(nic_t * sp)
1da177e4
LT
2905{
2906 struct net_device *dev = sp->dev;
2907 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2908 u64 val64, valt, valr;
2909
20346722 2910 /*
1da177e4
LT
2911 * Set proper endian settings and verify the same by reading
2912 * the PIF Feed-back register.
2913 */
2914
2915 val64 = readq(&bar0->pif_rd_swapper_fb);
2916 if (val64 != 0x0123456789ABCDEFULL) {
2917 int i = 0;
2918 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2919 0x8100008181000081ULL, /* FE=1, SE=0 */
2920 0x4200004242000042ULL, /* FE=0, SE=1 */
2921 0}; /* FE=0, SE=0 */
2922
2923 while(i<4) {
2924 writeq(value[i], &bar0->swapper_ctrl);
2925 val64 = readq(&bar0->pif_rd_swapper_fb);
2926 if (val64 == 0x0123456789ABCDEFULL)
2927 break;
2928 i++;
2929 }
2930 if (i == 4) {
2931 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2932 dev->name);
2933 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2934 (unsigned long long) val64);
2935 return FAILURE;
2936 }
2937 valr = value[i];
2938 } else {
2939 valr = readq(&bar0->swapper_ctrl);
2940 }
2941
2942 valt = 0x0123456789ABCDEFULL;
2943 writeq(valt, &bar0->xmsi_address);
2944 val64 = readq(&bar0->xmsi_address);
2945
2946 if(val64 != valt) {
2947 int i = 0;
2948 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2949 0x0081810000818100ULL, /* FE=1, SE=0 */
2950 0x0042420000424200ULL, /* FE=0, SE=1 */
2951 0}; /* FE=0, SE=0 */
2952
2953 while(i<4) {
2954 writeq((value[i] | valr), &bar0->swapper_ctrl);
2955 writeq(valt, &bar0->xmsi_address);
2956 val64 = readq(&bar0->xmsi_address);
2957 if(val64 == valt)
2958 break;
2959 i++;
2960 }
2961 if(i == 4) {
20346722 2962 unsigned long long x = val64;
1da177e4 2963 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
20346722 2964 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
1da177e4
LT
2965 return FAILURE;
2966 }
2967 }
2968 val64 = readq(&bar0->swapper_ctrl);
2969 val64 &= 0xFFFF000000000000ULL;
2970
2971#ifdef __BIG_ENDIAN
20346722
K
2972 /*
 2973 * The device is by default set to a big endian format, so a
1da177e4
LT
2974 * big endian driver need not set anything.
2975 */
2976 val64 |= (SWAPPER_CTRL_TXP_FE |
2977 SWAPPER_CTRL_TXP_SE |
2978 SWAPPER_CTRL_TXD_R_FE |
2979 SWAPPER_CTRL_TXD_W_FE |
2980 SWAPPER_CTRL_TXF_R_FE |
2981 SWAPPER_CTRL_RXD_R_FE |
2982 SWAPPER_CTRL_RXD_W_FE |
2983 SWAPPER_CTRL_RXF_W_FE |
2984 SWAPPER_CTRL_XMSI_FE |
2985 SWAPPER_CTRL_XMSI_SE |
2986 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2987 writeq(val64, &bar0->swapper_ctrl);
2988#else
20346722 2989 /*
1da177e4 2990 * Initially we enable all bits to make it accessible by the
20346722 2991 * driver, then we selectively enable only those bits that
1da177e4
LT
2992 * we want to set.
2993 */
2994 val64 |= (SWAPPER_CTRL_TXP_FE |
2995 SWAPPER_CTRL_TXP_SE |
2996 SWAPPER_CTRL_TXD_R_FE |
2997 SWAPPER_CTRL_TXD_R_SE |
2998 SWAPPER_CTRL_TXD_W_FE |
2999 SWAPPER_CTRL_TXD_W_SE |
3000 SWAPPER_CTRL_TXF_R_FE |
3001 SWAPPER_CTRL_RXD_R_FE |
3002 SWAPPER_CTRL_RXD_R_SE |
3003 SWAPPER_CTRL_RXD_W_FE |
3004 SWAPPER_CTRL_RXD_W_SE |
3005 SWAPPER_CTRL_RXF_W_FE |
3006 SWAPPER_CTRL_XMSI_FE |
3007 SWAPPER_CTRL_XMSI_SE |
3008 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3009 writeq(val64, &bar0->swapper_ctrl);
3010#endif
3011 val64 = readq(&bar0->swapper_ctrl);
3012
20346722
K
3013 /*
3014 * Verifying if endian settings are accurate by reading a
1da177e4
LT
3015 * feedback register.
3016 */
3017 val64 = readq(&bar0->pif_rd_swapper_fb);
3018 if (val64 != 0x0123456789ABCDEFULL) {
3019 /* Endian settings are incorrect, calls for another dekko. */
3020 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3021 dev->name);
3022 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3023 (unsigned long long) val64);
3024 return FAILURE;
3025 }
3026
3027 return SUCCESS;
3028}
3029
3030/* ********************************************************* *
3031 * Functions defined below concern the OS part of the driver *
3032 * ********************************************************* */
3033
20346722 3034/**
1da177e4
LT
3035 * s2io_open - open entry point of the driver
3036 * @dev : pointer to the device structure.
3037 * Description:
3038 * This function is the open entry point of the driver. It mainly calls a
3039 * function to allocate Rx buffers and inserts them into the buffer
20346722 3040 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3041 * Return value:
3042 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3043 * file on failure.
3044 */
3045
20346722 3046int s2io_open(struct net_device *dev)
1da177e4
LT
3047{
3048 nic_t *sp = dev->priv;
3049 int err = 0;
3050
20346722
K
3051 /*
3052 * Make sure you have link off by default every time
1da177e4
LT
3053 * Nic is initialized
3054 */
3055 netif_carrier_off(dev);
0b1f7ebe 3056 sp->last_link_state = 0;
1da177e4
LT
3057
3058 /* Initialize H/W and enable interrupts */
3059 if (s2io_card_up(sp)) {
3060 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3061 dev->name);
20346722
K
3062 err = -ENODEV;
3063 goto hw_init_failed;
1da177e4
LT
3064 }
3065
3066 /* After proper initialization of H/W, register ISR */
20346722 3067 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
1da177e4
LT
3068 sp->name, dev);
3069 if (err) {
1da177e4
LT
3070 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3071 dev->name);
20346722 3072 goto isr_registration_failed;
1da177e4
LT
3073 }
3074
3075 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3076 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
20346722
K
3077 err = -ENODEV;
3078 goto setting_mac_address_failed;
1da177e4
LT
3079 }
3080
3081 netif_start_queue(dev);
3082 return 0;
20346722
K
3083
3084setting_mac_address_failed:
3085 free_irq(sp->pdev->irq, dev);
3086isr_registration_failed:
25fff88e 3087 del_timer_sync(&sp->alarm_timer);
20346722
K
3088 s2io_reset(sp);
3089hw_init_failed:
3090 return err;
1da177e4
LT
3091}
3092
3093/**
3094 * s2io_close -close entry point of the driver
3095 * @dev : device pointer.
3096 * Description:
3097 * This is the stop entry point of the driver. It needs to undo exactly
 3098 * whatever was done by the open entry point, thus it's usually referred to
 3099 * as the close function. Among other things this function mainly stops the
3100 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3101 * Return value:
3102 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3103 * file on failure.
3104 */
3105
20346722 3106int s2io_close(struct net_device *dev)
1da177e4
LT
3107{
3108 nic_t *sp = dev->priv;
1da177e4
LT
3109 flush_scheduled_work();
3110 netif_stop_queue(dev);
3111 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3112 s2io_card_down(sp);
3113
20346722 3114 free_irq(sp->pdev->irq, dev);
1da177e4
LT
3115 sp->device_close_flag = TRUE; /* Device is shut down. */
3116 return 0;
3117}
3118
3119/**
 3120 * s2io_xmit - Tx entry point of the driver
3121 * @skb : the socket buffer containing the Tx data.
3122 * @dev : device pointer.
3123 * Description :
3124 * This function is the Tx entry point of the driver. S2IO NIC supports
3125 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 3126 * NOTE: when the device can't queue the pkt, just the trans_start variable will
 3127 * not be updated.
3128 * Return value:
3129 * 0 on success & 1 on failure.
3130 */
3131
20346722 3132int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
3133{
3134 nic_t *sp = dev->priv;
3135 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3136 register u64 val64;
3137 TxD_t *txdp;
3138 TxFIFO_element_t __iomem *tx_fifo;
3139 unsigned long flags;
3140#ifdef NETIF_F_TSO
3141 int mss;
3142#endif
be3a6b02
K
3143 u16 vlan_tag = 0;
3144 int vlan_priority = 0;
1da177e4
LT
3145 mac_info_t *mac_control;
3146 struct config_param *config;
1da177e4
LT
3147
3148 mac_control = &sp->mac_control;
3149 config = &sp->config;
3150
20346722 3151 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
1da177e4 3152 spin_lock_irqsave(&sp->tx_lock, flags);
1da177e4 3153 if (atomic_read(&sp->card_state) == CARD_DOWN) {
20346722 3154 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
1da177e4
LT
3155 dev->name);
3156 spin_unlock_irqrestore(&sp->tx_lock, flags);
20346722
K
3157 dev_kfree_skb(skb);
3158 return 0;
1da177e4
LT
3159 }
3160
3161 queue = 0;
1da177e4 3162
be3a6b02
K
3163 /* Get Fifo number to Transmit based on vlan priority */
3164 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3165 vlan_tag = vlan_tx_tag_get(skb);
3166 vlan_priority = vlan_tag >> 13;
3167 queue = config->fifo_mapping[vlan_priority];
3168 }
3169
20346722
K
3170 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3171 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3172 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3173 list_virt_addr;
3174
3175 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
1da177e4
LT
3176 /* Avoid "put" pointer going beyond "get" pointer */
3177 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
776bd20f 3178 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
1da177e4
LT
3179 netif_stop_queue(dev);
3180 dev_kfree_skb(skb);
3181 spin_unlock_irqrestore(&sp->tx_lock, flags);
3182 return 0;
3183 }
0b1f7ebe
K
3184
3185 /* A buffer with no data will be dropped */
3186 if (!skb->len) {
3187 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3188 dev_kfree_skb(skb);
3189 spin_unlock_irqrestore(&sp->tx_lock, flags);
3190 return 0;
3191 }
3192
1da177e4
LT
3193#ifdef NETIF_F_TSO
3194 mss = skb_shinfo(skb)->tso_size;
3195 if (mss) {
3196 txdp->Control_1 |= TXD_TCP_LSO_EN;
3197 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3198 }
3199#endif
3200
3201 frg_cnt = skb_shinfo(skb)->nr_frags;
3202 frg_len = skb->len - skb->data_len;
3203
1da177e4
LT
3204 txdp->Buffer_Pointer = pci_map_single
3205 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
20346722 3206 txdp->Host_Control = (unsigned long) skb;
1da177e4
LT
3207 if (skb->ip_summed == CHECKSUM_HW) {
3208 txdp->Control_2 |=
3209 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3210 TXD_TX_CKO_UDP_EN);
3211 }
3212
3213 txdp->Control_2 |= config->tx_intr_type;
d8892c6e 3214
be3a6b02
K
3215 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3216 txdp->Control_2 |= TXD_VLAN_ENABLE;
3217 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3218 }
3219
1da177e4
LT
3220 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3221 TXD_GATHER_CODE_FIRST);
3222 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3223
3224 /* For fragmented SKB. */
3225 for (i = 0; i < frg_cnt; i++) {
3226 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0b1f7ebe
K
3227 /* A '0' length fragment will be ignored */
3228 if (!frag->size)
3229 continue;
1da177e4
LT
3230 txdp++;
3231 txdp->Buffer_Pointer = (u64) pci_map_page
3232 (sp->pdev, frag->page, frag->page_offset,
3233 frag->size, PCI_DMA_TODEVICE);
3234 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3235 }
3236 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3237
3238 tx_fifo = mac_control->tx_FIFO_start[queue];
20346722 3239 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
1da177e4
LT
3240 writeq(val64, &tx_fifo->TxDL_Pointer);
3241
3242 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3243 TX_FIFO_LAST_LIST);
20346722 3244
1da177e4
LT
3245#ifdef NETIF_F_TSO
3246 if (mss)
3247 val64 |= TX_FIFO_SPECIAL_FUNC;
3248#endif
3249 writeq(val64, &tx_fifo->List_Control);
3250
303bcb4b
K
3251 mmiowb();
3252
1da177e4 3253 put_off++;
20346722
K
3254 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3255 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
1da177e4
LT
3256
3257 /* Avoid "put" pointer going beyond "get" pointer */
3258 if (((put_off + 1) % queue_len) == get_off) {
3259 DBG_PRINT(TX_DBG,
3260 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3261 put_off, get_off);
3262 netif_stop_queue(dev);
3263 }
3264
3265 dev->trans_start = jiffies;
3266 spin_unlock_irqrestore(&sp->tx_lock, flags);
3267
3268 return 0;
3269}
3270
25fff88e
K
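/*
 * s2io_alarm_handle - Alarm timer callback.
 * @data : device private structure cast to an unsigned long.
 * Description: Runs the alarm interrupt handler from timer context and
 * re-arms the alarm timer to fire again after half a second.
 */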
3271static void
3272s2io_alarm_handle(unsigned long data)
3273{
3274 nic_t *sp = (nic_t *)data;
3275
3276 alarm_intr_handler(sp);
3277 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3278}
3279
a371a07d
K
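/*
 * s2io_txpic_intr_handle - Handles GPIO interrupts routed via the TxPIC block.
 * @sp : device private structure.
 * Description: Clears spurious simultaneous link up/down indications,
 * calls s2io_set_link() when a genuine link transition is seen, and
 * re-programs the GPIO interrupt mask so that only the opposite link
 * transition remains unmasked.
 */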
3280static void s2io_txpic_intr_handle(nic_t *sp)
3281{
509a2671 3282 XENA_dev_config_t __iomem *bar0 = sp->bar0;
a371a07d
K
3283 u64 val64;
3284
3285 val64 = readq(&bar0->pic_int_status);
3286 if (val64 & PIC_INT_GPIO) {
3287 val64 = readq(&bar0->gpio_int_reg);
3288 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3289 (val64 & GPIO_INT_REG_LINK_UP)) {
3290 val64 |= GPIO_INT_REG_LINK_DOWN;
3291 val64 |= GPIO_INT_REG_LINK_UP;
3292 writeq(val64, &bar0->gpio_int_reg);
3293 goto masking;
3294 }
3295
3296 if (((sp->last_link_state == LINK_UP) &&
3297 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3298 ((sp->last_link_state == LINK_DOWN) &&
3299 (val64 & GPIO_INT_REG_LINK_UP))) {
3300 val64 = readq(&bar0->gpio_int_mask);
3301 val64 |= GPIO_INT_MASK_LINK_DOWN;
3302 val64 |= GPIO_INT_MASK_LINK_UP;
3303 writeq(val64, &bar0->gpio_int_mask);
3304 s2io_set_link((unsigned long)sp);
3305 }
3306masking:
3307 if (sp->last_link_state == LINK_UP) {
3308 /*enable down interrupt */
3309 val64 = readq(&bar0->gpio_int_mask);
3310 /* unmasks link down intr */
3311 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3312 /* masks link up intr */
3313 val64 |= GPIO_INT_MASK_LINK_UP;
3314 writeq(val64, &bar0->gpio_int_mask);
3315 } else {
3316 /*enable UP Interrupt */
3317 val64 = readq(&bar0->gpio_int_mask);
3318 /* unmasks link up interrupt */
3319 val64 &= ~GPIO_INT_MASK_LINK_UP;
3320 /* masks link down interrupt */
3321 val64 |= GPIO_INT_MASK_LINK_DOWN;
3322 writeq(val64, &bar0->gpio_int_mask);
3323 }
3324 }
3325}
3326
1da177e4
LT
3327/**
3328 * s2io_isr - ISR handler of the device .
3329 * @irq: the irq of the device.
3330 * @dev_id: a void pointer to the dev structure of the NIC.
3331 * @pt_regs: pointer to the registers pushed on the stack.
20346722
K
3332 * Description: This function is the ISR handler of the device. It
3333 * identifies the reason for the interrupt and calls the relevant
 3334 * service routines. As a contingency measure, this ISR allocates the
1da177e4
LT
3335 * recv buffers, if their numbers are below the panic value which is
3336 * presently set to 25% of the original number of rcv buffers allocated.
3337 * Return value:
20346722 3338 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
1da177e4
LT
3339 * IRQ_NONE: will be returned if interrupt is not from our device
3340 */
3341static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3342{
3343 struct net_device *dev = (struct net_device *) dev_id;
3344 nic_t *sp = dev->priv;
3345 XENA_dev_config_t __iomem *bar0 = sp->bar0;
20346722 3346 int i;
fe113638 3347 u64 reason = 0, val64;
1da177e4
LT
3348 mac_info_t *mac_control;
3349 struct config_param *config;
3350
7ba013ac 3351 atomic_inc(&sp->isr_cnt);
1da177e4
LT
3352 mac_control = &sp->mac_control;
3353 config = &sp->config;
3354
20346722 3355 /*
1da177e4
LT
3356 * Identify the cause for interrupt and call the appropriate
 3357 * interrupt handler. Causes for the interrupt could be:
3358 * 1. Rx of packet.
3359 * 2. Tx complete.
3360 * 3. Link down.
20346722 3361 * 4. Error in any functional blocks of the NIC.
1da177e4
LT
3362 */
3363 reason = readq(&bar0->general_int_status);
3364
3365 if (!reason) {
3366 /* The interrupt was not raised by Xena. */
7ba013ac 3367 atomic_dec(&sp->isr_cnt);
1da177e4
LT
3368 return IRQ_NONE;
3369 }
3370
1da177e4
LT
3371#ifdef CONFIG_S2IO_NAPI
3372 if (reason & GEN_INTR_RXTRAFFIC) {
3373 if (netif_rx_schedule_prep(dev)) {
3374 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3375 DISABLE_INTRS);
3376 __netif_rx_schedule(dev);
3377 }
3378 }
3379#else
3380 /* If Intr is because of Rx Traffic */
3381 if (reason & GEN_INTR_RXTRAFFIC) {
fe113638
K
3382 /*
3383 * rx_traffic_int reg is an R1 register, writing all 1's
3384 * will ensure that the actual interrupt causing bit gets
3385 * cleared and hence a read can be avoided.
3386 */
3387 val64 = 0xFFFFFFFFFFFFFFFFULL;
3388 writeq(val64, &bar0->rx_traffic_int);
20346722
K
3389 for (i = 0; i < config->rx_ring_num; i++) {
3390 rx_intr_handler(&mac_control->rings[i]);
3391 }
1da177e4
LT
3392 }
3393#endif
3394
20346722
K
3395 /* If Intr is because of Tx Traffic */
3396 if (reason & GEN_INTR_TXTRAFFIC) {
fe113638
K
3397 /*
3398 * tx_traffic_int reg is an R1 register, writing all 1's
3399 * will ensure that the actual interrupt causing bit gets
3400 * cleared and hence a read can be avoided.
3401 */
3402 val64 = 0xFFFFFFFFFFFFFFFFULL;
3403 writeq(val64, &bar0->tx_traffic_int);
3404
20346722
K
3405 for (i = 0; i < config->tx_fifo_num; i++)
3406 tx_intr_handler(&mac_control->fifos[i]);
3407 }
3408
a371a07d
K
3409 if (reason & GEN_INTR_TXPIC)
3410 s2io_txpic_intr_handle(sp);
20346722
K
3411 /*
3412 * If the Rx buffer count is below the panic threshold then
3413 * reallocate the buffers from the interrupt handler itself,
1da177e4
LT
3414 * else schedule a tasklet to reallocate the buffers.
3415 */
3416#ifndef CONFIG_S2IO_NAPI
3417 for (i = 0; i < config->rx_ring_num; i++) {
20346722 3418 int ret;
1da177e4
LT
3419 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3420 int level = rx_buffer_level(sp, rxb_size, i);
3421
3422 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3423 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3424 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3425 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3426 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3427 dev->name);
3428 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3429 clear_bit(0, (&sp->tasklet_status));
7ba013ac 3430 atomic_dec(&sp->isr_cnt);
1da177e4
LT
3431 return IRQ_HANDLED;
3432 }
3433 clear_bit(0, (&sp->tasklet_status));
3434 } else if (level == LOW) {
3435 tasklet_schedule(&sp->task);
3436 }
3437 }
3438#endif
3439
7ba013ac 3440 atomic_dec(&sp->isr_cnt);
1da177e4
LT
3441 return IRQ_HANDLED;
3442}
3443
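/*
 * A minimal sketch of the R1 (write-1-to-clear) idiom used in s2io_isr()
 * above for the Rx/Tx traffic interrupt registers.  The helper name is
 * illustrative only and is not used elsewhere in the driver.
 */
static inline void s2io_clear_r1_reg(u64 __iomem *reg)
{
	/*
	 * Writing all 1s clears whichever cause bits are currently set,
	 * so the usual read-modify-write (and an extra PCI read) is avoided.
	 */
	writeq(0xFFFFFFFFFFFFFFFFULL, reg);
}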
7ba013ac
K
3444/**
3445 * s2io_updt_stats - Triggers a one-shot update of the hardware statistics block.
3446 */
3447static void s2io_updt_stats(nic_t *sp)
3448{
3449 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3450 u64 val64;
3451 int cnt = 0;
3452
3453 if (atomic_read(&sp->card_state) == CARD_UP) {
3454 /* Apprx 30us on a 133 MHz bus */
3455 val64 = SET_UPDT_CLICKS(10) |
3456 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3457 writeq(val64, &bar0->stat_cfg);
3458 do {
3459 udelay(100);
3460 val64 = readq(&bar0->stat_cfg);
3461 if (!(val64 & BIT(0)))
3462 break;
3463 cnt++;
3464 if (cnt == 5)
3465 break; /* Updt failed */
3466 } while(1);
3467 }
3468}
3469
1da177e4 3470/**
20346722 3471 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
3472 * @dev : pointer to the device structure.
3473 * Description:
20346722 3474 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
3475 * structure and returns a pointer to the same.
3476 * Return value:
3477 * pointer to the updated net_device_stats structure.
3478 */
3479
20346722 3480struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4
LT
3481{
3482 nic_t *sp = dev->priv;
3483 mac_info_t *mac_control;
3484 struct config_param *config;
3485
20346722 3486
1da177e4
LT
3487 mac_control = &sp->mac_control;
3488 config = &sp->config;
3489
7ba013ac
K
3490 /* Configure Stats for immediate updt */
3491 s2io_updt_stats(sp);
3492
3493 sp->stats.tx_packets =
3494 le32_to_cpu(mac_control->stats_info->tmac_frms);
20346722
K
3495 sp->stats.tx_errors =
3496 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3497 sp->stats.rx_errors =
3498 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3499 sp->stats.multicast =
3500 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 3501 sp->stats.rx_length_errors =
20346722 3502 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
1da177e4
LT
3503
3504 return (&sp->stats);
3505}
3506
3507/**
3508 * s2io_set_multicast - entry point for multicast address enable/disable.
3509 * @dev : pointer to the device structure
3510 * Description:
20346722
K
3511 * This function is a driver entry point which gets called by the kernel
3512 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
3513 * called to set/reset promiscuous mode. Depending on the device flags, we
3514 * determine whether multicast addresses must be enabled or if promiscuous mode
3515 * is to be disabled etc.
3516 * Return value:
3517 * void.
3518 */
3519
3520static void s2io_set_multicast(struct net_device *dev)
3521{
3522 int i, j, prev_cnt;
3523 struct dev_mc_list *mclist;
3524 nic_t *sp = dev->priv;
3525 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3526 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3527 0xfeffffffffffULL;
3528 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3529 void __iomem *add;
3530
3531 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3532 /* Enable all Multicast addresses */
3533 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3534 &bar0->rmac_addr_data0_mem);
3535 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3536 &bar0->rmac_addr_data1_mem);
3537 val64 = RMAC_ADDR_CMD_MEM_WE |
3538 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3539 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3540 writeq(val64, &bar0->rmac_addr_cmd_mem);
3541 /* Wait till command completes */
3542 wait_for_cmd_complete(sp);
3543
3544 sp->m_cast_flg = 1;
3545 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3546 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3547 /* Disable all Multicast addresses */
3548 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3549 &bar0->rmac_addr_data0_mem);
5e25b9dd
K
3550 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3551 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3552 val64 = RMAC_ADDR_CMD_MEM_WE |
3553 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3554 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3555 writeq(val64, &bar0->rmac_addr_cmd_mem);
3556 /* Wait till command completes */
3557 wait_for_cmd_complete(sp);
3558
3559 sp->m_cast_flg = 0;
3560 sp->all_multi_pos = 0;
3561 }
3562
3563 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3564 /* Put the NIC into promiscuous mode */
3565 add = &bar0->mac_cfg;
3566 val64 = readq(&bar0->mac_cfg);
3567 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3568
3569 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3570 writel((u32) val64, add);
3571 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3572 writel((u32) (val64 >> 32), (add + 4));
3573
3574 val64 = readq(&bar0->mac_cfg);
3575 sp->promisc_flg = 1;
776bd20f 3576 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
1da177e4
LT
3577 dev->name);
3578 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3579 /* Remove the NIC from promiscuous mode */
3580 add = &bar0->mac_cfg;
3581 val64 = readq(&bar0->mac_cfg);
3582 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3583
3584 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3585 writel((u32) val64, add);
3586 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3587 writel((u32) (val64 >> 32), (add + 4));
3588
3589 val64 = readq(&bar0->mac_cfg);
3590 sp->promisc_flg = 0;
776bd20f 3591 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
1da177e4
LT
3592 dev->name);
3593 }
3594
3595 /* Update individual M_CAST address list */
3596 if ((!sp->m_cast_flg) && dev->mc_count) {
3597 if (dev->mc_count >
3598 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3599 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3600 dev->name);
3601 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3602 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3603 return;
3604 }
3605
3606 prev_cnt = sp->mc_addr_count;
3607 sp->mc_addr_count = dev->mc_count;
3608
3609 /* Clear out the previous list of Mc in the H/W. */
3610 for (i = 0; i < prev_cnt; i++) {
3611 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3612 &bar0->rmac_addr_data0_mem);
3613 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 3614 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3615 val64 = RMAC_ADDR_CMD_MEM_WE |
3616 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3617 RMAC_ADDR_CMD_MEM_OFFSET
3618 (MAC_MC_ADDR_START_OFFSET + i);
3619 writeq(val64, &bar0->rmac_addr_cmd_mem);
3620
3621 /* Wait till command completes */
3622 if (wait_for_cmd_complete(sp)) {
3623 DBG_PRINT(ERR_DBG, "%s: Adding ",
3624 dev->name);
3625 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3626 return;
3627 }
3628 }
3629
3630 /* Create the new Rx filter list and update the same in H/W. */
3631 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3632 i++, mclist = mclist->next) {
3633 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3634 ETH_ALEN);
3635 for (j = 0; j < ETH_ALEN; j++) {
3636 mac_addr |= mclist->dmi_addr[j];
3637 mac_addr <<= 8;
3638 }
3639 mac_addr >>= 8;
3640 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3641 &bar0->rmac_addr_data0_mem);
3642 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 3643 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3644 val64 = RMAC_ADDR_CMD_MEM_WE |
3645 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3646 RMAC_ADDR_CMD_MEM_OFFSET
3647 (i + MAC_MC_ADDR_START_OFFSET);
3648 writeq(val64, &bar0->rmac_addr_cmd_mem);
3649
3650 /* Wait till command completes */
3651 if (wait_for_cmd_complete(sp)) {
3652 DBG_PRINT(ERR_DBG, "%s: Adding ",
3653 dev->name);
3654 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3655 return;
3656 }
3657 }
3658 }
3659}
3660
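/*
 * A minimal sketch of the keyed split-write idiom used above for mac_cfg:
 * the 64-bit register is written as two 32-bit halves, and the RMAC
 * configuration key must be re-armed before each half.  The helper itself
 * is illustrative and not part of the driver.
 */
static void s2io_write_mac_cfg(XENA_dev_config_t __iomem *bar0, u64 val64)
{
	void __iomem *add = &bar0->mac_cfg;

	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);			/* lower 32 bits */
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));		/* upper 32 bits */
}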
3661/**
20346722 3662 * s2io_set_mac_addr - Programs the Xframe mac address
1da177e4
LT
3663 * @dev : pointer to the device structure.
3664 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 3665 * Description : This procedure will program the Xframe to receive
1da177e4 3666 * frames with new Mac Address
20346722 3667 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
3668 * as defined in errno.h file on failure.
3669 */
3670
3671int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3672{
3673 nic_t *sp = dev->priv;
3674 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3675 register u64 val64, mac_addr = 0;
3676 int i;
3677
20346722 3678 /*
1da177e4
LT
3679 * Set the new MAC address as the new unicast filter and reflect this
3680 * change on the device address registered with the OS. It will be
20346722 3681 * at offset 0.
1da177e4
LT
3682 */
3683 for (i = 0; i < ETH_ALEN; i++) {
3684 mac_addr <<= 8;
3685 mac_addr |= addr[i];
3686 }
3687
3688 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3689 &bar0->rmac_addr_data0_mem);
3690
3691 val64 =
3692 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3693 RMAC_ADDR_CMD_MEM_OFFSET(0);
3694 writeq(val64, &bar0->rmac_addr_cmd_mem);
3695 /* Wait till command completes */
3696 if (wait_for_cmd_complete(sp)) {
3697 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3698 return FAILURE;
3699 }
3700
3701 return SUCCESS;
3702}
3703
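/*
 * A minimal sketch of the address packing done in s2io_set_mac_addr() above:
 * the six bytes end up in the low 48 bits of the register value, first byte
 * most significant.  The helper name is illustrative only.
 */
static u64 s2io_mac_to_u64(const u8 *addr)
{
	u64 val = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		val = (val << 8) | addr[i];
	return val;
}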
3704/**
20346722 3705 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
3706 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3707 * @info: pointer to the structure with parameters given by ethtool to set
3708 * link information.
3709 * Description:
20346722 3710 * The function sets different link parameters provided by the user onto
1da177e4
LT
3711 * the NIC.
3712 * Return value:
3713 * 0 on success.
3714*/
3715
3716static int s2io_ethtool_sset(struct net_device *dev,
3717 struct ethtool_cmd *info)
3718{
3719 nic_t *sp = dev->priv;
3720 if ((info->autoneg == AUTONEG_ENABLE) ||
3721 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3722 return -EINVAL;
3723 else {
3724 s2io_close(sp->dev);
3725 s2io_open(sp->dev);
3726 }
3727
3728 return 0;
3729}
3730
3731/**
3732 * s2io_ethtool_gset - Returns link specific information.
1da177e4
LT
3733 * @sp : private member of the device structure, pointer to the
3734 * s2io_nic structure.
3735 * @info : pointer to the structure with parameters given by ethtool
3736 * to return link information.
3737 * Description:
3738 * Returns link specific information like speed, duplex etc.. to ethtool.
3739 * Return value :
3740 * return 0 on success.
3741 */
3742
3743static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3744{
3745 nic_t *sp = dev->priv;
3746 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3747 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3748 info->port = PORT_FIBRE;
3749 /* info->transceiver?? TODO */
3750
3751 if (netif_carrier_ok(sp->dev)) {
3752 info->speed = 10000;
3753 info->duplex = DUPLEX_FULL;
3754 } else {
3755 info->speed = -1;
3756 info->duplex = -1;
3757 }
3758
3759 info->autoneg = AUTONEG_DISABLE;
3760 return 0;
3761}
3762
3763/**
20346722
K
3764 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3765 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3766 * s2io_nic structure.
3767 * @info : pointer to the structure with parameters given by ethtool to
3768 * return driver information.
3769 * Description:
3770 * Returns driver specific information like name, version etc. to ethtool.
3771 * Return value:
3772 * void
3773 */
3774
3775static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3776 struct ethtool_drvinfo *info)
3777{
3778 nic_t *sp = dev->priv;
3779
3780 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3781 strncpy(info->version, s2io_driver_version,
3782 sizeof(s2io_driver_version));
3783 strncpy(info->fw_version, "", 32);
3784 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3785 info->regdump_len = XENA_REG_SPACE;
3786 info->eedump_len = XENA_EEPROM_SPACE;
3787 info->testinfo_len = S2IO_TEST_LEN;
3788 info->n_stats = S2IO_STAT_LEN;
3789}
3790
3791/**
3792 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
20346722 3793 * @sp: private member of the device structure, which is a pointer to the
1da177e4 3794 * s2io_nic structure.
20346722 3795 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
3796 * dumping the registers.
3797 * @reg_space: The input argument into which all the registers are dumped.
3798 * Description:
3799 * Dumps the entire register space of the Xframe NIC into the user given
3800 * buffer area.
3801 * Return value :
3802 * void .
3803*/
3804
3805static void s2io_ethtool_gregs(struct net_device *dev,
3806 struct ethtool_regs *regs, void *space)
3807{
3808 int i;
3809 u64 reg;
3810 u8 *reg_space = (u8 *) space;
3811 nic_t *sp = dev->priv;
3812
3813 regs->len = XENA_REG_SPACE;
3814 regs->version = sp->pdev->subsystem_device;
3815
3816 for (i = 0; i < regs->len; i += 8) {
3817 reg = readq(sp->bar0 + i);
3818 memcpy((reg_space + i), &reg, 8);
3819 }
3820}
3821
3822/**
3823 * s2io_phy_id - timer function that alternates adapter LED.
20346722 3824 * @data : address of the private member of the device structure, which
1da177e4 3825 * is a pointer to the s2io_nic structure, provided as an u32.
20346722
K
3826 * Description: This is the timer function that toggles the adapter LED
3827 * bit in the adapter control register on every
3828 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
1da177e4
LT
3829 * once every second.
3830*/
3831static void s2io_phy_id(unsigned long data)
3832{
3833 nic_t *sp = (nic_t *) data;
3834 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3835 u64 val64 = 0;
3836 u16 subid;
3837
3838 subid = sp->pdev->subsystem_device;
541ae68f
K
3839 if ((sp->device_type == XFRAME_II_DEVICE) ||
3840 ((subid & 0xFF) >= 0x07)) {
1da177e4
LT
3841 val64 = readq(&bar0->gpio_control);
3842 val64 ^= GPIO_CTRL_GPIO_0;
3843 writeq(val64, &bar0->gpio_control);
3844 } else {
3845 val64 = readq(&bar0->adapter_control);
3846 val64 ^= ADAPTER_LED_ON;
3847 writeq(val64, &bar0->adapter_control);
3848 }
3849
3850 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3851}
3852
3853/**
3854 * s2io_ethtool_idnic - To physically identify the nic on the system.
3855 * @sp : private member of the device structure, which is a pointer to the
3856 * s2io_nic structure.
20346722 3857 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
3858 * ethtool.
3859 * Description: Used to physically identify the NIC on the system.
20346722 3860 * The Link LED will blink for a time specified by the user for
1da177e4 3861 * identification.
20346722 3862 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
3863 * identification is possible only if its link is up.
3864 * Return value:
3865 * int , returns 0 on success
3866 */
3867
3868static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3869{
3870 u64 val64 = 0, last_gpio_ctrl_val;
3871 nic_t *sp = dev->priv;
3872 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3873 u16 subid;
3874
3875 subid = sp->pdev->subsystem_device;
3876 last_gpio_ctrl_val = readq(&bar0->gpio_control);
541ae68f
K
3877 if ((sp->device_type == XFRAME_I_DEVICE) &&
3878 ((subid & 0xFF) < 0x07)) {
1da177e4
LT
3879 val64 = readq(&bar0->adapter_control);
3880 if (!(val64 & ADAPTER_CNTL_EN)) {
3881 printk(KERN_ERR
3882 "Adapter Link down, cannot blink LED\n");
3883 return -EFAULT;
3884 }
3885 }
3886 if (sp->id_timer.function == NULL) {
3887 init_timer(&sp->id_timer);
3888 sp->id_timer.function = s2io_phy_id;
3889 sp->id_timer.data = (unsigned long) sp;
3890 }
3891 mod_timer(&sp->id_timer, jiffies);
3892 if (data)
20346722 3893 msleep_interruptible(data * HZ);
1da177e4 3894 else
20346722 3895 msleep_interruptible(MAX_FLICKER_TIME);
1da177e4
LT
3896 del_timer_sync(&sp->id_timer);
3897
541ae68f 3898 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
1da177e4
LT
3899 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3900 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3901 }
3902
3903 return 0;
3904}
3905
3906/**
3907 * s2io_ethtool_getpause_data - Pause frame generation and reception.
20346722
K
3908 * @sp : private member of the device structure, which is a pointer to the
3909 * s2io_nic structure.
1da177e4
LT
3910 * @ep : pointer to the structure with pause parameters given by ethtool.
3911 * Description:
3912 * Returns the Pause frame generation and reception capability of the NIC.
3913 * Return value:
3914 * void
3915 */
3916static void s2io_ethtool_getpause_data(struct net_device *dev,
3917 struct ethtool_pauseparam *ep)
3918{
3919 u64 val64;
3920 nic_t *sp = dev->priv;
3921 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3922
3923 val64 = readq(&bar0->rmac_pause_cfg);
3924 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3925 ep->tx_pause = TRUE;
3926 if (val64 & RMAC_PAUSE_RX_ENABLE)
3927 ep->rx_pause = TRUE;
3928 ep->autoneg = FALSE;
3929}
3930
3931/**
3932 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 3933 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3934 * s2io_nic structure.
3935 * @ep : pointer to the structure with pause parameters given by ethtool.
3936 * Description:
3937 * It can be used to set or reset Pause frame generation or reception
3938 * support of the NIC.
3939 * Return value:
3940 * int, returns 0 on Success
3941 */
3942
3943static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 3944 struct ethtool_pauseparam *ep)
1da177e4
LT
3945{
3946 u64 val64;
3947 nic_t *sp = dev->priv;
3948 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3949
3950 val64 = readq(&bar0->rmac_pause_cfg);
3951 if (ep->tx_pause)
3952 val64 |= RMAC_PAUSE_GEN_ENABLE;
3953 else
3954 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3955 if (ep->rx_pause)
3956 val64 |= RMAC_PAUSE_RX_ENABLE;
3957 else
3958 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3959 writeq(val64, &bar0->rmac_pause_cfg);
3960 return 0;
3961}
3962
3963/**
3964 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 3965 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3966 * s2io_nic structure.
3967 * @off : offset at which the data must be written
3968 * @data : It is an output parameter where the data read at the given
20346722 3969 * offset is stored.
1da177e4 3970 * Description:
20346722 3971 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
3972 * read data.
3973 * NOTE: Only the part of the EEPROM visible through the
3974 * I2C bus can be read.
3975 * Return value:
3976 * -1 on failure and 0 on success.
3977 */
3978
3979#define S2IO_DEV_ID 5
3980static int read_eeprom(nic_t * sp, int off, u32 * data)
3981{
3982 int ret = -1;
3983 u32 exit_cnt = 0;
3984 u64 val64;
3985 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3986
3987 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3988 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3989 I2C_CONTROL_CNTL_START;
3990 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3991
3992 while (exit_cnt < 5) {
3993 val64 = readq(&bar0->i2c_control);
3994 if (I2C_CONTROL_CNTL_END(val64)) {
3995 *data = I2C_CONTROL_GET_DATA(val64);
3996 ret = 0;
3997 break;
3998 }
3999 msleep(50);
4000 exit_cnt++;
4001 }
4002
4003 return ret;
4004}
4005
4006/**
4007 * write_eeprom - actually writes the relevant part of the data value.
4008 * @sp : private member of the device structure, which is a pointer to the
4009 * s2io_nic structure.
4010 * @off : offset at which the data must be written
4011 * @data : The data that is to be written
20346722 4012 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
4013 * the Eeprom. (max of 3)
4014 * Description:
4015 * Actually writes the relevant part of the data value into the Eeprom
4016 * through the I2C bus.
4017 * Return value:
4018 * 0 on success, -1 on failure.
4019 */
4020
4021static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
4022{
4023 int exit_cnt = 0, ret = -1;
4024 u64 val64;
4025 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4026
4027 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4028 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
4029 I2C_CONTROL_CNTL_START;
4030 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4031
4032 while (exit_cnt < 5) {
4033 val64 = readq(&bar0->i2c_control);
4034 if (I2C_CONTROL_CNTL_END(val64)) {
4035 if (!(val64 & I2C_CONTROL_NACK))
4036 ret = 0;
4037 break;
4038 }
4039 msleep(50);
4040 exit_cnt++;
4041 }
4042
4043 return ret;
4044}
4045
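/*
 * A minimal usage sketch of the two EEPROM helpers above, mirroring the way
 * s2io_ethtool_seeprom() below drives them for a single byte: the byte is
 * placed in the top 8 bits of the 32-bit data word.  The wrapper is
 * illustrative and not part of the driver.
 */
static int s2io_eeprom_write_byte(nic_t *sp, int off, u8 byte)
{
	u32 val = byte ? ((u32) byte << 24) : 0;

	return write_eeprom(sp, off, val, 0);
}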
4046/**
4047 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
4048 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
20346722 4049 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
4050 * containing all relevant information.
4051 * @data_buf : user defined value to be written into Eeprom.
4052 * Description: Reads the values stored in the Eeprom at given offset
4053 * for a given length. Stores these values in the input argument data
4054 * buffer 'data_buf' and returns these to the caller (ethtool.)
4055 * Return value:
4056 * int 0 on success
4057 */
4058
4059static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 4060 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4
LT
4061{
4062 u32 data, i, valid;
4063 nic_t *sp = dev->priv;
4064
4065 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4066
4067 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4068 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4069
4070 for (i = 0; i < eeprom->len; i += 4) {
4071 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4072 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4073 return -EFAULT;
4074 }
4075 valid = INV(data);
4076 memcpy((data_buf + i), &valid, 4);
4077 }
4078 return 0;
4079}
4080
4081/**
4082 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4083 * @sp : private member of the device structure, which is a pointer to the
4084 * s2io_nic structure.
20346722 4085 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
4086 * containing all relevant information.
4087 * @data_buf : user defined value to be written into Eeprom.
4088 * Description:
4089 * Tries to write the user provided value in the Eeprom, at the offset
4090 * given by the user.
4091 * Return value:
4092 * 0 on success, -EFAULT on failure.
4093 */
4094
4095static int s2io_ethtool_seeprom(struct net_device *dev,
4096 struct ethtool_eeprom *eeprom,
4097 u8 * data_buf)
4098{
4099 int len = eeprom->len, cnt = 0;
4100 u32 valid = 0, data;
4101 nic_t *sp = dev->priv;
4102
4103 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4104 DBG_PRINT(ERR_DBG,
4105 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4106 DBG_PRINT(ERR_DBG, "is wrong, it's not 0x%x\n",
4107 eeprom->magic);
4108 return -EFAULT;
4109 }
4110
4111 while (len) {
4112 data = (u32) data_buf[cnt] & 0x000000FF;
4113 if (data) {
4114 valid = (u32) (data << 24);
4115 } else
4116 valid = data;
4117
4118 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4119 DBG_PRINT(ERR_DBG,
4120 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4121 DBG_PRINT(ERR_DBG,
4122 "write into the specified offset\n");
4123 return -EFAULT;
4124 }
4125 cnt++;
4126 len--;
4127 }
4128
4129 return 0;
4130}
4131
4132/**
20346722
K
4133 * s2io_register_test - reads and writes into all clock domains.
4134 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4135 * s2io_nic structure.
4136 * @data : variable that returns the result of each of the tests conducted
4137 * by the driver.
4138 * Description:
4139 * Read and write into all clock domains. The NIC has 3 clock domains,
4140 * see that registers in all the three regions are accessible.
4141 * Return value:
4142 * 0 on success.
4143 */
4144
4145static int s2io_register_test(nic_t * sp, uint64_t * data)
4146{
4147 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4148 u64 val64 = 0;
4149 int fail = 0;
4150
20346722
K
4151 val64 = readq(&bar0->pif_rd_swapper_fb);
4152 if (val64 != 0x123456789abcdefULL) {
1da177e4
LT
4153 fail = 1;
4154 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4155 }
4156
4157 val64 = readq(&bar0->rmac_pause_cfg);
4158 if (val64 != 0xc000ffff00000000ULL) {
4159 fail = 1;
4160 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4161 }
4162
4163 val64 = readq(&bar0->rx_queue_cfg);
4164 if (val64 != 0x0808080808080808ULL) {
4165 fail = 1;
4166 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4167 }
4168
4169 val64 = readq(&bar0->xgxs_efifo_cfg);
4170 if (val64 != 0x000000001923141EULL) {
4171 fail = 1;
4172 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
4173 }
4174
4175 val64 = 0x5A5A5A5A5A5A5A5AULL;
4176 writeq(val64, &bar0->xmsi_data);
4177 val64 = readq(&bar0->xmsi_data);
4178 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4179 fail = 1;
4180 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4181 }
4182
4183 val64 = 0xA5A5A5A5A5A5A5A5ULL;
4184 writeq(val64, &bar0->xmsi_data);
4185 val64 = readq(&bar0->xmsi_data);
4186 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4187 fail = 1;
4188 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4189 }
4190
4191 *data = fail;
4192 return 0;
4193}
4194
4195/**
4196 * s2io_eeprom_test - to verify that EEPROM in the xena can be programmed.
1da177e4
LT
4197 * @sp : private member of the device structure, which is a pointer to the
4198 * s2io_nic structure.
4199 * @data:variable that returns the result of each of the test conducted by
4200 * the driver.
4201 * Description:
20346722 4202 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
4203 * register.
4204 * Return value:
4205 * 0 on success.
4206 */
4207
4208static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4209{
4210 int fail = 0;
4211 u32 ret_data;
4212
4213 /* Test Write Error at offset 0 */
4214 if (!write_eeprom(sp, 0, 0, 3))
4215 fail = 1;
4216
4217 /* Test Write at offset 4f0 */
4218 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4219 fail = 1;
4220 if (read_eeprom(sp, 0x4F0, &ret_data))
4221 fail = 1;
4222
4223 if (ret_data != 0x01234567)
4224 fail = 1;
4225
4226 /* Reset the EEPROM data to FFFF */
4227 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4228
4229 /* Test Write Request Error at offset 0x7c */
4230 if (!write_eeprom(sp, 0x07C, 0, 3))
4231 fail = 1;
4232
4233 /* Test Write Request at offset 0x7fc */
4234 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4235 fail = 1;
4236 if (read_eeprom(sp, 0x7FC, &ret_data))
4237 fail = 1;
4238
4239 if (ret_data != 0x01234567)
4240 fail = 1;
4241
4242 /* Reset the EEPROM data to FFFF */
4243 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4244
4245 /* Test Write Error at offset 0x80 */
4246 if (!write_eeprom(sp, 0x080, 0, 3))
4247 fail = 1;
4248
4249 /* Test Write Error at offset 0xfc */
4250 if (!write_eeprom(sp, 0x0FC, 0, 3))
4251 fail = 1;
4252
4253 /* Test Write Error at offset 0x100 */
4254 if (!write_eeprom(sp, 0x100, 0, 3))
4255 fail = 1;
4256
4257 /* Test Write Error at offset 4ec */
4258 if (!write_eeprom(sp, 0x4EC, 0, 3))
4259 fail = 1;
4260
4261 *data = fail;
4262 return 0;
4263}
4264
4265/**
4266 * s2io_bist_test - invokes the MemBist test of the card .
20346722 4267 * @sp : private member of the device structure, which is a pointer to the
1da177e4 4268 * s2io_nic structure.
20346722 4269 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
4270 * the driver.
4271 * Description:
4272 * This invokes the MemBist test of the card. We give around
4273 * 2 secs time for the Test to complete. If it's still not complete
4274 * within this period, we consider that the test failed.
1da177e4
LT
4275 * Return value:
4276 * 0 on success and -1 on failure.
4277 */
4278
4279static int s2io_bist_test(nic_t * sp, uint64_t * data)
4280{
4281 u8 bist = 0;
4282 int cnt = 0, ret = -1;
4283
4284 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4285 bist |= PCI_BIST_START;
4286 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
4287
4288 while (cnt < 20) {
4289 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4290 if (!(bist & PCI_BIST_START)) {
4291 *data = (bist & PCI_BIST_CODE_MASK);
4292 ret = 0;
4293 break;
4294 }
4295 msleep(100);
4296 cnt++;
4297 }
4298
4299 return ret;
4300}
4301
4302/**
20346722
K
4303 * s2io_link_test - verifies the link state of the nic
4304 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4305 * s2io_nic structure.
4306 * @data: variable that returns the result of each of the test conducted by
4307 * the driver.
4308 * Description:
20346722 4309 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
4310 * argument 'data' appropriately.
4311 * Return value:
4312 * 0 on success.
4313 */
4314
4315static int s2io_link_test(nic_t * sp, uint64_t * data)
4316{
4317 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4318 u64 val64;
4319
4320 val64 = readq(&bar0->adapter_status);
4321 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4322 *data = 1;
4323
4324 return 0;
4325}
4326
4327/**
20346722
K
4328 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4329 * @sp : private member of the device structure, which is a pointer to the
1da177e4 4330 * s2io_nic structure.
4331 * @data : variable that returns the result of each of the test
1da177e4
LT
4332 * conducted by the driver.
4333 * Description:
4334 * This is one of the offline tests that checks the read and write
1da177e4
LT
4335 * access to the RldRam chip on the NIC.
4336 * Return value:
4337 * 0 on success.
4338 */
4339
4340static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4341{
4342 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4343 u64 val64;
4344 int cnt, iteration = 0, test_pass = 0;
4345
4346 val64 = readq(&bar0->adapter_control);
4347 val64 &= ~ADAPTER_ECC_EN;
4348 writeq(val64, &bar0->adapter_control);
4349
4350 val64 = readq(&bar0->mc_rldram_test_ctrl);
4351 val64 |= MC_RLDRAM_TEST_MODE;
4352 writeq(val64, &bar0->mc_rldram_test_ctrl);
4353
4354 val64 = readq(&bar0->mc_rldram_mrs);
4355 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4356 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4357
4358 val64 |= MC_RLDRAM_MRS_ENABLE;
4359 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4360
4361 while (iteration < 2) {
4362 val64 = 0x55555555aaaa0000ULL;
4363 if (iteration == 1) {
4364 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4365 }
4366 writeq(val64, &bar0->mc_rldram_test_d0);
4367
4368 val64 = 0xaaaa5a5555550000ULL;
4369 if (iteration == 1) {
4370 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4371 }
4372 writeq(val64, &bar0->mc_rldram_test_d1);
4373
4374 val64 = 0x55aaaaaaaa5a0000ULL;
4375 if (iteration == 1) {
4376 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4377 }
4378 writeq(val64, &bar0->mc_rldram_test_d2);
4379
4380 val64 = (u64) (0x0000003fffff0000ULL);
4381 writeq(val64, &bar0->mc_rldram_test_add);
4382
4383
4384 val64 = MC_RLDRAM_TEST_MODE;
4385 writeq(val64, &bar0->mc_rldram_test_ctrl);
4386
4387 val64 |=
4388 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4389 MC_RLDRAM_TEST_GO;
4390 writeq(val64, &bar0->mc_rldram_test_ctrl);
4391
4392 for (cnt = 0; cnt < 5; cnt++) {
4393 val64 = readq(&bar0->mc_rldram_test_ctrl);
4394 if (val64 & MC_RLDRAM_TEST_DONE)
4395 break;
4396 msleep(200);
4397 }
4398
4399 if (cnt == 5)
4400 break;
4401
4402 val64 = MC_RLDRAM_TEST_MODE;
4403 writeq(val64, &bar0->mc_rldram_test_ctrl);
4404
4405 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4406 writeq(val64, &bar0->mc_rldram_test_ctrl);
4407
4408 for (cnt = 0; cnt < 5; cnt++) {
4409 val64 = readq(&bar0->mc_rldram_test_ctrl);
4410 if (val64 & MC_RLDRAM_TEST_DONE)
4411 break;
4412 msleep(500);
4413 }
4414
4415 if (cnt == 5)
4416 break;
4417
4418 val64 = readq(&bar0->mc_rldram_test_ctrl);
4419 if (val64 & MC_RLDRAM_TEST_PASS)
4420 test_pass = 1;
4421
4422 iteration++;
4423 }
4424
4425 if (!test_pass)
4426 *data = 1;
4427 else
4428 *data = 0;
4429
4430 return 0;
4431}
4432
4433/**
4434 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
4435 * @sp : private member of the device structure, which is a pointer to the
4436 * s2io_nic structure.
4437 * @ethtest : pointer to a ethtool command specific structure that will be
4438 * returned to the user.
20346722 4439 * @data : variable that returns the result of each of the test
1da177e4
LT
4440 * conducted by the driver.
4441 * Description:
4442 * This function conducts 6 tests ( 4 offline and 2 online) to determine
4443 * the health of the card.
4444 * Return value:
4445 * void
4446 */
4447
4448static void s2io_ethtool_test(struct net_device *dev,
4449 struct ethtool_test *ethtest,
4450 uint64_t * data)
4451{
4452 nic_t *sp = dev->priv;
4453 int orig_state = netif_running(sp->dev);
4454
4455 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4456 /* Offline Tests. */
20346722 4457 if (orig_state)
1da177e4 4458 s2io_close(sp->dev);
1da177e4
LT
4459
4460 if (s2io_register_test(sp, &data[0]))
4461 ethtest->flags |= ETH_TEST_FL_FAILED;
4462
4463 s2io_reset(sp);
1da177e4
LT
4464
4465 if (s2io_rldram_test(sp, &data[3]))
4466 ethtest->flags |= ETH_TEST_FL_FAILED;
4467
4468 s2io_reset(sp);
1da177e4
LT
4469
4470 if (s2io_eeprom_test(sp, &data[1]))
4471 ethtest->flags |= ETH_TEST_FL_FAILED;
4472
4473 if (s2io_bist_test(sp, &data[4]))
4474 ethtest->flags |= ETH_TEST_FL_FAILED;
4475
4476 if (orig_state)
4477 s2io_open(sp->dev);
4478
4479 data[2] = 0;
4480 } else {
4481 /* Online Tests. */
4482 if (!orig_state) {
4483 DBG_PRINT(ERR_DBG,
4484 "%s: is not up, cannot run test\n",
4485 dev->name);
4486 data[0] = -1;
4487 data[1] = -1;
4488 data[2] = -1;
4489 data[3] = -1;
4490 data[4] = -1;
4491 }
4492
4493 if (s2io_link_test(sp, &data[2]))
4494 ethtest->flags |= ETH_TEST_FL_FAILED;
4495
4496 data[0] = 0;
4497 data[1] = 0;
4498 data[3] = 0;
4499 data[4] = 0;
4500 }
4501}
4502
4503static void s2io_get_ethtool_stats(struct net_device *dev,
4504 struct ethtool_stats *estats,
4505 u64 * tmp_stats)
4506{
4507 int i = 0;
4508 nic_t *sp = dev->priv;
4509 StatInfo_t *stat_info = sp->mac_control.stats_info;
4510
7ba013ac 4511 s2io_updt_stats(sp);
541ae68f
K
4512 tmp_stats[i++] =
4513 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4514 le32_to_cpu(stat_info->tmac_frms);
4515 tmp_stats[i++] =
4516 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4517 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 4518 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
541ae68f
K
4519 tmp_stats[i++] =
4520 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4521 le32_to_cpu(stat_info->tmac_mcst_frms);
4522 tmp_stats[i++] =
4523 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4524 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 4525 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
541ae68f
K
4526 tmp_stats[i++] =
4527 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4528 le32_to_cpu(stat_info->tmac_any_err_frms);
1da177e4 4529 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
541ae68f
K
4530 tmp_stats[i++] =
4531 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4532 le32_to_cpu(stat_info->tmac_vld_ip);
4533 tmp_stats[i++] =
4534 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4535 le32_to_cpu(stat_info->tmac_drop_ip);
4536 tmp_stats[i++] =
4537 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4538 le32_to_cpu(stat_info->tmac_icmp);
4539 tmp_stats[i++] =
4540 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4541 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 4542 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
541ae68f
K
4543 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4544 le32_to_cpu(stat_info->tmac_udp);
4545 tmp_stats[i++] =
4546 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4547 le32_to_cpu(stat_info->rmac_vld_frms);
4548 tmp_stats[i++] =
4549 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4550 le32_to_cpu(stat_info->rmac_data_octets);
1da177e4
LT
4551 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4552 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
541ae68f
K
4553 tmp_stats[i++] =
4554 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4555 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4556 tmp_stats[i++] =
4557 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4558 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4
LT
4559 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4560 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4561 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
541ae68f
K
4562 tmp_stats[i++] =
4563 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4564 le32_to_cpu(stat_info->rmac_discarded_frms);
4565 tmp_stats[i++] =
4566 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4567 le32_to_cpu(stat_info->rmac_usized_frms);
4568 tmp_stats[i++] =
4569 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4570 le32_to_cpu(stat_info->rmac_osized_frms);
4571 tmp_stats[i++] =
4572 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4573 le32_to_cpu(stat_info->rmac_frag_frms);
4574 tmp_stats[i++] =
4575 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4576 le32_to_cpu(stat_info->rmac_jabber_frms);
4577 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4578 le32_to_cpu(stat_info->rmac_ip);
1da177e4
LT
4579 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4580 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
541ae68f
K
4581 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4582 le32_to_cpu(stat_info->rmac_drop_ip);
4583 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4584 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 4585 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
541ae68f
K
4586 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4587 le32_to_cpu(stat_info->rmac_udp);
4588 tmp_stats[i++] =
4589 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4590 le32_to_cpu(stat_info->rmac_err_drp_udp);
4591 tmp_stats[i++] =
4592 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4593 le32_to_cpu(stat_info->rmac_pause_cnt);
4594 tmp_stats[i++] =
4595 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4596 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 4597 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
7ba013ac
K
4598 tmp_stats[i++] = 0;
4599 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4600 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
1da177e4
LT
4601}
4602
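/*
 * A minimal sketch of the counter folding used throughout the routine above:
 * each 32-bit hardware counter and its separate overflow counter are combined
 * into one 64-bit ethtool statistic.  The helper name is illustrative only.
 */
static inline u64 s2io_stat64(u32 oflow, u32 cnt)
{
	return ((u64) le32_to_cpu(oflow) << 32) | le32_to_cpu(cnt);
}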
20346722 4603int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
4604{
4605 return (XENA_REG_SPACE);
4606}
4607
4608
20346722 4609u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4
LT
4610{
4611 nic_t *sp = dev->priv;
4612
4613 return (sp->rx_csum);
4614}
20346722 4615int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4616{
4617 nic_t *sp = dev->priv;
4618
4619 if (data)
4620 sp->rx_csum = 1;
4621 else
4622 sp->rx_csum = 0;
4623
4624 return 0;
4625}
20346722 4626int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
4627{
4628 return (XENA_EEPROM_SPACE);
4629}
4630
20346722 4631int s2io_ethtool_self_test_count(struct net_device *dev)
1da177e4
LT
4632{
4633 return (S2IO_TEST_LEN);
4634}
20346722
K
4635void s2io_ethtool_get_strings(struct net_device *dev,
4636 u32 stringset, u8 * data)
1da177e4
LT
4637{
4638 switch (stringset) {
4639 case ETH_SS_TEST:
4640 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4641 break;
4642 case ETH_SS_STATS:
4643 memcpy(data, &ethtool_stats_keys,
4644 sizeof(ethtool_stats_keys));
4645 }
4646}
1da177e4
LT
4647static int s2io_ethtool_get_stats_count(struct net_device *dev)
4648{
4649 return (S2IO_STAT_LEN);
4650}
4651
20346722 4652int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4653{
4654 if (data)
4655 dev->features |= NETIF_F_IP_CSUM;
4656 else
4657 dev->features &= ~NETIF_F_IP_CSUM;
4658
4659 return 0;
4660}
4661
4662
4663static struct ethtool_ops netdev_ethtool_ops = {
4664 .get_settings = s2io_ethtool_gset,
4665 .set_settings = s2io_ethtool_sset,
4666 .get_drvinfo = s2io_ethtool_gdrvinfo,
4667 .get_regs_len = s2io_ethtool_get_regs_len,
4668 .get_regs = s2io_ethtool_gregs,
4669 .get_link = ethtool_op_get_link,
4670 .get_eeprom_len = s2io_get_eeprom_len,
4671 .get_eeprom = s2io_ethtool_geeprom,
4672 .set_eeprom = s2io_ethtool_seeprom,
4673 .get_pauseparam = s2io_ethtool_getpause_data,
4674 .set_pauseparam = s2io_ethtool_setpause_data,
4675 .get_rx_csum = s2io_ethtool_get_rx_csum,
4676 .set_rx_csum = s2io_ethtool_set_rx_csum,
4677 .get_tx_csum = ethtool_op_get_tx_csum,
4678 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4679 .get_sg = ethtool_op_get_sg,
4680 .set_sg = ethtool_op_set_sg,
4681#ifdef NETIF_F_TSO
4682 .get_tso = ethtool_op_get_tso,
4683 .set_tso = ethtool_op_set_tso,
4684#endif
4685 .self_test_count = s2io_ethtool_self_test_count,
4686 .self_test = s2io_ethtool_test,
4687 .get_strings = s2io_ethtool_get_strings,
4688 .phys_id = s2io_ethtool_idnic,
4689 .get_stats_count = s2io_ethtool_get_stats_count,
4690 .get_ethtool_stats = s2io_get_ethtool_stats
4691};
4692
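/*
 * A minimal sketch of how the ops table above is hooked up to a net_device.
 * In this driver the hookup happens in the probe path elsewhere in the file;
 * on 2.6 kernels of this vintage it is typically done as below.  The wrapper
 * function is illustrative only.
 */
static void s2io_attach_ethtool_ops(struct net_device *dev)
{
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
}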
4693/**
20346722 4694 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
4695 * @dev : Device pointer.
4696 * @rq : An IOCTL specific structure, that can contain a pointer to
4697 * a proprietary structure used to pass information to the driver.
4698 * @cmd : This is used to distinguish between the different commands that
4699 * can be passed to the IOCTL functions.
4700 * Description:
20346722
K
4701 * Currently no special functionality is supported in IOCTL, hence the
4702 * function always returns -EOPNOTSUPP.
1da177e4
LT
4703 */
4704
20346722 4705int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
4706{
4707 return -EOPNOTSUPP;
4708}
4709
4710/**
4711 * s2io_change_mtu - entry point to change MTU size for the device.
4712 * @dev : device pointer.
4713 * @new_mtu : the new MTU size for the device.
4714 * Description: A driver entry point to change MTU size for the device.
4715 * Before changing the MTU the device must be stopped.
4716 * Return value:
4717 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4718 * file on failure.
4719 */
4720
20346722 4721int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4
LT
4722{
4723 nic_t *sp = dev->priv;
1da177e4
LT
4724
4725 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4726 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4727 dev->name);
4728 return -EPERM;
4729 }
4730
1da177e4 4731 dev->mtu = new_mtu;
d8892c6e
K
4732 if (netif_running(dev)) {
4733 s2io_card_down(sp);
4734 netif_stop_queue(dev);
4735 if (s2io_card_up(sp)) {
4736 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4737 __FUNCTION__);
4738 }
4739 if (netif_queue_stopped(dev))
4740 netif_wake_queue(dev);
4741 } else { /* Device is down */
4742 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4743 u64 val64 = new_mtu;
4744
4745 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4746 }
1da177e4
LT
4747
4748 return 0;
4749}
4750
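/*
 * A minimal sketch of how this entry point is wired up.  In this driver the
 * assignment is made in the probe path elsewhere in the file; kernels of
 * this vintage hook the callback directly on the net_device.  The wrapper
 * function is illustrative only.
 */
static void s2io_attach_change_mtu(struct net_device *dev)
{
	dev->change_mtu = &s2io_change_mtu;
}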
4751/**
4752 * s2io_tasklet - Bottom half of the ISR.
4753 * @dev_adr : address of the device structure in dma_addr_t format.
4754 * Description:
4755 * This is the tasklet or the bottom half of the ISR. This is
20346722 4756 * an extension of the ISR which is scheduled by the scheduler to be run
1da177e4 4757 * when the load on the CPU is low. All low priority tasks of the ISR can
20346722 4758 * be pushed into the tasklet. For now the tasklet is used only to
1da177e4
LT
4759 * replenish the Rx buffers in the Rx buffer descriptors.
4760 * Return value:
4761 * void.
4762 */
4763
4764static void s2io_tasklet(unsigned long dev_addr)
4765{
4766 struct net_device *dev = (struct net_device *) dev_addr;
4767 nic_t *sp = dev->priv;
4768 int i, ret;
4769 mac_info_t *mac_control;
4770 struct config_param *config;
4771
4772 mac_control = &sp->mac_control;
4773 config = &sp->config;
4774
4775 if (!TASKLET_IN_USE) {
4776 for (i = 0; i < config->rx_ring_num; i++) {
4777 ret = fill_rx_buffers(sp, i);
4778 if (ret == -ENOMEM) {
4779 DBG_PRINT(ERR_DBG, "%s: Out of ",
4780 dev->name);
4781 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4782 break;
4783 } else if (ret == -EFILL) {
4784 DBG_PRINT(ERR_DBG,
4785 "%s: Rx Ring %d is full\n",
4786 dev->name, i);
4787 break;
4788 }
4789 }
4790 clear_bit(0, (&sp->tasklet_status));
4791 }
4792}
4793
4794/**
4795 * s2io_set_link - Set the Link status
4796 * @data: long pointer to device private structure
4797 * Description: Sets the link status for the adapter
4798 */
4799
4800static void s2io_set_link(unsigned long data)
4801{
4802 nic_t *nic = (nic_t *) data;
4803 struct net_device *dev = nic->dev;
4804 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4805 register u64 val64;
4806 u16 subid;
4807
4808 if (test_and_set_bit(0, &(nic->link_state))) {
4809 /* The card is being reset, no point doing anything */
4810 return;
4811 }
4812
4813 subid = nic->pdev->subsystem_device;
a371a07d
K
4814 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
4815 /*
4816 * Allow a small delay for the NIC's self initiated
4817 * cleanup to complete.
4818 */
4819 msleep(100);
4820 }
1da177e4
LT
4821
4822 val64 = readq(&bar0->adapter_status);
20346722 4823 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1da177e4
LT
4824 if (LINK_IS_UP(val64)) {
4825 val64 = readq(&bar0->adapter_control);
4826 val64 |= ADAPTER_CNTL_EN;
4827 writeq(val64, &bar0->adapter_control);
541ae68f
K
4828 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4829 subid)) {
1da177e4
LT
4830 val64 = readq(&bar0->gpio_control);
4831 val64 |= GPIO_CTRL_GPIO_0;
4832 writeq(val64, &bar0->gpio_control);
4833 val64 = readq(&bar0->gpio_control);
4834 } else {
4835 val64 |= ADAPTER_LED_ON;
4836 writeq(val64, &bar0->adapter_control);
4837 }
a371a07d
K
4838 if (s2io_link_fault_indication(nic) ==
4839 MAC_RMAC_ERR_TIMER) {
4840 val64 = readq(&bar0->adapter_status);
4841 if (!LINK_IS_UP(val64)) {
4842 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4843 DBG_PRINT(ERR_DBG, " Link down ");
4844 DBG_PRINT(ERR_DBG, "after ");
4845 DBG_PRINT(ERR_DBG, "enabling ");
4846 DBG_PRINT(ERR_DBG, "device \n");
4847 }
1da177e4
LT
4848 }
4849 if (nic->device_enabled_once == FALSE) {
4850 nic->device_enabled_once = TRUE;
4851 }
4852 s2io_link(nic, LINK_UP);
4853 } else {
541ae68f
K
4854 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4855 subid)) {
1da177e4
LT
4856 val64 = readq(&bar0->gpio_control);
4857 val64 &= ~GPIO_CTRL_GPIO_0;
4858 writeq(val64, &bar0->gpio_control);
4859 val64 = readq(&bar0->gpio_control);
4860 }
4861 s2io_link(nic, LINK_DOWN);
4862 }
4863 } else { /* NIC is not Quiescent. */
4864 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4865 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4866 netif_stop_queue(dev);
4867 }
4868 clear_bit(0, &(nic->link_state));
4869}
4870
4871static void s2io_card_down(nic_t * sp)
4872{
4873 int cnt = 0;
4874 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4875 unsigned long flags;
4876 register u64 val64 = 0;
4877
25fff88e 4878 del_timer_sync(&sp->alarm_timer);
1da177e4 4879 /* If s2io_set_link task is executing, wait till it completes. */
20346722 4880 while (test_and_set_bit(0, &(sp->link_state))) {
1da177e4 4881 msleep(50);
20346722 4882 }
1da177e4
LT
4883 atomic_set(&sp->card_state, CARD_DOWN);
4884
4885 /* disable Tx and Rx traffic on the NIC */
4886 stop_nic(sp);
4887
4888 /* Kill tasklet. */
4889 tasklet_kill(&sp->task);
4890
4891 /* Check if the device is Quiescent and then Reset the NIC */
4892 do {
4893 val64 = readq(&bar0->adapter_status);
20346722 4894 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
1da177e4
LT
4895 break;
4896 }
4897
4898 msleep(50);
4899 cnt++;
4900 if (cnt == 10) {
4901 DBG_PRINT(ERR_DBG,
4902 "s2io_close:Device not Quiescent ");
4903 DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
4904 (unsigned long long) val64);
4905 break;
4906 }
4907 } while (1);
1da177e4
LT
4908 s2io_reset(sp);
4909
7ba013ac
K
4910 /* Waiting till all Interrupt handlers are complete */
4911 cnt = 0;
4912 do {
4913 msleep(10);
4914 if (!atomic_read(&sp->isr_cnt))
4915 break;
4916 cnt++;
4917 } while(cnt < 5);
4918
4919 spin_lock_irqsave(&sp->tx_lock, flags);
4920 /* Free all Tx buffers */
1da177e4 4921 free_tx_buffers(sp);
7ba013ac
K
4922 spin_unlock_irqrestore(&sp->tx_lock, flags);
4923
4924 /* Free all Rx buffers */
4925 spin_lock_irqsave(&sp->rx_lock, flags);
1da177e4 4926 free_rx_buffers(sp);
7ba013ac 4927 spin_unlock_irqrestore(&sp->rx_lock, flags);
1da177e4 4928
1da177e4
LT
4929 clear_bit(0, &(sp->link_state));
4930}
4931
4932static int s2io_card_up(nic_t * sp)
4933{
4934 int i, ret;
4935 mac_info_t *mac_control;
4936 struct config_param *config;
4937 struct net_device *dev = (struct net_device *) sp->dev;
4938
4939 /* Initialize the H/W I/O registers */
4940 if (init_nic(sp) != 0) {
4941 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4942 dev->name);
4943 return -ENODEV;
4944 }
4945
20346722
K
4946 /*
4947 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
4948 * Rx ring and initializing buffers into 30 Rx blocks
4949 */
4950 mac_control = &sp->mac_control;
4951 config = &sp->config;
4952
4953 for (i = 0; i < config->rx_ring_num; i++) {
4954 if ((ret = fill_rx_buffers(sp, i))) {
4955 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4956 dev->name);
4957 s2io_reset(sp);
4958 free_rx_buffers(sp);
4959 return -ENOMEM;
4960 }
4961 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4962 atomic_read(&sp->rx_bufs_left[i]));
4963 }
4964
4965 /* Setting its receive mode */
4966 s2io_set_multicast(dev);
4967
4968 /* Enable tasklet for the device */
4969 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4970
4971 /* Enable Rx Traffic and interrupts on the NIC */
4972 if (start_nic(sp)) {
4973 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4974 tasklet_kill(&sp->task);
4975 s2io_reset(sp);
4976 free_irq(dev->irq, dev);
4977 free_rx_buffers(sp);
4978 return -ENODEV;
4979 }
4980
25fff88e
K
4981 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4982
1da177e4
LT
4983 atomic_set(&sp->card_state, CARD_UP);
4984 return 0;
4985}
4986
20346722 4987/**
1da177e4
LT
4988 * s2io_restart_nic - Resets the NIC.
4989 * @data : long pointer to the device private structure
4990 * Description:
4991 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 4992 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
4993 * the run time of the watch dog routine which is run holding a
4994 * spin lock.
4995 */
4996
4997static void s2io_restart_nic(unsigned long data)
4998{
4999 struct net_device *dev = (struct net_device *) data;
5000 nic_t *sp = dev->priv;
5001
5002 s2io_card_down(sp);
5003 if (s2io_card_up(sp)) {
5004 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5005 dev->name);
5006 }
5007 netif_wake_queue(dev);
5008 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
5009 dev->name);
20346722 5010
1da177e4
LT
5011}
5012
20346722
K
5013/**
5014 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
5015 * @dev : Pointer to net device structure
5016 * Description:
5017 * This function is triggered if the Tx Queue is stopped
5018 * for a pre-defined amount of time when the Interface is still up.
5019 * If the Interface is jammed in such a situation, the hardware is
5020 * reset (by s2io_close) and restarted again (by s2io_open) to
5021 * overcome any problem that might have been caused in the hardware.
5022 * Return value:
5023 * void
5024 */
5025
5026static void s2io_tx_watchdog(struct net_device *dev)
5027{
5028 nic_t *sp = dev->priv;
5029
5030 if (netif_carrier_ok(dev)) {
5031 schedule_work(&sp->rst_timer_task);
5032 }
5033}
5034
5035/**
5036 * rx_osm_handler - To perform some OS related operations on SKB.
5037 * @sp: private member of the device structure, pointer to s2io_nic structure.
5038 * @skb : the socket buffer pointer.
5039 * @len : length of the packet
5040 * @cksum : FCS checksum of the frame.
5041 * @ring_no : the ring from which this RxD was extracted.
20346722 5042 * Description:
1da177e4
LT
5043 * This function is called by the Rx interrupt service routine to perform
5044 * some OS related operations on the SKB before passing it to the upper
5045 * layers. It mainly checks if the checksum is OK, if so adds it to the
5046 * SKBs cksum variable, increments the Rx packet count and passes the SKB
5047 * to the upper layer. If the checksum is wrong, it increments the Rx
5048 * packet error count, frees the SKB and returns error.
5049 * Return value:
5050 * SUCCESS on success and -1 on failure.
5051 */
20346722 5052static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
1da177e4 5053{
20346722 5054 nic_t *sp = ring_data->nic;
1da177e4 5055 struct net_device *dev = (struct net_device *) sp->dev;
20346722
K
5056 struct sk_buff *skb = (struct sk_buff *)
5057 ((unsigned long) rxdp->Host_Control);
5058 int ring_no = ring_data->ring_no;
1da177e4
LT
5059 u16 l3_csum, l4_csum;
5060#ifdef CONFIG_2BUFF_MODE
20346722
K
5061 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5062 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5063 int get_block = ring_data->rx_curr_get_info.block_index;
5064 int get_off = ring_data->rx_curr_get_info.offset;
5065 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
1da177e4 5066 unsigned char *buff;
20346722
K
5067#else
5068 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
1da177e4 5069#endif
20346722
K
5070 skb->dev = dev;
5071 if (rxdp->Control_1 & RXD_T_CODE) {
5072 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5073 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5074 dev->name, err);
1ddc50d4
K
5075 dev_kfree_skb(skb);
5076 sp->stats.rx_crc_errors++;
5077 atomic_dec(&sp->rx_bufs_left[ring_no]);
5078 rxdp->Host_Control = 0;
5079 return 0;
20346722 5080 }
1da177e4 5081
20346722
K
5082 /* Updating statistics */
5083 rxdp->Host_Control = 0;
5084 sp->rx_pkt_count++;
5085 sp->stats.rx_packets++;
5086#ifndef CONFIG_2BUFF_MODE
5087 sp->stats.rx_bytes += len;
5088#else
5089 sp->stats.rx_bytes += buf0_len + buf2_len;
5090#endif
5091
5092#ifndef CONFIG_2BUFF_MODE
5093 skb_put(skb, len);
5094#else
5095 buff = skb_push(skb, buf0_len);
5096 memcpy(buff, ba->ba_0, buf0_len);
5097 skb_put(skb, buf2_len);
5098#endif
5099
5100 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5101 (sp->rx_csum)) {
5102 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
1da177e4
LT
5103 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5104 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 5105 /*
1da177e4
LT
5106 * NIC verifies if the Checksum of the received
5107 * frame is Ok or not and accordingly returns
5108 * a flag in the RxD.
5109 */
5110 skb->ip_summed = CHECKSUM_UNNECESSARY;
5111 } else {
20346722
K
5112 /*
5113 * Packet with erroneous checksum, let the
1da177e4
LT
5114 * upper layers deal with it.
5115 */
5116 skb->ip_summed = CHECKSUM_NONE;
5117 }
5118 } else {
5119 skb->ip_summed = CHECKSUM_NONE;
5120 }
5121
1da177e4 5122 skb->protocol = eth_type_trans(skb, dev);
1da177e4 5123#ifdef CONFIG_S2IO_NAPI
be3a6b02
K
5124 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5125 /* Queueing the vlan frame to the upper layer */
5126 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5127 RXD_GET_VLAN_TAG(rxdp->Control_2));
5128 } else {
5129 netif_receive_skb(skb);
5130 }
1da177e4 5131#else
be3a6b02
K
5132 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5133 /* Queueing the vlan frame to the upper layer */
5134 vlan_hwaccel_rx(skb, sp->vlgrp,
5135 RXD_GET_VLAN_TAG(rxdp->Control_2));
5136 } else {
5137 netif_rx(skb);
5138 }
1da177e4 5139#endif
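	/*
	 * Delivery path summary: with CONFIG_S2IO_NAPI the frame is handed to
	 * the stack via netif_receive_skb(), otherwise via netif_rx(); in both
	 * cases VLAN-tagged frames detected by the hardware go through the
	 * VLAN acceleration hooks instead.
	 */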
1da177e4 5140 dev->last_rx = jiffies;
1da177e4 5141 atomic_dec(&sp->rx_bufs_left[ring_no]);
1da177e4
LT
5142 return SUCCESS;
5143}
5144
5145/**
5146 * s2io_link - stops/starts the Tx queue.
5147 * @sp : private member of the device structure, which is a pointer to the
5148 * s2io_nic structure.
 5149 * @link : indicates whether the link is UP or DOWN.
5150 * Description:
5151 * This function stops/starts the Tx queue depending on whether the link
20346722
K
 5152 * status of the NIC is down or up. This is called by the Alarm
5153 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
5154 * Return value:
5155 * void.
5156 */
5157
20346722 5158void s2io_link(nic_t * sp, int link)
1da177e4
LT
5159{
5160 struct net_device *dev = (struct net_device *) sp->dev;
5161
5162 if (link != sp->last_link_state) {
5163 if (link == LINK_DOWN) {
5164 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5165 netif_carrier_off(dev);
5166 } else {
5167 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5168 netif_carrier_on(dev);
5169 }
5170 }
5171 sp->last_link_state = link;
5172}
5173
5174/**
20346722
K
5175 * get_xena_rev_id - to identify revision ID of xena.
5176 * @pdev : PCI Dev structure
5177 * Description:
5178 * Function to identify the Revision ID of xena.
5179 * Return value:
5180 * returns the revision ID of the device.
5181 */
5182
5183int get_xena_rev_id(struct pci_dev *pdev)
5184{
 5185	u8 id = 0;
 5186
 5187	pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
 5188	return id;
5189}
5190
5191/**
5192 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5193 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5194 * s2io_nic structure.
5195 * Description:
5196 * This function initializes a few of the PCI and PCI-X configuration registers
5197 * with recommended values.
5198 * Return value:
5199 * void
5200 */
5201
5202static void s2io_init_pci(nic_t * sp)
5203{
20346722 5204 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
5205
5206 /* Enable Data Parity Error Recovery in PCI-X command register. */
5207 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5208 &(pcix_cmd));
1da177e4 5209 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5210 (pcix_cmd | 1));
1da177e4 5211 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5212 &(pcix_cmd));
1da177e4
LT
5213
5214 /* Set the PErr Response bit in PCI command register. */
5215 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5216 pci_write_config_word(sp->pdev, PCI_COMMAND,
5217 (pci_cmd | PCI_COMMAND_PARITY));
5218 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5219
1da177e4 5220 /* Forcibly disabling relaxed ordering capability of the card. */
20346722 5221 pcix_cmd &= 0xfffd;
1da177e4 5222 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5223 pcix_cmd);
1da177e4 5224 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5225 &(pcix_cmd));
1da177e4
LT
5226}
5227
5228MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5229MODULE_LICENSE("GPL");
5230module_param(tx_fifo_num, int, 0);
1da177e4 5231module_param(rx_ring_num, int, 0);
20346722
K
5232module_param_array(tx_fifo_len, uint, NULL, 0);
5233module_param_array(rx_ring_sz, uint, NULL, 0);
20346722 5234module_param_array(rts_frm_len, uint, NULL, 0);
5e25b9dd 5235module_param(use_continuous_tx_intrs, int, 1);
1da177e4
LT
5236module_param(rmac_pause_time, int, 0);
5237module_param(mc_pause_threshold_q0q3, int, 0);
5238module_param(mc_pause_threshold_q4q7, int, 0);
5239module_param(shared_splits, int, 0);
5240module_param(tmac_util_period, int, 0);
5241module_param(rmac_util_period, int, 0);
b6e3f982 5242module_param(bimodal, bool, 0);
1da177e4
LT
5243#ifndef CONFIG_S2IO_NAPI
5244module_param(indicate_max_pkts, int, 0);
5245#endif
303bcb4b 5246module_param(rxsync_frequency, int, 0);
20346722 5247
1da177e4 5248/**
20346722 5249 * s2io_init_nic - Initialization of the adapter .
1da177e4
LT
5250 * @pdev : structure containing the PCI related information of the device.
 5251 * @pre: the entry in the s2io_tbl list of supported devices that matched this device.
5252 * Description:
 5253 * The function initializes an adapter identified by the pci_dev structure.
20346722
K
 5254 * All OS related initialization, including memory allocation, setup of the
 5255 * device structure and initialization of the device private data, is done.
 5256 * Also the swapper control register is initialized to enable reads and writes into the I/O
1da177e4
LT
5257 * registers of the device.
5258 * Return value:
5259 * returns 0 on success and negative on failure.
5260 */
5261
5262static int __devinit
5263s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5264{
5265 nic_t *sp;
5266 struct net_device *dev;
1da177e4
LT
5267 int i, j, ret;
5268 int dma_flag = FALSE;
5269 u32 mac_up, mac_down;
5270 u64 val64 = 0, tmp64 = 0;
5271 XENA_dev_config_t __iomem *bar0 = NULL;
5272 u16 subid;
5273 mac_info_t *mac_control;
5274 struct config_param *config;
541ae68f 5275 int mode;
1da177e4 5276
20346722
K
5277#ifdef CONFIG_S2IO_NAPI
5278 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5279#endif
1da177e4
LT
5280
5281 if ((ret = pci_enable_device(pdev))) {
5282 DBG_PRINT(ERR_DBG,
5283 "s2io_init_nic: pci_enable_device failed\n");
5284 return ret;
5285 }
5286
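	/*
	 * DMA addressing setup: prefer a 64-bit DMA mask and fall back to
	 * 32-bit addressing if the platform cannot honour it; if neither
	 * mask can be set, the device cannot be used and probing fails.
	 */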
1e7f0bd8 5287 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1da177e4
LT
5288 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5289 dma_flag = TRUE;
1da177e4 5290 if (pci_set_consistent_dma_mask
1e7f0bd8 5291 (pdev, DMA_64BIT_MASK)) {
1da177e4
LT
 5292 			DBG_PRINT(ERR_DBG,
 5293 				  "Unable to obtain 64bit DMA "
 5294 				  "for consistent allocations\n");
5295 pci_disable_device(pdev);
5296 return -ENOMEM;
5297 }
1e7f0bd8 5298 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
1da177e4
LT
5299 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5300 } else {
5301 pci_disable_device(pdev);
5302 return -ENOMEM;
5303 }
5304
5305 if (pci_request_regions(pdev, s2io_driver_name)) {
 5306 		DBG_PRINT(ERR_DBG, "Request Regions failed\n");
5307 pci_disable_device(pdev);
5308 return -ENODEV;
5309 }
5310
5311 dev = alloc_etherdev(sizeof(nic_t));
5312 if (dev == NULL) {
5313 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5314 pci_disable_device(pdev);
5315 pci_release_regions(pdev);
5316 return -ENODEV;
5317 }
5318
5319 pci_set_master(pdev);
5320 pci_set_drvdata(pdev, dev);
5321 SET_MODULE_OWNER(dev);
5322 SET_NETDEV_DEV(dev, &pdev->dev);
5323
5324 /* Private member variable initialized to s2io NIC structure */
5325 sp = dev->priv;
5326 memset(sp, 0, sizeof(nic_t));
5327 sp->dev = dev;
5328 sp->pdev = pdev;
1da177e4 5329 sp->high_dma_flag = dma_flag;
1da177e4 5330 sp->device_enabled_once = FALSE;
1da177e4 5331
541ae68f
K
5332 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5333 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5334 sp->device_type = XFRAME_II_DEVICE;
5335 else
5336 sp->device_type = XFRAME_I_DEVICE;
5337
1da177e4
LT
5338 /* Initialize some PCI/PCI-X fields of the NIC. */
5339 s2io_init_pci(sp);
5340
20346722 5341 /*
1da177e4 5342 * Setting the device configuration parameters.
20346722
K
5343 * Most of these parameters can be specified by the user during
5344 * module insertion as they are module loadable parameters. If
 5345 * these parameters are not specified at load time, they
1da177e4
LT
5346 * are initialized with default values.
5347 */
5348 mac_control = &sp->mac_control;
5349 config = &sp->config;
5350
5351 /* Tx side parameters. */
0b1f7ebe
K
5352 if (tx_fifo_len[0] == 0)
5353 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
1da177e4
LT
5354 config->tx_fifo_num = tx_fifo_num;
5355 for (i = 0; i < MAX_TX_FIFOS; i++) {
5356 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5357 config->tx_cfg[i].fifo_priority = i;
5358 }
5359
20346722
K
5360 /* mapping the QoS priority to the configured fifos */
5361 for (i = 0; i < MAX_TX_FIFOS; i++)
5362 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5363
1da177e4
LT
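	/*
	 * Tx interrupt type selection (as implemented below): utilization-based
	 * Tx interrupts are used by default, but if any configured FIFO holds
	 * fewer than 65 descriptors the driver switches to per-list interrupts
	 * for all FIFOs.
	 */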
5364 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5365 for (i = 0; i < config->tx_fifo_num; i++) {
5366 config->tx_cfg[i].f_no_snoop =
5367 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5368 if (config->tx_cfg[i].fifo_len < 65) {
5369 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5370 break;
5371 }
5372 }
776bd20f 5373 config->max_txds = MAX_SKB_FRAGS + 1;
1da177e4
LT
5374
5375 /* Rx side parameters. */
0b1f7ebe
K
5376 if (rx_ring_sz[0] == 0)
5377 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
1da177e4
LT
5378 config->rx_ring_num = rx_ring_num;
5379 for (i = 0; i < MAX_RX_RINGS; i++) {
5380 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5381 (MAX_RXDS_PER_BLOCK + 1);
5382 config->rx_cfg[i].ring_priority = i;
5383 }
5384
5385 for (i = 0; i < rx_ring_num; i++) {
5386 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5387 config->rx_cfg[i].f_no_snoop =
5388 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5389 }
5390
5391 /* Setting Mac Control parameters */
5392 mac_control->rmac_pause_time = rmac_pause_time;
5393 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5394 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5395
5396
5397 /* Initialize Ring buffer parameters. */
5398 for (i = 0; i < config->rx_ring_num; i++)
5399 atomic_set(&sp->rx_bufs_left[i], 0);
5400
7ba013ac
K
5401 /* Initialize the number of ISRs currently running */
5402 atomic_set(&sp->isr_cnt, 0);
5403
1da177e4
LT
5404 /* initialize the shared memory used by the NIC and the host */
5405 if (init_shared_mem(sp)) {
5406 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
0b1f7ebe 5407 __FUNCTION__);
1da177e4
LT
5408 ret = -ENOMEM;
5409 goto mem_alloc_failed;
5410 }
5411
5412 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5413 pci_resource_len(pdev, 0));
5414 if (!sp->bar0) {
5415 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5416 dev->name);
5417 ret = -ENOMEM;
5418 goto bar0_remap_failed;
5419 }
5420
5421 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5422 pci_resource_len(pdev, 2));
5423 if (!sp->bar1) {
5424 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5425 dev->name);
5426 ret = -ENOMEM;
5427 goto bar1_remap_failed;
5428 }
5429
5430 dev->irq = pdev->irq;
5431 dev->base_addr = (unsigned long) sp->bar0;
5432
5433 /* Initializing the BAR1 address as the start of the FIFO pointer. */
5434 for (j = 0; j < MAX_TX_FIFOS; j++) {
5435 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5436 (sp->bar1 + (j * 0x00020000));
5437 }
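	/*
	 * Each Tx FIFO's doorbell (TxDL pointer) register lives in its own
	 * 0x00020000-byte (128 KB) window within BAR1, hence the stride above.
	 */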
5438
5439 /* Driver entry points */
5440 dev->open = &s2io_open;
5441 dev->stop = &s2io_close;
5442 dev->hard_start_xmit = &s2io_xmit;
5443 dev->get_stats = &s2io_get_stats;
5444 dev->set_multicast_list = &s2io_set_multicast;
5445 dev->do_ioctl = &s2io_ioctl;
5446 dev->change_mtu = &s2io_change_mtu;
5447 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
be3a6b02
K
5448 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5449 dev->vlan_rx_register = s2io_vlan_rx_register;
5450 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
20346722 5451
1da177e4
LT
5452 /*
5453 * will use eth_mac_addr() for dev->set_mac_address
5454 * mac address will be set every time dev->open() is called
5455 */
20346722 5456#if defined(CONFIG_S2IO_NAPI)
1da177e4 5457 dev->poll = s2io_poll;
20346722 5458 dev->weight = 32;
1da177e4
LT
5459#endif
5460
5461 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5462 if (sp->high_dma_flag == TRUE)
5463 dev->features |= NETIF_F_HIGHDMA;
5464#ifdef NETIF_F_TSO
5465 dev->features |= NETIF_F_TSO;
5466#endif
5467
5468 dev->tx_timeout = &s2io_tx_watchdog;
5469 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5470 INIT_WORK(&sp->rst_timer_task,
5471 (void (*)(void *)) s2io_restart_nic, dev);
5472 INIT_WORK(&sp->set_link_task,
5473 (void (*)(void *)) s2io_set_link, sp);
5474
e960fc5c 5475 pci_save_state(sp->pdev);
1da177e4
LT
5476
5477 /* Setting swapper control on the NIC, for proper reset operation */
5478 if (s2io_set_swapper(sp)) {
5479 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5480 dev->name);
5481 ret = -EAGAIN;
5482 goto set_swap_failed;
5483 }
5484
541ae68f
K
5485 /* Verify if the Herc works on the slot its placed into */
5486 if (sp->device_type & XFRAME_II_DEVICE) {
5487 mode = s2io_verify_pci_mode(sp);
5488 if (mode < 0) {
5489 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5490 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5491 ret = -EBADSLT;
5492 goto set_swap_failed;
5493 }
5494 }
5495
5496 /* Not needed for Herc */
5497 if (sp->device_type & XFRAME_I_DEVICE) {
5498 /*
5499 * Fix for all "FFs" MAC address problems observed on
5500 * Alpha platforms
5501 */
5502 fix_mac_address(sp);
5503 s2io_reset(sp);
5504 }
1da177e4
LT
5505
5506 /*
1da177e4
LT
5507 * MAC address initialization.
5508 * For now only one mac address will be read and used.
5509 */
5510 bar0 = sp->bar0;
5511 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5512 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5513 writeq(val64, &bar0->rmac_addr_cmd_mem);
5514 wait_for_cmd_complete(sp);
5515
5516 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5517 mac_down = (u32) tmp64;
5518 mac_up = (u32) (tmp64 >> 32);
5519
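	/*
	 * rmac_addr_data0_mem returns the 6-byte station address packed into a
	 * 64-bit word: bytes 0-3 sit in the upper 32 bits and bytes 4-5 in the
	 * top of the lower 32 bits, which is how they are unpacked below.
	 */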
 5520 	memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
5521
5522 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5523 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5524 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5525 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5526 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5527 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5528
1da177e4
LT
5529 /* Set the factory defined MAC address initially */
5530 dev->addr_len = ETH_ALEN;
5531 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5532
5533 /*
20346722 5534 * Initialize the tasklet status and link state flags
541ae68f 5535 * and the card state parameter
1da177e4
LT
5536 */
5537 atomic_set(&(sp->card_state), 0);
5538 sp->tasklet_status = 0;
5539 sp->link_state = 0;
5540
1da177e4
LT
5541 /* Initialize spinlocks */
5542 spin_lock_init(&sp->tx_lock);
5543#ifndef CONFIG_S2IO_NAPI
5544 spin_lock_init(&sp->put_lock);
5545#endif
7ba013ac 5546 spin_lock_init(&sp->rx_lock);
1da177e4 5547
20346722
K
5548 /*
5549 * SXE-002: Configure link and activity LED to init state
5550 * on driver load.
1da177e4
LT
5551 */
5552 subid = sp->pdev->subsystem_device;
5553 if ((subid & 0xFF) >= 0x07) {
5554 val64 = readq(&bar0->gpio_control);
5555 val64 |= 0x0000800000000000ULL;
5556 writeq(val64, &bar0->gpio_control);
5557 val64 = 0x0411040400000000ULL;
5558 writeq(val64, (void __iomem *) bar0 + 0x2700);
5559 val64 = readq(&bar0->gpio_control);
5560 }
5561
5562 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
5563
5564 if (register_netdev(dev)) {
5565 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5566 ret = -ENODEV;
5567 goto register_failed;
5568 }
5569
541ae68f
K
5570 if (sp->device_type & XFRAME_II_DEVICE) {
5571 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5572 dev->name);
776bd20f 5573 DBG_PRINT(ERR_DBG, "(rev %d), %s",
541ae68f
K
5574 get_xena_rev_id(sp->pdev),
5575 s2io_driver_version);
776bd20f 5576#ifdef CONFIG_2BUFF_MODE
 5577 		DBG_PRINT(ERR_DBG, ", Buffer mode %d", 2);
5578#endif
5579
5580 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
541ae68f
K
5581 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5582 sp->def_mac_addr[0].mac_addr[0],
5583 sp->def_mac_addr[0].mac_addr[1],
5584 sp->def_mac_addr[0].mac_addr[2],
5585 sp->def_mac_addr[0].mac_addr[3],
5586 sp->def_mac_addr[0].mac_addr[4],
5587 sp->def_mac_addr[0].mac_addr[5]);
0b1f7ebe 5588 mode = s2io_print_pci_mode(sp);
541ae68f
K
5589 if (mode < 0) {
5590 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
5591 ret = -EBADSLT;
5592 goto set_swap_failed;
5593 }
5594 } else {
5595 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5596 dev->name);
776bd20f 5597 DBG_PRINT(ERR_DBG, "(rev %d), %s",
541ae68f
K
5598 get_xena_rev_id(sp->pdev),
5599 s2io_driver_version);
776bd20f 5600#ifdef CONFIG_2BUFF_MODE
 5601 		DBG_PRINT(ERR_DBG, ", Buffer mode %d", 2);
5602#endif
5603 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
541ae68f
K
5604 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5605 sp->def_mac_addr[0].mac_addr[0],
5606 sp->def_mac_addr[0].mac_addr[1],
5607 sp->def_mac_addr[0].mac_addr[2],
5608 sp->def_mac_addr[0].mac_addr[3],
5609 sp->def_mac_addr[0].mac_addr[4],
5610 sp->def_mac_addr[0].mac_addr[5]);
5611 }
5612
7ba013ac
K
5613 /* Initialize device name */
5614 strcpy(sp->name, dev->name);
541ae68f
K
5615 if (sp->device_type & XFRAME_II_DEVICE)
5616 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5617 else
5618 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
7ba013ac 5619
b6e3f982
K
5620 /* Initialize bimodal Interrupts */
5621 sp->config.bimodal = bimodal;
5622 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
5623 sp->config.bimodal = 0;
 5624 		DBG_PRINT(ERR_DBG, "%s: Bimodal intr not supported by Xframe I\n",
5625 dev->name);
5626 }
5627
20346722
K
5628 /*
5629 * Make Link state as off at this point, when the Link change
5630 * interrupt comes the state will be automatically changed to
1da177e4
LT
5631 * the right state.
5632 */
5633 netif_carrier_off(dev);
1da177e4
LT
5634
5635 return 0;
5636
5637 register_failed:
5638 set_swap_failed:
5639 iounmap(sp->bar1);
5640 bar1_remap_failed:
5641 iounmap(sp->bar0);
5642 bar0_remap_failed:
5643 mem_alloc_failed:
5644 free_shared_mem(sp);
5645 pci_disable_device(pdev);
5646 pci_release_regions(pdev);
5647 pci_set_drvdata(pdev, NULL);
5648 free_netdev(dev);
5649
5650 return ret;
5651}
5652
5653/**
20346722 5654 * s2io_rem_nic - Free the PCI device
1da177e4 5655 * @pdev: structure containing the PCI related information of the device.
20346722 5656 * Description: This function is called by the PCI subsystem to release a
1da177e4 5657 * PCI device and free all resources held by the device. This could
20346722 5658 * be in response to a hot-plug event or when the driver is to be removed
1da177e4
LT
5659 * from memory.
5660 */
5661
5662static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5663{
5664 struct net_device *dev =
5665 (struct net_device *) pci_get_drvdata(pdev);
5666 nic_t *sp;
5667
5668 if (dev == NULL) {
5669 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5670 return;
5671 }
5672
5673 sp = dev->priv;
5674 unregister_netdev(dev);
5675
5676 free_shared_mem(sp);
5677 iounmap(sp->bar0);
5678 iounmap(sp->bar1);
5679 pci_disable_device(pdev);
5680 pci_release_regions(pdev);
5681 pci_set_drvdata(pdev, NULL);
1da177e4
LT
5682 free_netdev(dev);
5683}
5684
5685/**
5686 * s2io_starter - Entry point for the driver
5687 * Description: This function is the entry point for the driver. It verifies
5688 * the module loadable parameters and initializes PCI configuration space.
5689 */
5690
5691int __init s2io_starter(void)
5692{
5693 return pci_module_init(&s2io_driver);
5694}
5695
5696/**
20346722 5697 * s2io_closer - Cleanup routine for the driver
1da177e4
LT
 5698 * Description: This function is the cleanup routine for the driver.
 * It unregisters the driver.
5699 */
5700
20346722 5701void s2io_closer(void)
1da177e4
LT
5702{
5703 pci_unregister_driver(&s2io_driver);
5704 DBG_PRINT(INIT_DBG, "cleanup done\n");
5705}
5706
5707module_init(s2io_starter);
5708module_exit(s2io_closer);