1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 * in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 * This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 * values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 * Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'.
 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
 * Possible values are '1' (enable) and '0' (disable). Default is '0'.
 * lro_max_pkts: This parameter defines the maximum number of packets that can
 * be aggregated into a single large packet.
 ************************************************************************/
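
/*
 * Example usage (illustrative only; the parameter values below are
 * hypothetical and should be tuned per deployment):
 *
 *	modprobe s2io tx_fifo_num=2 rx_ring_num=2 rx_ring_mode=1 \
 *		intr_type=2 lro=1 lro_max_pkts=32
 */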
46
47 #include <linux/config.h>
48 #include <linux/module.h>
49 #include <linux/types.h>
50 #include <linux/errno.h>
51 #include <linux/ioport.h>
52 #include <linux/pci.h>
53 #include <linux/dma-mapping.h>
54 #include <linux/kernel.h>
55 #include <linux/netdevice.h>
56 #include <linux/etherdevice.h>
57 #include <linux/skbuff.h>
58 #include <linux/init.h>
59 #include <linux/delay.h>
60 #include <linux/stddef.h>
61 #include <linux/ioctl.h>
62 #include <linux/timex.h>
63 #include <linux/sched.h>
64 #include <linux/ethtool.h>
65 #include <linux/workqueue.h>
66 #include <linux/if_vlan.h>
67 #include <linux/ip.h>
68 #include <linux/tcp.h>
69 #include <net/tcp.h>
70
71 #include <asm/system.h>
72 #include <asm/uaccess.h>
73 #include <asm/io.h>
74 #include <asm/div64.h>
75
76 /* local include */
77 #include "s2io.h"
78 #include "s2io-regs.h"
79
80 #define DRV_VERSION "2.0.14.2"
81
82 /* S2io Driver name & version. */
83 static char s2io_driver_name[] = "Neterion";
84 static char s2io_driver_version[] = DRV_VERSION;
85
86 static int rxd_size[4] = {32,48,48,64};
87 static int rxd_count[4] = {127,85,85,63};
88
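/*
 * Reading of the check below (a sketch, not normative): the RxD is
 * considered "up to date" once the adapter has released ownership
 * (RXD_OWN_XENA clear in Control_1) and the marker the driver stamps
 * into Control_2 no longer matches THE_RXD_MARK.
 */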
89 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
90 {
91 int ret;
92
93 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
94 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
95
96 return ret;
97 }
98
99 /*
100 * Cards with following subsystem_id have a link state indication
101 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
103 */
104 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
105 (dev_type == XFRAME_I_DEVICE) ? \
106 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
107 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
108
109 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
110 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
111 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
112 #define PANIC 1
113 #define LOW 2
114 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
115 {
116 mac_info_t *mac_control;
117
118 mac_control = &sp->mac_control;
119 if (rxb_size <= rxd_count[sp->rxd_mode])
120 return PANIC;
121 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
122 return LOW;
123 return 0;
124 }
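
/*
 * Caller sketch (hypothetical; the real callers live in the Rx paths
 * further down this file):
 *
 *	if (rx_buffer_level(sp, rxb_size, ring) == PANIC)
 *		... replenish the ring synchronously ...
 *	else if (rx_buffer_level(sp, rxb_size, ring) == LOW)
 *		... schedule a deferred replenish ...
 */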
125
126 /* Ethtool related variables and Macros. */
127 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
128 "Register test\t(offline)",
129 "Eeprom test\t(offline)",
130 "Link test\t(online)",
131 "RLDRAM test\t(offline)",
132 "BIST Test\t(offline)"
133 };
134
135 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
136 {"tmac_frms"},
137 {"tmac_data_octets"},
138 {"tmac_drop_frms"},
139 {"tmac_mcst_frms"},
140 {"tmac_bcst_frms"},
141 {"tmac_pause_ctrl_frms"},
142 {"tmac_ttl_octets"},
143 {"tmac_ucst_frms"},
144 {"tmac_nucst_frms"},
145 {"tmac_any_err_frms"},
146 {"tmac_ttl_less_fb_octets"},
147 {"tmac_vld_ip_octets"},
148 {"tmac_vld_ip"},
149 {"tmac_drop_ip"},
150 {"tmac_icmp"},
151 {"tmac_rst_tcp"},
152 {"tmac_tcp"},
153 {"tmac_udp"},
154 {"rmac_vld_frms"},
155 {"rmac_data_octets"},
156 {"rmac_fcs_err_frms"},
157 {"rmac_drop_frms"},
158 {"rmac_vld_mcst_frms"},
159 {"rmac_vld_bcst_frms"},
160 {"rmac_in_rng_len_err_frms"},
161 {"rmac_out_rng_len_err_frms"},
162 {"rmac_long_frms"},
163 {"rmac_pause_ctrl_frms"},
164 {"rmac_unsup_ctrl_frms"},
165 {"rmac_ttl_octets"},
166 {"rmac_accepted_ucst_frms"},
167 {"rmac_accepted_nucst_frms"},
168 {"rmac_discarded_frms"},
169 {"rmac_drop_events"},
170 {"rmac_ttl_less_fb_octets"},
171 {"rmac_ttl_frms"},
172 {"rmac_usized_frms"},
173 {"rmac_osized_frms"},
174 {"rmac_frag_frms"},
175 {"rmac_jabber_frms"},
176 {"rmac_ttl_64_frms"},
177 {"rmac_ttl_65_127_frms"},
178 {"rmac_ttl_128_255_frms"},
179 {"rmac_ttl_256_511_frms"},
180 {"rmac_ttl_512_1023_frms"},
181 {"rmac_ttl_1024_1518_frms"},
182 {"rmac_ip"},
183 {"rmac_ip_octets"},
184 {"rmac_hdr_err_ip"},
185 {"rmac_drop_ip"},
186 {"rmac_icmp"},
187 {"rmac_tcp"},
188 {"rmac_udp"},
189 {"rmac_err_drp_udp"},
190 {"rmac_xgmii_err_sym"},
191 {"rmac_frms_q0"},
192 {"rmac_frms_q1"},
193 {"rmac_frms_q2"},
194 {"rmac_frms_q3"},
195 {"rmac_frms_q4"},
196 {"rmac_frms_q5"},
197 {"rmac_frms_q6"},
198 {"rmac_frms_q7"},
199 {"rmac_full_q0"},
200 {"rmac_full_q1"},
201 {"rmac_full_q2"},
202 {"rmac_full_q3"},
203 {"rmac_full_q4"},
204 {"rmac_full_q5"},
205 {"rmac_full_q6"},
206 {"rmac_full_q7"},
207 {"rmac_pause_cnt"},
208 {"rmac_xgmii_data_err_cnt"},
209 {"rmac_xgmii_ctrl_err_cnt"},
210 {"rmac_accepted_ip"},
211 {"rmac_err_tcp"},
212 {"rd_req_cnt"},
213 {"new_rd_req_cnt"},
214 {"new_rd_req_rtry_cnt"},
215 {"rd_rtry_cnt"},
216 {"wr_rtry_rd_ack_cnt"},
217 {"wr_req_cnt"},
218 {"new_wr_req_cnt"},
219 {"new_wr_req_rtry_cnt"},
220 {"wr_rtry_cnt"},
221 {"wr_disc_cnt"},
222 {"rd_rtry_wr_ack_cnt"},
223 {"txp_wr_cnt"},
224 {"txd_rd_cnt"},
225 {"txd_wr_cnt"},
226 {"rxd_rd_cnt"},
227 {"rxd_wr_cnt"},
228 {"txf_rd_cnt"},
229 {"rxf_wr_cnt"},
230 {"rmac_ttl_1519_4095_frms"},
231 {"rmac_ttl_4096_8191_frms"},
232 {"rmac_ttl_8192_max_frms"},
233 {"rmac_ttl_gt_max_frms"},
234 {"rmac_osized_alt_frms"},
235 {"rmac_jabber_alt_frms"},
236 {"rmac_gt_max_alt_frms"},
237 {"rmac_vlan_frms"},
238 {"rmac_len_discard"},
239 {"rmac_fcs_discard"},
240 {"rmac_pf_discard"},
241 {"rmac_da_discard"},
242 {"rmac_red_discard"},
243 {"rmac_rts_discard"},
244 {"rmac_ingm_full_discard"},
245 {"link_fault_cnt"},
246 {"\n DRIVER STATISTICS"},
247 {"single_bit_ecc_errs"},
248 {"double_bit_ecc_errs"},
249 {"parity_err_cnt"},
250 {"serious_err_cnt"},
251 {"soft_reset_cnt"},
252 {"fifo_full_cnt"},
253 {"ring_full_cnt"},
254 ("alarm_transceiver_temp_high"),
255 ("alarm_transceiver_temp_low"),
256 ("alarm_laser_bias_current_high"),
257 ("alarm_laser_bias_current_low"),
258 ("alarm_laser_output_power_high"),
259 ("alarm_laser_output_power_low"),
260 ("warn_transceiver_temp_high"),
261 ("warn_transceiver_temp_low"),
262 ("warn_laser_bias_current_high"),
263 ("warn_laser_bias_current_low"),
264 ("warn_laser_output_power_high"),
265 ("warn_laser_output_power_low"),
266 ("lro_aggregated_pkts"),
267 ("lro_flush_both_count"),
268 ("lro_out_of_sequence_pkts"),
269 ("lro_flush_due_to_max_pkts"),
270 ("lro_avg_aggr_pkts"),
271 };
272
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
278
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long) arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)

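/*
 * Usage sketch (the handler and expiry names here are hypothetical):
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 */
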
285 /* Add the vlan */
286 static void s2io_vlan_rx_register(struct net_device *dev,
287 struct vlan_group *grp)
288 {
289 nic_t *nic = dev->priv;
290 unsigned long flags;
291
292 spin_lock_irqsave(&nic->tx_lock, flags);
293 nic->vlgrp = grp;
294 spin_unlock_irqrestore(&nic->tx_lock, flags);
295 }
296
297 /* Unregister the vlan */
298 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
299 {
300 nic_t *nic = dev->priv;
301 unsigned long flags;
302
303 spin_lock_irqsave(&nic->tx_lock, flags);
304 if (nic->vlgrp)
305 nic->vlgrp->vlan_devices[vid] = NULL;
306 spin_unlock_irqrestore(&nic->tx_lock, flags);
307 }
308
309 /*
310 * Constants to be programmed into the Xena's registers, to configure
311 * the XAUI.
312 */
313
314 #define END_SIGN 0x0
315 static const u64 herc_act_dtx_cfg[] = {
316 /* Set address */
317 0x8000051536750000ULL, 0x80000515367500E0ULL,
318 /* Write data */
319 0x8000051536750004ULL, 0x80000515367500E4ULL,
320 /* Set address */
321 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
322 /* Write data */
323 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
324 /* Set address */
325 0x801205150D440000ULL, 0x801205150D4400E0ULL,
326 /* Write data */
327 0x801205150D440004ULL, 0x801205150D4400E4ULL,
328 /* Set address */
329 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
330 /* Write data */
331 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
332 /* Done */
333 END_SIGN
334 };
335
336 static const u64 xena_dtx_cfg[] = {
337 /* Set address */
338 0x8000051500000000ULL, 0x80000515000000E0ULL,
339 /* Write data */
340 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
341 /* Set address */
342 0x8001051500000000ULL, 0x80010515000000E0ULL,
343 /* Write data */
344 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
345 /* Set address */
346 0x8002051500000000ULL, 0x80020515000000E0ULL,
347 /* Write data */
348 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
349 END_SIGN
350 };
351
352 /*
 * Constants for fixing the MAC address problem seen mostly on
354 * Alpha machines.
355 */
356 static const u64 fix_mac[] = {
357 0x0060000000000000ULL, 0x0060600000000000ULL,
358 0x0040600000000000ULL, 0x0000600000000000ULL,
359 0x0020600000000000ULL, 0x0060600000000000ULL,
360 0x0020600000000000ULL, 0x0060600000000000ULL,
361 0x0020600000000000ULL, 0x0060600000000000ULL,
362 0x0020600000000000ULL, 0x0060600000000000ULL,
363 0x0020600000000000ULL, 0x0060600000000000ULL,
364 0x0020600000000000ULL, 0x0060600000000000ULL,
365 0x0020600000000000ULL, 0x0060600000000000ULL,
366 0x0020600000000000ULL, 0x0060600000000000ULL,
367 0x0020600000000000ULL, 0x0060600000000000ULL,
368 0x0020600000000000ULL, 0x0060600000000000ULL,
369 0x0020600000000000ULL, 0x0000600000000000ULL,
370 0x0040600000000000ULL, 0x0060600000000000ULL,
371 END_SIGN
372 };
373
374 /* Module Loadable parameters. */
375 static unsigned int tx_fifo_num = 1;
376 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
377 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
378 static unsigned int rx_ring_num = 1;
379 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
380 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
381 static unsigned int rts_frm_len[MAX_RX_RINGS] =
382 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
383 static unsigned int rx_ring_mode = 1;
384 static unsigned int use_continuous_tx_intrs = 1;
385 static unsigned int rmac_pause_time = 0x100;
386 static unsigned int mc_pause_threshold_q0q3 = 187;
387 static unsigned int mc_pause_threshold_q4q7 = 187;
388 static unsigned int shared_splits;
389 static unsigned int tmac_util_period = 5;
390 static unsigned int rmac_util_period = 5;
391 static unsigned int bimodal = 0;
392 static unsigned int l3l4hdr_size = 128;
393 #ifndef CONFIG_S2IO_NAPI
394 static unsigned int indicate_max_pkts;
395 #endif
396 /* Frequency of Rx desc syncs expressed as power of 2 */
397 static unsigned int rxsync_frequency = 3;
398 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
399 static unsigned int intr_type = 0;
400 /* Large receive offload feature */
401 static unsigned int lro = 0;
402 /* Max pkts to be aggregated by LRO at one time. If not specified,
403 * aggregation happens until we hit max IP pkt size(64K)
404 */
405 static unsigned int lro_max_pkts = 0xFFFF;
406
407 /*
408 * S2IO device table.
409 * This table lists all the devices that this driver supports.
410 */
411 static struct pci_device_id s2io_tbl[] __devinitdata = {
412 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
413 PCI_ANY_ID, PCI_ANY_ID},
414 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
415 PCI_ANY_ID, PCI_ANY_ID},
416 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
417 PCI_ANY_ID, PCI_ANY_ID},
418 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
419 PCI_ANY_ID, PCI_ANY_ID},
420 {0,}
421 };
422
423 MODULE_DEVICE_TABLE(pci, s2io_tbl);
424
425 static struct pci_driver s2io_driver = {
426 .name = "S2IO",
427 .id_table = s2io_tbl,
428 .probe = s2io_init_nic,
429 .remove = __devexit_p(s2io_rem_nic),
430 };
431
432 /* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
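/*
 * Worked example (illustrative numbers): with PAGE_SIZE = 4096 and a
 * TxDL size of 512 bytes, lst_per_page = 8, so a FIFO of 100 TxDLs
 * needs TXD_MEM_PAGE_CNT(100, 8) = (100 + 8 - 1) / 8 = 13 pages.
 */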
434
435 /**
436 * init_shared_mem - Allocation and Initialization of Memory
437 * @nic: Device private variable.
438 * Description: The function allocates all the memory areas shared
439 * between the NIC and the driver. This includes Tx descriptors,
440 * Rx descriptors and the statistics block.
441 */
442
443 static int init_shared_mem(struct s2io_nic *nic)
444 {
445 u32 size;
446 void *tmp_v_addr, *tmp_v_addr_next;
447 dma_addr_t tmp_p_addr, tmp_p_addr_next;
448 RxD_block_t *pre_rxd_blk = NULL;
449 int i, j, blk_cnt, rx_sz, tx_sz;
450 int lst_size, lst_per_page;
451 struct net_device *dev = nic->dev;
452 unsigned long tmp;
453 buffAdd_t *ba;
454
455 mac_info_t *mac_control;
456 struct config_param *config;
457
458 mac_control = &nic->mac_control;
459 config = &nic->config;
460
461
	/* Allocation and initialization of TXDLs in FIFOs */
463 size = 0;
464 for (i = 0; i < config->tx_fifo_num; i++) {
465 size += config->tx_cfg[i].fifo_len;
466 }
467 if (size > MAX_AVAILABLE_TXDS) {
468 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
469 __FUNCTION__);
470 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
471 return FAILURE;
472 }
473
474 lst_size = (sizeof(TxD_t) * config->max_txds);
475 tx_sz = lst_size * size;
476 lst_per_page = PAGE_SIZE / lst_size;
477
478 for (i = 0; i < config->tx_fifo_num; i++) {
479 int fifo_len = config->tx_cfg[i].fifo_len;
480 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
481 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
482 GFP_KERNEL);
483 if (!mac_control->fifos[i].list_info) {
484 DBG_PRINT(ERR_DBG,
485 "Malloc failed for list_info\n");
486 return -ENOMEM;
487 }
488 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
489 }
490 for (i = 0; i < config->tx_fifo_num; i++) {
491 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
492 lst_per_page);
493 mac_control->fifos[i].tx_curr_put_info.offset = 0;
494 mac_control->fifos[i].tx_curr_put_info.fifo_len =
495 config->tx_cfg[i].fifo_len - 1;
496 mac_control->fifos[i].tx_curr_get_info.offset = 0;
497 mac_control->fifos[i].tx_curr_get_info.fifo_len =
498 config->tx_cfg[i].fifo_len - 1;
499 mac_control->fifos[i].fifo_no = i;
500 mac_control->fifos[i].nic = nic;
501 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
502
503 for (j = 0; j < page_num; j++) {
504 int k = 0;
505 dma_addr_t tmp_p;
506 void *tmp_v;
507 tmp_v = pci_alloc_consistent(nic->pdev,
508 PAGE_SIZE, &tmp_p);
509 if (!tmp_v) {
510 DBG_PRINT(ERR_DBG,
511 "pci_alloc_consistent ");
512 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
513 return -ENOMEM;
514 }
			/* If we got a zero DMA address (this can happen
			 * on certain platforms like PPC), reallocate.
			 * Store the virtual address of the page we don't
			 * want, so it can be freed later.
			 */
520 if (!tmp_p) {
521 mac_control->zerodma_virt_addr = tmp_v;
522 DBG_PRINT(INIT_DBG,
523 "%s: Zero DMA address for TxDL. ", dev->name);
524 DBG_PRINT(INIT_DBG,
525 "Virtual address %p\n", tmp_v);
526 tmp_v = pci_alloc_consistent(nic->pdev,
527 PAGE_SIZE, &tmp_p);
528 if (!tmp_v) {
529 DBG_PRINT(ERR_DBG,
530 "pci_alloc_consistent ");
531 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
532 return -ENOMEM;
533 }
534 }
535 while (k < lst_per_page) {
536 int l = (j * lst_per_page) + k;
537 if (l == config->tx_cfg[i].fifo_len)
538 break;
539 mac_control->fifos[i].list_info[l].list_virt_addr =
540 tmp_v + (k * lst_size);
541 mac_control->fifos[i].list_info[l].list_phy_addr =
542 tmp_p + (k * lst_size);
543 k++;
544 }
545 }
546 }
547
548 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
549 if (!nic->ufo_in_band_v)
550 return -ENOMEM;
551
552 /* Allocation and initialization of RXDs in Rings */
553 size = 0;
554 for (i = 0; i < config->rx_ring_num; i++) {
555 if (config->rx_cfg[i].num_rxd %
556 (rxd_count[nic->rxd_mode] + 1)) {
557 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
558 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
559 i);
560 DBG_PRINT(ERR_DBG, "RxDs per Block");
561 return FAILURE;
562 }
563 size += config->rx_cfg[i].num_rxd;
564 mac_control->rings[i].block_count =
565 config->rx_cfg[i].num_rxd /
566 (rxd_count[nic->rxd_mode] + 1 );
567 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
568 mac_control->rings[i].block_count;
569 }
570 if (nic->rxd_mode == RXD_MODE_1)
571 size = (size * (sizeof(RxD1_t)));
572 else
573 size = (size * (sizeof(RxD3_t)));
574 rx_sz = size;
575
576 for (i = 0; i < config->rx_ring_num; i++) {
577 mac_control->rings[i].rx_curr_get_info.block_index = 0;
578 mac_control->rings[i].rx_curr_get_info.offset = 0;
579 mac_control->rings[i].rx_curr_get_info.ring_len =
580 config->rx_cfg[i].num_rxd - 1;
581 mac_control->rings[i].rx_curr_put_info.block_index = 0;
582 mac_control->rings[i].rx_curr_put_info.offset = 0;
583 mac_control->rings[i].rx_curr_put_info.ring_len =
584 config->rx_cfg[i].num_rxd - 1;
585 mac_control->rings[i].nic = nic;
586 mac_control->rings[i].ring_no = i;
587
588 blk_cnt = config->rx_cfg[i].num_rxd /
589 (rxd_count[nic->rxd_mode] + 1);
590 /* Allocating all the Rx blocks */
591 for (j = 0; j < blk_cnt; j++) {
592 rx_block_info_t *rx_blocks;
593 int l;
594
595 rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; /* size is always page size */
597 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
598 &tmp_p_addr);
599 if (tmp_v_addr == NULL) {
600 /*
601 * In case of failure, free_shared_mem()
602 * is called, which should free any
603 * memory that was alloced till the
604 * failure happened.
605 */
606 rx_blocks->block_virt_addr = tmp_v_addr;
607 return -ENOMEM;
608 }
609 memset(tmp_v_addr, 0, size);
610 rx_blocks->block_virt_addr = tmp_v_addr;
611 rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(rxd_info_t) *
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
615 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
616 rx_blocks->rxds[l].virt_addr =
617 rx_blocks->block_virt_addr +
618 (rxd_size[nic->rxd_mode] * l);
619 rx_blocks->rxds[l].dma_addr =
620 rx_blocks->block_dma_addr +
621 (rxd_size[nic->rxd_mode] * l);
622 }
623 }
624 /* Interlinking all Rx Blocks */
625 for (j = 0; j < blk_cnt; j++) {
626 tmp_v_addr =
627 mac_control->rings[i].rx_blocks[j].block_virt_addr;
628 tmp_v_addr_next =
629 mac_control->rings[i].rx_blocks[(j + 1) %
630 blk_cnt].block_virt_addr;
631 tmp_p_addr =
632 mac_control->rings[i].rx_blocks[j].block_dma_addr;
633 tmp_p_addr_next =
634 mac_control->rings[i].rx_blocks[(j + 1) %
635 blk_cnt].block_dma_addr;
636
637 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
638 pre_rxd_blk->reserved_2_pNext_RxD_block =
639 (unsigned long) tmp_v_addr_next;
640 pre_rxd_blk->pNext_RxD_Blk_physical =
641 (u64) tmp_p_addr_next;
642 }
643 }
644 if (nic->rxd_mode >= RXD_MODE_3A) {
645 /*
	 * Allocation of storage for buffer addresses in 2BUFF mode,
	 * and of the buffers themselves.
648 */
649 for (i = 0; i < config->rx_ring_num; i++) {
650 blk_cnt = config->rx_cfg[i].num_rxd /
651 (rxd_count[nic->rxd_mode]+ 1);
652 mac_control->rings[i].ba =
653 kmalloc((sizeof(buffAdd_t *) * blk_cnt),
654 GFP_KERNEL);
655 if (!mac_control->rings[i].ba)
656 return -ENOMEM;
657 for (j = 0; j < blk_cnt; j++) {
658 int k = 0;
659 mac_control->rings[i].ba[j] =
660 kmalloc((sizeof(buffAdd_t) *
661 (rxd_count[nic->rxd_mode] + 1)),
662 GFP_KERNEL);
663 if (!mac_control->rings[i].ba[j])
664 return -ENOMEM;
665 while (k != rxd_count[nic->rxd_mode]) {
666 ba = &mac_control->rings[i].ba[j][k];
667
668 ba->ba_0_org = (void *) kmalloc
669 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
670 if (!ba->ba_0_org)
671 return -ENOMEM;
672 tmp = (unsigned long)ba->ba_0_org;
673 tmp += ALIGN_SIZE;
674 tmp &= ~((unsigned long) ALIGN_SIZE);
675 ba->ba_0 = (void *) tmp;
676
677 ba->ba_1_org = (void *) kmalloc
678 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
679 if (!ba->ba_1_org)
680 return -ENOMEM;
681 tmp = (unsigned long) ba->ba_1_org;
682 tmp += ALIGN_SIZE;
683 tmp &= ~((unsigned long) ALIGN_SIZE);
684 ba->ba_1 = (void *) tmp;
685 k++;
686 }
687 }
688 }
689 }
690
691 /* Allocation and initialization of Statistics block */
692 size = sizeof(StatInfo_t);
693 mac_control->stats_mem = pci_alloc_consistent
694 (nic->pdev, size, &mac_control->stats_mem_phy);
695
696 if (!mac_control->stats_mem) {
697 /*
698 * In case of failure, free_shared_mem() is called, which
699 * should free any memory that was alloced till the
700 * failure happened.
701 */
702 return -ENOMEM;
703 }
704 mac_control->stats_mem_sz = size;
705
706 tmp_v_addr = mac_control->stats_mem;
707 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
708 memset(tmp_v_addr, 0, size);
709 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
710 (unsigned long long) tmp_p_addr);
711
712 return SUCCESS;
713 }
714
715 /**
716 * free_shared_mem - Free the allocated Memory
717 * @nic: Device private variable.
 * Description: This function frees all the memory allocated by
 * init_shared_mem() and returns it to the kernel.
720 */
721
722 static void free_shared_mem(struct s2io_nic *nic)
723 {
724 int i, j, blk_cnt, size;
725 void *tmp_v_addr;
726 dma_addr_t tmp_p_addr;
727 mac_info_t *mac_control;
728 struct config_param *config;
729 int lst_size, lst_per_page;
730 struct net_device *dev = nic->dev;
731
732 if (!nic)
733 return;
734
735 mac_control = &nic->mac_control;
736 config = &nic->config;
737
738 lst_size = (sizeof(TxD_t) * config->max_txds);
739 lst_per_page = PAGE_SIZE / lst_size;
740
741 for (i = 0; i < config->tx_fifo_num; i++) {
742 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
743 lst_per_page);
744 for (j = 0; j < page_num; j++) {
745 int mem_blks = (j * lst_per_page);
746 if (!mac_control->fifos[i].list_info)
747 return;
748 if (!mac_control->fifos[i].list_info[mem_blks].
749 list_virt_addr)
750 break;
751 pci_free_consistent(nic->pdev, PAGE_SIZE,
752 mac_control->fifos[i].
753 list_info[mem_blks].
754 list_virt_addr,
755 mac_control->fifos[i].
756 list_info[mem_blks].
757 list_phy_addr);
758 }
759 /* If we got a zero DMA address during allocation,
760 * free the page now
761 */
762 if (mac_control->zerodma_virt_addr) {
763 pci_free_consistent(nic->pdev, PAGE_SIZE,
764 mac_control->zerodma_virt_addr,
765 (dma_addr_t)0);
766 DBG_PRINT(INIT_DBG,
767 "%s: Freeing TxDL with zero DMA addr. ",
768 dev->name);
769 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
770 mac_control->zerodma_virt_addr);
771 }
772 kfree(mac_control->fifos[i].list_info);
773 }
774
775 size = SIZE_OF_BLOCK;
776 for (i = 0; i < config->rx_ring_num; i++) {
777 blk_cnt = mac_control->rings[i].block_count;
778 for (j = 0; j < blk_cnt; j++) {
779 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
780 block_virt_addr;
781 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
782 block_dma_addr;
783 if (tmp_v_addr == NULL)
784 break;
785 pci_free_consistent(nic->pdev, size,
786 tmp_v_addr, tmp_p_addr);
787 kfree(mac_control->rings[i].rx_blocks[j].rxds);
788 }
789 }
790
791 if (nic->rxd_mode >= RXD_MODE_3A) {
792 /* Freeing buffer storage addresses in 2BUFF mode. */
793 for (i = 0; i < config->rx_ring_num; i++) {
794 blk_cnt = config->rx_cfg[i].num_rxd /
795 (rxd_count[nic->rxd_mode] + 1);
796 for (j = 0; j < blk_cnt; j++) {
797 int k = 0;
798 if (!mac_control->rings[i].ba[j])
799 continue;
800 while (k != rxd_count[nic->rxd_mode]) {
801 buffAdd_t *ba =
802 &mac_control->rings[i].ba[j][k];
803 kfree(ba->ba_0_org);
804 kfree(ba->ba_1_org);
805 k++;
806 }
807 kfree(mac_control->rings[i].ba[j]);
808 }
809 kfree(mac_control->rings[i].ba);
810 }
811 }
812
813 if (mac_control->stats_mem) {
814 pci_free_consistent(nic->pdev,
815 mac_control->stats_mem_sz,
816 mac_control->stats_mem,
817 mac_control->stats_mem_phy);
818 }
819 if (nic->ufo_in_band_v)
820 kfree(nic->ufo_in_band_v);
821 }
822
823 /**
 * s2io_verify_pci_mode - Verify the PCI/PCI-X mode the adapter operates in.
825 */
826
827 static int s2io_verify_pci_mode(nic_t *nic)
828 {
829 XENA_dev_config_t __iomem *bar0 = nic->bar0;
830 register u64 val64 = 0;
831 int mode;
832
833 val64 = readq(&bar0->pci_mode);
834 mode = (u8)GET_PCI_MODE(val64);
835
	if (val64 & PCI_MODE_UNKNOWN_MODE)
837 return -1; /* Unknown PCI mode */
838 return mode;
839 }
840
841 #define NEC_VENID 0x1033
842 #define NEC_DEVID 0x0125
843 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
844 {
845 struct pci_dev *tdev = NULL;
846 while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
847 if ((tdev->vendor == NEC_VENID) && (tdev->device == NEC_DEVID)){
848 if (tdev->bus == s2io_pdev->bus->parent)
849 return 1;
850 }
851 }
852 return 0;
853 }
854
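/*
 * bus_speed[] is indexed by the PCI mode value decoded with
 * GET_PCI_MODE(); config->bus_speed is later used to scale the TTI/RTI
 * interrupt timers in init_nic(). (A note on intent; the table values
 * themselves are kept exactly as in the original driver.)
 */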
855 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
856 /**
 * s2io_print_pci_mode - Print the bus type/width and record the bus speed.
858 */
859 static int s2io_print_pci_mode(nic_t *nic)
860 {
861 XENA_dev_config_t __iomem *bar0 = nic->bar0;
862 register u64 val64 = 0;
863 int mode;
864 struct config_param *config = &nic->config;
865
866 val64 = readq(&bar0->pci_mode);
867 mode = (u8)GET_PCI_MODE(val64);
868
	if (val64 & PCI_MODE_UNKNOWN_MODE)
870 return -1; /* Unknown PCI mode */
871
872 config->bus_speed = bus_speed[mode];
873
874 if (s2io_on_nec_bridge(nic->pdev)) {
875 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
876 nic->dev->name);
877 return mode;
878 }
879
880 if (val64 & PCI_MODE_32_BITS) {
881 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
882 } else {
883 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
884 }
885
886 switch(mode) {
887 case PCI_MODE_PCI_33:
888 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
889 break;
890 case PCI_MODE_PCI_66:
891 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
892 break;
893 case PCI_MODE_PCIX_M1_66:
894 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
895 break;
896 case PCI_MODE_PCIX_M1_100:
897 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
898 break;
899 case PCI_MODE_PCIX_M1_133:
900 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
901 break;
902 case PCI_MODE_PCIX_M2_66:
903 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
904 break;
905 case PCI_MODE_PCIX_M2_100:
906 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
907 break;
908 case PCI_MODE_PCIX_M2_133:
909 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
910 break;
911 default:
912 return -1; /* Unsupported bus speed */
913 }
914
915 return mode;
916 }
917
918 /**
919 * init_nic - Initialization of hardware
 * @nic: device private variable
921 * Description: The function sequentially configures every block
922 * of the H/W from their reset values.
923 * Return Value: SUCCESS on success and
924 * '-1' on failure (endian settings incorrect).
925 */
926
927 static int init_nic(struct s2io_nic *nic)
928 {
929 XENA_dev_config_t __iomem *bar0 = nic->bar0;
930 struct net_device *dev = nic->dev;
931 register u64 val64 = 0;
932 void __iomem *add;
933 u32 time;
934 int i, j;
935 mac_info_t *mac_control;
936 struct config_param *config;
937 int dtx_cnt = 0;
938 unsigned long long mem_share;
939 int mem_size;
940
941 mac_control = &nic->mac_control;
942 config = &nic->config;
943
	/* Set the swapper control on the card */
945 if(s2io_set_swapper(nic)) {
946 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
947 return -1;
948 }
949
950 /*
	 * Herc requires EOI to be removed from reset before XGXS, so do that first.
952 */
953 if (nic->device_type & XFRAME_II_DEVICE) {
954 val64 = 0xA500000000ULL;
955 writeq(val64, &bar0->sw_reset);
956 msleep(500);
957 val64 = readq(&bar0->sw_reset);
958 }
959
960 /* Remove XGXS from reset state */
961 val64 = 0;
962 writeq(val64, &bar0->sw_reset);
963 msleep(500);
964 val64 = readq(&bar0->sw_reset);
965
966 /* Enable Receiving broadcasts */
967 add = &bar0->mac_cfg;
968 val64 = readq(&bar0->mac_cfg);
969 val64 |= MAC_RMAC_BCAST_ENABLE;
970 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
971 writel((u32) val64, add);
972 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
973 writel((u32) (val64 >> 32), (add + 4));
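
	/*
	 * Note on the sequence above: mac_cfg is a key-protected register,
	 * so each 32-bit half is written with a separate writel() that must
	 * be preceded by writing the 0x4C0D key to rmac_cfg_key.
	 */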
974
975 /* Read registers in all blocks */
976 val64 = readq(&bar0->mac_int_mask);
977 val64 = readq(&bar0->mc_int_mask);
978 val64 = readq(&bar0->xgxs_int_mask);
979
980 /* Set MTU */
981 val64 = dev->mtu;
982 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
983
984 if (nic->device_type & XFRAME_II_DEVICE) {
985 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
986 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
987 &bar0->dtx_control, UF);
988 if (dtx_cnt & 0x1)
989 msleep(1); /* Necessary!! */
990 dtx_cnt++;
991 }
992 } else {
993 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
994 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
995 &bar0->dtx_control, UF);
996 val64 = readq(&bar0->dtx_control);
997 dtx_cnt++;
998 }
999 }
1000
1001 /* Tx DMA Initialization */
1002 val64 = 0;
1003 writeq(val64, &bar0->tx_fifo_partition_0);
1004 writeq(val64, &bar0->tx_fifo_partition_1);
1005 writeq(val64, &bar0->tx_fifo_partition_2);
1006 writeq(val64, &bar0->tx_fifo_partition_3);
1007
1008
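	/*
	 * Sketch of the loop below: each 64-bit tx_fifo_partition register
	 * carries two FIFOs' worth of configuration, a 13-bit (fifo_len - 1)
	 * field and a 3-bit priority placed with vBIT() in each 32-bit half,
	 * so the accumulated value is flushed after every second FIFO.
	 */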
1009 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1010 val64 |=
1011 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1012 13) | vBIT(config->tx_cfg[i].fifo_priority,
1013 ((i * 32) + 5), 3);
1014
1015 if (i == (config->tx_fifo_num - 1)) {
1016 if (i % 2 == 0)
1017 i++;
1018 }
1019
1020 switch (i) {
1021 case 1:
1022 writeq(val64, &bar0->tx_fifo_partition_0);
1023 val64 = 0;
1024 break;
1025 case 3:
1026 writeq(val64, &bar0->tx_fifo_partition_1);
1027 val64 = 0;
1028 break;
1029 case 5:
1030 writeq(val64, &bar0->tx_fifo_partition_2);
1031 val64 = 0;
1032 break;
1033 case 7:
1034 writeq(val64, &bar0->tx_fifo_partition_3);
1035 break;
1036 }
1037 }
1038
1039 /*
1040 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1041 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1042 */
1043 if ((nic->device_type == XFRAME_I_DEVICE) &&
1044 (get_xena_rev_id(nic->pdev) < 4))
1045 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1046
1047 val64 = readq(&bar0->tx_fifo_partition_0);
1048 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1049 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1050
1051 /*
1052 * Initialization of Tx_PA_CONFIG register to ignore packet
1053 * integrity checking.
1054 */
1055 val64 = readq(&bar0->tx_pa_cfg);
1056 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1057 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1058 writeq(val64, &bar0->tx_pa_cfg);
1059
	/* Rx DMA initialization. */
1061 val64 = 0;
1062 for (i = 0; i < config->rx_ring_num; i++) {
1063 val64 |=
1064 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1065 3);
1066 }
1067 writeq(val64, &bar0->rx_queue_priority);
1068
1069 /*
1070 * Allocating equal share of memory to all the
1071 * configured Rings.
1072 */
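	/*
	 * Worked example (illustrative): with mem_size = 64 and
	 * rx_ring_num = 3, queue 0 receives 64/3 + 64%3 = 22 units and
	 * queues 1-2 receive 21 each, i.e. the division remainder is
	 * always folded into queue 0.
	 */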
1073 val64 = 0;
1074 if (nic->device_type & XFRAME_II_DEVICE)
1075 mem_size = 32;
1076 else
1077 mem_size = 64;
1078
1079 for (i = 0; i < config->rx_ring_num; i++) {
1080 switch (i) {
1081 case 0:
1082 mem_share = (mem_size / config->rx_ring_num +
1083 mem_size % config->rx_ring_num);
1084 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1085 continue;
1086 case 1:
1087 mem_share = (mem_size / config->rx_ring_num);
1088 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1089 continue;
1090 case 2:
1091 mem_share = (mem_size / config->rx_ring_num);
1092 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1093 continue;
1094 case 3:
1095 mem_share = (mem_size / config->rx_ring_num);
1096 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1097 continue;
1098 case 4:
1099 mem_share = (mem_size / config->rx_ring_num);
1100 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1101 continue;
1102 case 5:
1103 mem_share = (mem_size / config->rx_ring_num);
1104 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1105 continue;
1106 case 6:
1107 mem_share = (mem_size / config->rx_ring_num);
1108 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1109 continue;
1110 case 7:
1111 mem_share = (mem_size / config->rx_ring_num);
1112 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1113 continue;
1114 }
1115 }
1116 writeq(val64, &bar0->rx_queue_cfg);
1117
1118 /*
1119 * Filling Tx round robin registers
1120 * as per the number of FIFOs
1121 */
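	/*
	 * In the tables below, each byte of the five tx_w_round_robin
	 * registers appears to select the FIFO that owns the next transmit
	 * arbitration slot, so the 40 bytes together spell out the weighted
	 * round-robin sequence for the configured number of FIFOs (a
	 * reading of the patterns, not a statement from the hardware spec).
	 */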
1122 switch (config->tx_fifo_num) {
1123 case 1:
1124 val64 = 0x0000000000000000ULL;
1125 writeq(val64, &bar0->tx_w_round_robin_0);
1126 writeq(val64, &bar0->tx_w_round_robin_1);
1127 writeq(val64, &bar0->tx_w_round_robin_2);
1128 writeq(val64, &bar0->tx_w_round_robin_3);
1129 writeq(val64, &bar0->tx_w_round_robin_4);
1130 break;
1131 case 2:
1132 val64 = 0x0000010000010000ULL;
1133 writeq(val64, &bar0->tx_w_round_robin_0);
1134 val64 = 0x0100000100000100ULL;
1135 writeq(val64, &bar0->tx_w_round_robin_1);
1136 val64 = 0x0001000001000001ULL;
1137 writeq(val64, &bar0->tx_w_round_robin_2);
1138 val64 = 0x0000010000010000ULL;
1139 writeq(val64, &bar0->tx_w_round_robin_3);
1140 val64 = 0x0100000000000000ULL;
1141 writeq(val64, &bar0->tx_w_round_robin_4);
1142 break;
1143 case 3:
1144 val64 = 0x0001000102000001ULL;
1145 writeq(val64, &bar0->tx_w_round_robin_0);
1146 val64 = 0x0001020000010001ULL;
1147 writeq(val64, &bar0->tx_w_round_robin_1);
1148 val64 = 0x0200000100010200ULL;
1149 writeq(val64, &bar0->tx_w_round_robin_2);
1150 val64 = 0x0001000102000001ULL;
1151 writeq(val64, &bar0->tx_w_round_robin_3);
1152 val64 = 0x0001020000000000ULL;
1153 writeq(val64, &bar0->tx_w_round_robin_4);
1154 break;
1155 case 4:
1156 val64 = 0x0001020300010200ULL;
1157 writeq(val64, &bar0->tx_w_round_robin_0);
1158 val64 = 0x0100000102030001ULL;
1159 writeq(val64, &bar0->tx_w_round_robin_1);
1160 val64 = 0x0200010000010203ULL;
1161 writeq(val64, &bar0->tx_w_round_robin_2);
1162 val64 = 0x0001020001000001ULL;
1163 writeq(val64, &bar0->tx_w_round_robin_3);
1164 val64 = 0x0203000100000000ULL;
1165 writeq(val64, &bar0->tx_w_round_robin_4);
1166 break;
1167 case 5:
1168 val64 = 0x0001000203000102ULL;
1169 writeq(val64, &bar0->tx_w_round_robin_0);
1170 val64 = 0x0001020001030004ULL;
1171 writeq(val64, &bar0->tx_w_round_robin_1);
1172 val64 = 0x0001000203000102ULL;
1173 writeq(val64, &bar0->tx_w_round_robin_2);
1174 val64 = 0x0001020001030004ULL;
1175 writeq(val64, &bar0->tx_w_round_robin_3);
1176 val64 = 0x0001000000000000ULL;
1177 writeq(val64, &bar0->tx_w_round_robin_4);
1178 break;
1179 case 6:
1180 val64 = 0x0001020304000102ULL;
1181 writeq(val64, &bar0->tx_w_round_robin_0);
1182 val64 = 0x0304050001020001ULL;
1183 writeq(val64, &bar0->tx_w_round_robin_1);
1184 val64 = 0x0203000100000102ULL;
1185 writeq(val64, &bar0->tx_w_round_robin_2);
1186 val64 = 0x0304000102030405ULL;
1187 writeq(val64, &bar0->tx_w_round_robin_3);
1188 val64 = 0x0001000200000000ULL;
1189 writeq(val64, &bar0->tx_w_round_robin_4);
1190 break;
1191 case 7:
1192 val64 = 0x0001020001020300ULL;
1193 writeq(val64, &bar0->tx_w_round_robin_0);
1194 val64 = 0x0102030400010203ULL;
1195 writeq(val64, &bar0->tx_w_round_robin_1);
1196 val64 = 0x0405060001020001ULL;
1197 writeq(val64, &bar0->tx_w_round_robin_2);
1198 val64 = 0x0304050000010200ULL;
1199 writeq(val64, &bar0->tx_w_round_robin_3);
1200 val64 = 0x0102030000000000ULL;
1201 writeq(val64, &bar0->tx_w_round_robin_4);
1202 break;
1203 case 8:
1204 val64 = 0x0001020300040105ULL;
1205 writeq(val64, &bar0->tx_w_round_robin_0);
1206 val64 = 0x0200030106000204ULL;
1207 writeq(val64, &bar0->tx_w_round_robin_1);
1208 val64 = 0x0103000502010007ULL;
1209 writeq(val64, &bar0->tx_w_round_robin_2);
1210 val64 = 0x0304010002060500ULL;
1211 writeq(val64, &bar0->tx_w_round_robin_3);
1212 val64 = 0x0103020400000000ULL;
1213 writeq(val64, &bar0->tx_w_round_robin_4);
1214 break;
1215 }
1216
1217 /* Enable Tx FIFO partition 0. */
1218 val64 = readq(&bar0->tx_fifo_partition_0);
1219 val64 |= (TX_FIFO_PARTITION_EN);
1220 writeq(val64, &bar0->tx_fifo_partition_0);
1221
1222 /* Filling the Rx round robin registers as per the
1223 * number of Rings and steering based on QoS.
1224 */
1225 switch (config->rx_ring_num) {
1226 case 1:
1227 val64 = 0x8080808080808080ULL;
1228 writeq(val64, &bar0->rts_qos_steering);
1229 break;
1230 case 2:
1231 val64 = 0x0000010000010000ULL;
1232 writeq(val64, &bar0->rx_w_round_robin_0);
1233 val64 = 0x0100000100000100ULL;
1234 writeq(val64, &bar0->rx_w_round_robin_1);
1235 val64 = 0x0001000001000001ULL;
1236 writeq(val64, &bar0->rx_w_round_robin_2);
1237 val64 = 0x0000010000010000ULL;
1238 writeq(val64, &bar0->rx_w_round_robin_3);
1239 val64 = 0x0100000000000000ULL;
1240 writeq(val64, &bar0->rx_w_round_robin_4);
1241
1242 val64 = 0x8080808040404040ULL;
1243 writeq(val64, &bar0->rts_qos_steering);
1244 break;
1245 case 3:
1246 val64 = 0x0001000102000001ULL;
1247 writeq(val64, &bar0->rx_w_round_robin_0);
1248 val64 = 0x0001020000010001ULL;
1249 writeq(val64, &bar0->rx_w_round_robin_1);
1250 val64 = 0x0200000100010200ULL;
1251 writeq(val64, &bar0->rx_w_round_robin_2);
1252 val64 = 0x0001000102000001ULL;
1253 writeq(val64, &bar0->rx_w_round_robin_3);
1254 val64 = 0x0001020000000000ULL;
1255 writeq(val64, &bar0->rx_w_round_robin_4);
1256
1257 val64 = 0x8080804040402020ULL;
1258 writeq(val64, &bar0->rts_qos_steering);
1259 break;
1260 case 4:
1261 val64 = 0x0001020300010200ULL;
1262 writeq(val64, &bar0->rx_w_round_robin_0);
1263 val64 = 0x0100000102030001ULL;
1264 writeq(val64, &bar0->rx_w_round_robin_1);
1265 val64 = 0x0200010000010203ULL;
1266 writeq(val64, &bar0->rx_w_round_robin_2);
1267 val64 = 0x0001020001000001ULL;
1268 writeq(val64, &bar0->rx_w_round_robin_3);
1269 val64 = 0x0203000100000000ULL;
1270 writeq(val64, &bar0->rx_w_round_robin_4);
1271
1272 val64 = 0x8080404020201010ULL;
1273 writeq(val64, &bar0->rts_qos_steering);
1274 break;
1275 case 5:
1276 val64 = 0x0001000203000102ULL;
1277 writeq(val64, &bar0->rx_w_round_robin_0);
1278 val64 = 0x0001020001030004ULL;
1279 writeq(val64, &bar0->rx_w_round_robin_1);
1280 val64 = 0x0001000203000102ULL;
1281 writeq(val64, &bar0->rx_w_round_robin_2);
1282 val64 = 0x0001020001030004ULL;
1283 writeq(val64, &bar0->rx_w_round_robin_3);
1284 val64 = 0x0001000000000000ULL;
1285 writeq(val64, &bar0->rx_w_round_robin_4);
1286
1287 val64 = 0x8080404020201008ULL;
1288 writeq(val64, &bar0->rts_qos_steering);
1289 break;
1290 case 6:
1291 val64 = 0x0001020304000102ULL;
1292 writeq(val64, &bar0->rx_w_round_robin_0);
1293 val64 = 0x0304050001020001ULL;
1294 writeq(val64, &bar0->rx_w_round_robin_1);
1295 val64 = 0x0203000100000102ULL;
1296 writeq(val64, &bar0->rx_w_round_robin_2);
1297 val64 = 0x0304000102030405ULL;
1298 writeq(val64, &bar0->rx_w_round_robin_3);
1299 val64 = 0x0001000200000000ULL;
1300 writeq(val64, &bar0->rx_w_round_robin_4);
1301
1302 val64 = 0x8080404020100804ULL;
1303 writeq(val64, &bar0->rts_qos_steering);
1304 break;
1305 case 7:
1306 val64 = 0x0001020001020300ULL;
1307 writeq(val64, &bar0->rx_w_round_robin_0);
1308 val64 = 0x0102030400010203ULL;
1309 writeq(val64, &bar0->rx_w_round_robin_1);
1310 val64 = 0x0405060001020001ULL;
1311 writeq(val64, &bar0->rx_w_round_robin_2);
1312 val64 = 0x0304050000010200ULL;
1313 writeq(val64, &bar0->rx_w_round_robin_3);
1314 val64 = 0x0102030000000000ULL;
1315 writeq(val64, &bar0->rx_w_round_robin_4);
1316
1317 val64 = 0x8080402010080402ULL;
1318 writeq(val64, &bar0->rts_qos_steering);
1319 break;
1320 case 8:
1321 val64 = 0x0001020300040105ULL;
1322 writeq(val64, &bar0->rx_w_round_robin_0);
1323 val64 = 0x0200030106000204ULL;
1324 writeq(val64, &bar0->rx_w_round_robin_1);
1325 val64 = 0x0103000502010007ULL;
1326 writeq(val64, &bar0->rx_w_round_robin_2);
1327 val64 = 0x0304010002060500ULL;
1328 writeq(val64, &bar0->rx_w_round_robin_3);
1329 val64 = 0x0103020400000000ULL;
1330 writeq(val64, &bar0->rx_w_round_robin_4);
1331
1332 val64 = 0x8040201008040201ULL;
1333 writeq(val64, &bar0->rts_qos_steering);
1334 break;
1335 }
1336
1337 /* UDP Fix */
1338 val64 = 0;
1339 for (i = 0; i < 8; i++)
1340 writeq(val64, &bar0->rts_frm_len_n[i]);
1341
1342 /* Set the default rts frame length for the rings configured */
1343 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1344 for (i = 0 ; i < config->rx_ring_num ; i++)
1345 writeq(val64, &bar0->rts_frm_len_n[i]);
1346
1347 /* Set the frame length for the configured rings
1348 * desired by the user
1349 */
1350 for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0, it is assumed that the user has
		 * not specified frame-length steering.
1353 * If the user provides the frame length then program
1354 * the rts_frm_len register for those values or else
1355 * leave it as it is.
1356 */
1357 if (rts_frm_len[i] != 0) {
1358 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1359 &bar0->rts_frm_len_n[i]);
1360 }
1361 }
1362
1363 /* Program statistics memory */
1364 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1365
1366 if (nic->device_type == XFRAME_II_DEVICE) {
1367 val64 = STAT_BC(0x320);
1368 writeq(val64, &bar0->stat_byte_cnt);
1369 }
1370
1371 /*
1372 * Initializing the sampling rate for the device to calculate the
1373 * bandwidth utilization.
1374 */
1375 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1376 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1377 writeq(val64, &bar0->mac_link_util);
1378
1379
1380 /*
1381 * Initializing the Transmit and Receive Traffic Interrupt
1382 * Scheme.
1383 */
1384 /*
1385 * TTI Initialization. Default Tx timer gets us about
1386 * 250 interrupts per sec. Continuous interrupts are enabled
1387 * by default.
1388 */
1389 if (nic->device_type == XFRAME_II_DEVICE) {
1390 int count = (nic->config.bus_speed * 125)/2;
1391 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1392 } else {
1393
1394 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1395 }
1396 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1397 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1398 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1399 if (use_continuous_tx_intrs)
1400 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1401 writeq(val64, &bar0->tti_data1_mem);
1402
1403 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1404 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1405 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1406 writeq(val64, &bar0->tti_data2_mem);
1407
1408 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1409 writeq(val64, &bar0->tti_command_mem);
1410
1411 /*
1412 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition.
	 * We wait for a maximum of 500ms for the operation to complete;
	 * if it's not complete by then we return an error.
1416 */
1417 time = 0;
1418 while (TRUE) {
1419 val64 = readq(&bar0->tti_command_mem);
1420 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1421 break;
1422 }
1423 if (time > 10) {
1424 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1425 dev->name);
1426 return -1;
1427 }
1428 msleep(50);
1429 time++;
1430 }
1431
1432 if (nic->config.bimodal) {
1433 int k = 0;
1434 for (k = 0; k < config->rx_ring_num; k++) {
1435 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1436 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1437 writeq(val64, &bar0->tti_command_mem);
1438
1439 /*
1440 * Once the operation completes, the Strobe bit of the command
			 * register will be reset. We poll for this
			 * particular condition. We wait for a maximum of
			 * 500ms for the operation to complete; if it's not
			 * complete by then we return an error.
1444 */
1445 time = 0;
1446 while (TRUE) {
1447 val64 = readq(&bar0->tti_command_mem);
1448 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1449 break;
1450 }
1451 if (time > 10) {
1452 DBG_PRINT(ERR_DBG,
1453 "%s: TTI init Failed\n",
1454 dev->name);
1455 return -1;
1456 }
1457 time++;
1458 msleep(50);
1459 }
1460 }
1461 } else {
1462
1463 /* RTI Initialization */
1464 if (nic->device_type == XFRAME_II_DEVICE) {
1465 /*
			 * Programmed to generate approx. 500 interrupts per
1467 * second
1468 */
1469 int count = (nic->config.bus_speed * 125)/4;
1470 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1471 } else {
1472 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1473 }
1474 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1475 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1476 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1477
1478 writeq(val64, &bar0->rti_data1_mem);
1479
1480 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1481 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1482 if (nic->intr_type == MSI_X)
1483 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1484 RTI_DATA2_MEM_RX_UFC_D(0x40));
1485 else
1486 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1487 RTI_DATA2_MEM_RX_UFC_D(0x80));
1488 writeq(val64, &bar0->rti_data2_mem);
1489
1490 for (i = 0; i < config->rx_ring_num; i++) {
1491 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1492 | RTI_CMD_MEM_OFFSET(i);
1493 writeq(val64, &bar0->rti_command_mem);
1494
1495 /*
1496 * Once the operation completes, the Strobe bit of the
1497 * command register will be reset. We poll for this
1498 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete; if it's not complete
			 * by then we return an error.
1501 */
1502 time = 0;
1503 while (TRUE) {
1504 val64 = readq(&bar0->rti_command_mem);
1505 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1506 break;
1507 }
1508 if (time > 10) {
1509 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1510 dev->name);
1511 return -1;
1512 }
1513 time++;
1514 msleep(50);
1515 }
1516 }
1517 }
1518
1519 /*
1520 * Initializing proper values as Pause threshold into all
1521 * the 8 Queues on Rx side.
1522 */
1523 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1524 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1525
1526 /* Disable RMAC PAD STRIPPING */
1527 add = &bar0->mac_cfg;
1528 val64 = readq(&bar0->mac_cfg);
1529 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1530 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1531 writel((u32) (val64), add);
1532 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1533 writel((u32) (val64 >> 32), (add + 4));
1534 val64 = readq(&bar0->mac_cfg);
1535
1536 /* Enable FCS stripping by adapter */
1537 add = &bar0->mac_cfg;
1538 val64 = readq(&bar0->mac_cfg);
1539 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1540 if (nic->device_type == XFRAME_II_DEVICE)
1541 writeq(val64, &bar0->mac_cfg);
1542 else {
1543 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1544 writel((u32) (val64), add);
1545 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1546 writel((u32) (val64 >> 32), (add + 4));
1547 }
1548
1549 /*
1550 * Set the time value to be inserted in the pause frame
1551 * generated by xena.
1552 */
1553 val64 = readq(&bar0->rmac_pause_cfg);
1554 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1555 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1556 writeq(val64, &bar0->rmac_pause_cfg);
1557
1558 /*
	 * Set the threshold limit for generating the pause frame.
	 * If the amount of data in any queue exceeds the ratio
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
	 * a pause frame is generated.
1563 */
1564 val64 = 0;
1565 for (i = 0; i < 4; i++) {
1566 val64 |=
1567 (((u64) 0xFF00 | nic->mac_control.
1568 mc_pause_threshold_q0q3)
1569 << (i * 2 * 8));
1570 }
1571 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1572
1573 val64 = 0;
1574 for (i = 0; i < 4; i++) {
1575 val64 |=
1576 (((u64) 0xFF00 | nic->mac_control.
1577 mc_pause_threshold_q4q7)
1578 << (i * 2 * 8));
1579 }
1580 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1581
1582 /*
	 * TxDMA will stop issuing read requests if the number of read
	 * splits has exceeded the limit set by shared_splits.
1585 */
1586 val64 = readq(&bar0->pic_control);
1587 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1588 writeq(val64, &bar0->pic_control);
1589
1590 if (nic->config.bus_speed == 266) {
1591 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1592 writeq(0x0, &bar0->read_retry_delay);
1593 writeq(0x0, &bar0->write_retry_delay);
1594 }
1595
1596 /*
1597 * Programming the Herc to split every write transaction
1598 * that does not start on an ADB to reduce disconnects.
1599 */
1600 if (nic->device_type == XFRAME_II_DEVICE) {
1601 val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
1602 writeq(val64, &bar0->misc_control);
1603 val64 = readq(&bar0->pic_control2);
1604 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1605 writeq(val64, &bar0->pic_control2);
1606 }
1607 if (strstr(nic->product_name, "CX4")) {
1608 val64 = TMAC_AVG_IPG(0x17);
1609 writeq(val64, &bar0->tmac_avg_ipg);
1610 }
1611
1612 return SUCCESS;
1613 }
1614 #define LINK_UP_DOWN_INTERRUPT 1
1615 #define MAC_RMAC_ERR_TIMER 2
1616
1617 static int s2io_link_fault_indication(nic_t *nic)
1618 {
1619 if (nic->intr_type != INTA)
1620 return MAC_RMAC_ERR_TIMER;
1621 if (nic->device_type == XFRAME_II_DEVICE)
1622 return LINK_UP_DOWN_INTERRUPT;
1623 else
1624 return MAC_RMAC_ERR_TIMER;
1625 }
1626
1627 /**
1628 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1629 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified, and
1631 * @flag: A flag indicating whether to enable or disable the Intrs.
1632 * Description: This function will either disable or enable the interrupts
1633 * depending on the flag argument. The mask argument can be used to
1634 * enable/disable any Intr block.
1635 * Return Value: NONE.
1636 */
1637
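/*
 * Usage sketch (illustrative):
 *
 *	en_dis_able_nic_intrs(nic, TX_TRAFFIC_INTR | RX_TRAFFIC_INTR,
 *			      ENABLE_INTRS);
 */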
1638 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1639 {
1640 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1641 register u64 val64 = 0, temp64 = 0;
1642
1643 /* Top level interrupt classification */
1644 /* PIC Interrupts */
1645 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1646 /* Enable PIC Intrs in the general intr mask register */
1647 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1648 if (flag == ENABLE_INTRS) {
1649 temp64 = readq(&bar0->general_int_mask);
1650 temp64 &= ~((u64) val64);
1651 writeq(temp64, &bar0->general_int_mask);
1652 /*
			 * If this is a Hercules adapter, enable the GPIO
			 * interrupt; otherwise keep all PCIX, Flash, MDIO,
			 * IIC and GPIO interrupts disabled for now.
1656 * TODO
1657 */
1658 if (s2io_link_fault_indication(nic) ==
1659 LINK_UP_DOWN_INTERRUPT ) {
1660 temp64 = readq(&bar0->pic_int_mask);
1661 temp64 &= ~((u64) PIC_INT_GPIO);
1662 writeq(temp64, &bar0->pic_int_mask);
1663 temp64 = readq(&bar0->gpio_int_mask);
1664 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1665 writeq(temp64, &bar0->gpio_int_mask);
1666 } else {
1667 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1668 }
1669 /*
1670 * No MSI Support is available presently, so TTI and
1671 * RTI interrupts are also disabled.
1672 */
1673 } else if (flag == DISABLE_INTRS) {
1674 /*
1675 * Disable PIC Intrs in the general
1676 * intr mask register
1677 */
1678 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1679 temp64 = readq(&bar0->general_int_mask);
1680 val64 |= temp64;
1681 writeq(val64, &bar0->general_int_mask);
1682 }
1683 }
1684
1685 /* DMA Interrupts */
1686 /* Enabling/Disabling Tx DMA interrupts */
1687 if (mask & TX_DMA_INTR) {
1688 /* Enable TxDMA Intrs in the general intr mask register */
1689 val64 = TXDMA_INT_M;
1690 if (flag == ENABLE_INTRS) {
1691 temp64 = readq(&bar0->general_int_mask);
1692 temp64 &= ~((u64) val64);
1693 writeq(temp64, &bar0->general_int_mask);
1694 /*
1695 * Keep all interrupts other than PFC interrupt
1696 * and PCC interrupt disabled in DMA level.
1697 */
1698 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1699 TXDMA_PCC_INT_M);
1700 writeq(val64, &bar0->txdma_int_mask);
1701 /*
1702 * Enable only the MISC error 1 interrupt in PFC block
1703 */
1704 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1705 writeq(val64, &bar0->pfc_err_mask);
1706 /*
1707 * Enable only the FB_ECC error interrupt in PCC block
1708 */
1709 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1710 writeq(val64, &bar0->pcc_err_mask);
1711 } else if (flag == DISABLE_INTRS) {
1712 /*
1713 * Disable TxDMA Intrs in the general intr mask
1714 * register
1715 */
1716 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1717 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1718 temp64 = readq(&bar0->general_int_mask);
1719 val64 |= temp64;
1720 writeq(val64, &bar0->general_int_mask);
1721 }
1722 }
1723
1724 /* Enabling/Disabling Rx DMA interrupts */
1725 if (mask & RX_DMA_INTR) {
1726 /* Enable RxDMA Intrs in the general intr mask register */
1727 val64 = RXDMA_INT_M;
1728 if (flag == ENABLE_INTRS) {
1729 temp64 = readq(&bar0->general_int_mask);
1730 temp64 &= ~((u64) val64);
1731 writeq(temp64, &bar0->general_int_mask);
1732 /*
1733 * All RxDMA block interrupts are disabled for now
1734 * TODO
1735 */
1736 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1737 } else if (flag == DISABLE_INTRS) {
1738 /*
1739 * Disable RxDMA Intrs in the general intr mask
1740 * register
1741 */
1742 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1743 temp64 = readq(&bar0->general_int_mask);
1744 val64 |= temp64;
1745 writeq(val64, &bar0->general_int_mask);
1746 }
1747 }
1748
1749 /* MAC Interrupts */
1750 /* Enabling/Disabling MAC interrupts */
1751 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1752 val64 = TXMAC_INT_M | RXMAC_INT_M;
1753 if (flag == ENABLE_INTRS) {
1754 temp64 = readq(&bar0->general_int_mask);
1755 temp64 &= ~((u64) val64);
1756 writeq(temp64, &bar0->general_int_mask);
1757 /*
1758 * All MAC block error interrupts are disabled for now
1759 * TODO
1760 */
1761 } else if (flag == DISABLE_INTRS) {
1762 /*
1763 * Disable MAC Intrs in the general intr mask register
1764 */
1765 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1766 writeq(DISABLE_ALL_INTRS,
1767 &bar0->mac_rmac_err_mask);
1768
1769 temp64 = readq(&bar0->general_int_mask);
1770 val64 |= temp64;
1771 writeq(val64, &bar0->general_int_mask);
1772 }
1773 }
1774
1775 /* XGXS Interrupts */
1776 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1777 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1778 if (flag == ENABLE_INTRS) {
1779 temp64 = readq(&bar0->general_int_mask);
1780 temp64 &= ~((u64) val64);
1781 writeq(temp64, &bar0->general_int_mask);
1782 /*
1783 * All XGXS block error interrupts are disabled for now
1784 * TODO
1785 */
1786 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1787 } else if (flag == DISABLE_INTRS) {
1788 /*
1789 * Disable MC Intrs in the general intr mask register
1790 */
1791 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1792 temp64 = readq(&bar0->general_int_mask);
1793 val64 |= temp64;
1794 writeq(val64, &bar0->general_int_mask);
1795 }
1796 }
1797
1798 /* Memory Controller(MC) interrupts */
1799 if (mask & MC_INTR) {
1800 val64 = MC_INT_M;
1801 if (flag == ENABLE_INTRS) {
1802 temp64 = readq(&bar0->general_int_mask);
1803 temp64 &= ~((u64) val64);
1804 writeq(temp64, &bar0->general_int_mask);
1805 /*
1806 * Enable all MC Intrs.
1807 */
1808 writeq(0x0, &bar0->mc_int_mask);
1809 writeq(0x0, &bar0->mc_err_mask);
1810 } else if (flag == DISABLE_INTRS) {
1811 /*
1812 * Disable MC Intrs in the general intr mask register
1813 */
1814 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1815 temp64 = readq(&bar0->general_int_mask);
1816 val64 |= temp64;
1817 writeq(val64, &bar0->general_int_mask);
1818 }
1819 }
1820
1821
1822 /* Tx traffic interrupts */
1823 if (mask & TX_TRAFFIC_INTR) {
1824 val64 = TXTRAFFIC_INT_M;
1825 if (flag == ENABLE_INTRS) {
1826 temp64 = readq(&bar0->general_int_mask);
1827 temp64 &= ~((u64) val64);
1828 writeq(temp64, &bar0->general_int_mask);
1829 /*
1830 * Enable all the Tx side interrupts
1831 * writing 0 Enables all 64 TX interrupt levels
1832 */
1833 writeq(0x0, &bar0->tx_traffic_mask);
1834 } else if (flag == DISABLE_INTRS) {
1835 /*
1836 * Disable Tx Traffic Intrs in the general intr mask
1837 * register.
1838 */
1839 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1840 temp64 = readq(&bar0->general_int_mask);
1841 val64 |= temp64;
1842 writeq(val64, &bar0->general_int_mask);
1843 }
1844 }
1845
1846 /* Rx traffic interrupts */
1847 if (mask & RX_TRAFFIC_INTR) {
1848 val64 = RXTRAFFIC_INT_M;
1849 if (flag == ENABLE_INTRS) {
1850 temp64 = readq(&bar0->general_int_mask);
1851 temp64 &= ~((u64) val64);
1852 writeq(temp64, &bar0->general_int_mask);
1853 /* writing 0 Enables all 8 RX interrupt levels */
1854 writeq(0x0, &bar0->rx_traffic_mask);
1855 } else if (flag == DISABLE_INTRS) {
1856 /*
1857 * Disable Rx Traffic Intrs in the general intr mask
1858 * register.
1859 */
1860 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1861 temp64 = readq(&bar0->general_int_mask);
1862 val64 |= temp64;
1863 writeq(val64, &bar0->general_int_mask);
1864 }
1865 }
1866 }
1867
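/*
 * The interrupt plumbing above follows one pattern per block: a bit in
 * general_int_mask gates the whole block, while a per-block mask
 * register (pfc_err_mask, rxdma_int_mask, ...) selects the individual
 * sources. A minimal illustrative sketch of that pattern (not part of
 * the driver, shown only for clarity):
 *
 *	static void toggle_block_intr(XENA_dev_config_t __iomem *bar0,
 *				      u64 block_bit, int flag)
 *	{
 *		u64 gim = readq(&bar0->general_int_mask);
 *
 *		if (flag == ENABLE_INTRS)
 *			gim &= ~block_bit;	(unmask the whole block)
 *		else
 *			gim |= block_bit;	(mask the whole block)
 *		writeq(gim, &bar0->general_int_mask);
 *	}
 */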
1868 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1869 {
1870 int ret = 0;
1871
1872 if (flag == FALSE) {
1873 if ((!herc && (rev_id >= 4)) || herc) {
1874 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1875 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1876 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1877 ret = 1;
1878 }
1879 } else {
1880 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1881 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1882 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1883 ret = 1;
1884 }
1885 }
1886 } else {
1887 if ((!herc && (rev_id >= 4)) || herc) {
1888 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1889 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1890 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1891 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1892 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1893 ret = 1;
1894 }
1895 } else {
1896 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1897 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1898 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1899 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1900 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1901 ret = 1;
1902 }
1903 }
1904 }
1905
1906 return ret;
1907 }
1908 /**
1909 * verify_xena_quiescence - Checks whether the H/W is ready
1910 * @sp : private member of the device structure.
1911 * @val64 : Value read from the adapter status register.
1912 * @flag : indicates if the adapter enable bit was ever written before.
1913 * Description: Returns whether the H/W is ready to go or not. The
1914 * comparison differs depending on whether the adapter enable bit has
1915 * been written before, so the calling function passes @flag to
1916 * indicate this.
1917 * Return: 1 if Xena is quiescent
1918 * 0 if Xena is not quiescent
1919 */
1920
1921 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1922 {
1923 int ret = 0, herc;
1924 u64 tmp64 = ~((u64) val64);
1925 int rev_id = get_xena_rev_id(sp->pdev);
1926
1927 herc = (sp->device_type == XFRAME_II_DEVICE);
1928 if (!(tmp64 &
1929 (ADAPTER_STATUS_TDMA_READY |
1930 ADAPTER_STATUS_RDMA_READY | ADAPTER_STATUS_PFC_READY |
1931 ADAPTER_STATUS_TMAC_BUF_EMPTY | ADAPTER_STATUS_PIC_QUIESCENT |
1932 ADAPTER_STATUS_MC_DRAM_READY | ADAPTER_STATUS_MC_QUEUES_READY |
1933 ADAPTER_STATUS_M_PLL_LOCK |
1934 ADAPTER_STATUS_P_PLL_LOCK))) {
1935 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1936 }
1937
1938 return ret;
1939 }
1940
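/*
 * Note on the test in verify_xena_quiescence() above: tmp64 = ~val64
 * inverts the status word, so "tmp64 & (READY/LOCK bits)" is non-zero
 * whenever at least one of those bits is still 0 in the status
 * register. Only when every listed bit is set does the PRC/PCC state
 * get checked.
 */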
1941 /**
1942 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1943 * @sp: Pointer to device specific structure
1944 * Description :
1945 * New procedure to clear mac address reading problems on Alpha platforms
1946 *
1947 */
1948
1949 static void fix_mac_address(nic_t * sp)
1950 {
1951 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1952 u64 val64;
1953 int i = 0;
1954
1955 while (fix_mac[i] != END_SIGN) {
1956 writeq(fix_mac[i++], &bar0->gpio_control);
1957 udelay(10);
1958 val64 = readq(&bar0->gpio_control);
1959 }
1960 }
1961
1962 /**
1963 * start_nic - Turns the device on
1964 * @nic : device private variable.
1965 * Description:
1966 * This function actually turns the device on. Before this function is
1967 * called, all registers are configured from their reset states
1968 * and shared memory is allocated but the NIC is still quiescent. On
1969 * calling this function, the device interrupts are cleared and the NIC is
1970 * literally switched on by writing into the adapter control register.
1971 * Return Value:
1972 * SUCCESS on success and -1 on failure.
1973 */
1974
1975 static int start_nic(struct s2io_nic *nic)
1976 {
1977 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1978 struct net_device *dev = nic->dev;
1979 register u64 val64 = 0;
1980 u16 interruptible;
1981 u16 subid, i;
1982 mac_info_t *mac_control;
1983 struct config_param *config;
1984
1985 mac_control = &nic->mac_control;
1986 config = &nic->config;
1987
1988 /* PRC Initialization and configuration */
1989 for (i = 0; i < config->rx_ring_num; i++) {
1990 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1991 &bar0->prc_rxd0_n[i]);
1992
1993 val64 = readq(&bar0->prc_ctrl_n[i]);
1994 if (nic->config.bimodal)
1995 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1996 if (nic->rxd_mode == RXD_MODE_1)
1997 val64 |= PRC_CTRL_RC_ENABLED;
1998 else
1999 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2000 if (nic->device_type == XFRAME_II_DEVICE)
2001 val64 |= PRC_CTRL_GROUP_READS;
2002 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2003 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2004 writeq(val64, &bar0->prc_ctrl_n[i]);
2005 }
2006
2007 if (nic->rxd_mode == RXD_MODE_3B) {
2008 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2009 val64 = readq(&bar0->rx_pa_cfg);
2010 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2011 writeq(val64, &bar0->rx_pa_cfg);
2012 }
2013
2014 /*
2015 * Enabling MC-RLDRAM. After enabling the device, we wait
2016 * for around 100ms, which is approximately the time required
2017 * for the device to be ready for operation.
2018 */
2019 val64 = readq(&bar0->mc_rldram_mrs);
2020 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2021 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2022 val64 = readq(&bar0->mc_rldram_mrs);
2023
2024 msleep(100); /* Delay by around 100 ms. */
2025
2026 /* Enabling ECC Protection. */
2027 val64 = readq(&bar0->adapter_control);
2028 val64 &= ~ADAPTER_ECC_EN;
2029 writeq(val64, &bar0->adapter_control);
2030
2031 /*
2032 * Clearing any possible Link state change interrupts that
2033 * could have popped up just before Enabling the card.
2034 */
2035 val64 = readq(&bar0->mac_rmac_err_reg);
2036 if (val64)
2037 writeq(val64, &bar0->mac_rmac_err_reg);
2038
2039 /*
2040 * Verify if the device is ready to be enabled, if so enable
2041 * it.
2042 */
2043 val64 = readq(&bar0->adapter_status);
2044 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
2045 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2046 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2047 (unsigned long long) val64);
2048 return FAILURE;
2049 }
2050
2051 /* Enable select interrupts */
2052 if (nic->intr_type != INTA)
2053 en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2054 else {
2055 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2056 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2057 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2058 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
2059 }
2060
2061 /*
2062 * With some switches, link might be already up at this point.
2063 * Because of this weird behavior, when we enable laser,
2064 * we may not get link. We need to handle this. We cannot
2065 * figure out which switch is misbehaving. So we are forced to
2066 * make a global change.
2067 */
2068
2069 /* Enabling Laser. */
2070 val64 = readq(&bar0->adapter_control);
2071 val64 |= ADAPTER_EOI_TX_ON;
2072 writeq(val64, &bar0->adapter_control);
2073
2074 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2075 /*
2076 * Don't see link state interrupts initially on some switches,
2077 * so directly scheduling the link state task here.
2078 */
2079 schedule_work(&nic->set_link_task);
2080 }
2081 /* SXE-002: Initialize link and activity LED */
2082 subid = nic->pdev->subsystem_device;
2083 if (((subid & 0xFF) >= 0x07) &&
2084 (nic->device_type == XFRAME_I_DEVICE)) {
2085 val64 = readq(&bar0->gpio_control);
2086 val64 |= 0x0000800000000000ULL;
2087 writeq(val64, &bar0->gpio_control);
2088 val64 = 0x0411040400000000ULL;
2089 writeq(val64, (void __iomem *)bar0 + 0x2700);
2090 }
2091
2092 return SUCCESS;
2093 }
2094 /**
2095 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2096 * @fifo_data : fifo data pointer to the transmit fifo
2097 * @txdlp : pointer to the first TxD of the descriptor list
2098 * @get_off : current get offset within the fifo
2099 */
2097 static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2098 {
2099 nic_t *nic = fifo_data->nic;
2100 struct sk_buff *skb;
2101 TxD_t *txds;
2102 u16 j, frg_cnt;
2103
2104 txds = txdlp;
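/*
 * In UFO in-band mode the first TxD carries a driver-owned u64 marker
 * (ufo_in_band_v) rather than skb data; unmap that single u64 and step
 * past it so txds points at the descriptor holding the skb pointer.
 */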
2105 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2106 pci_unmap_single(nic->pdev, (dma_addr_t)
2107 txds->Buffer_Pointer, sizeof(u64),
2108 PCI_DMA_TODEVICE);
2109 txds++;
2110 }
2111
2112 skb = (struct sk_buff *) ((unsigned long)
2113 txds->Host_Control);
2114 if (!skb) {
2115 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
2116 return NULL;
2117 }
2118 pci_unmap_single(nic->pdev, (dma_addr_t)
2119 txds->Buffer_Pointer,
2120 skb->len - skb->data_len,
2121 PCI_DMA_TODEVICE);
2122 frg_cnt = skb_shinfo(skb)->nr_frags;
2123 if (frg_cnt) {
2124 txds++;
2125 for (j = 0; j < frg_cnt; j++, txds++) {
2126 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2127 if (!txds->Buffer_Pointer)
2128 break;
2129 pci_unmap_page(nic->pdev, (dma_addr_t)
2130 txds->Buffer_Pointer,
2131 frag->size, PCI_DMA_TODEVICE);
2132 }
2133 }
2134 txdlp->Host_Control = 0;
2135 return(skb);
2136 }
2137
2138 /**
2139 * free_tx_buffers - Free all queued Tx buffers
2140 * @nic : device private variable.
2141 * Description:
2142 * Free all queued Tx buffers.
2143 * Return Value: void
2144 */
2145
2146 static void free_tx_buffers(struct s2io_nic *nic)
2147 {
2148 struct net_device *dev = nic->dev;
2149 struct sk_buff *skb;
2150 TxD_t *txdp;
2151 int i, j;
2152 mac_info_t *mac_control;
2153 struct config_param *config;
2154 int cnt = 0;
2155
2156 mac_control = &nic->mac_control;
2157 config = &nic->config;
2158
2159 for (i = 0; i < config->tx_fifo_num; i++) {
2160 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2161 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2162 list_virt_addr;
2163 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2164 if (skb) {
2165 dev_kfree_skb(skb);
2166 cnt++;
2167 }
2168 }
2169 DBG_PRINT(INTR_DBG,
2170 "%s:forcibly freeing %d skbs on FIFO%d\n",
2171 dev->name, cnt, i);
2172 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2173 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2174 }
2175 }
2176
2177 /**
2178 * stop_nic - To stop the nic
2179 * @nic : device private variable.
2180 * Description:
2181 * This function does exactly the opposite of what the start_nic()
2182 * function does. This function is called to stop the device.
2183 * Return Value:
2184 * void.
2185 */
2186
2187 static void stop_nic(struct s2io_nic *nic)
2188 {
2189 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2190 register u64 val64 = 0;
2191 u16 interruptible;
2192 mac_info_t *mac_control;
2193 struct config_param *config;
2194
2195 mac_control = &nic->mac_control;
2196 config = &nic->config;
2197
2198 /* Disable all interrupts */
2199 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2200 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2201 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2202 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2203
2204 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2205 val64 = readq(&bar0->adapter_control);
2206 val64 &= ~(ADAPTER_CNTL_EN);
2207 writeq(val64, &bar0->adapter_control);
2208 }
2209
2210 static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2211 {
2212 struct net_device *dev = nic->dev;
2213 struct sk_buff *frag_list;
2214 void *tmp;
2215
2216 /* Buffer-1 receives L3/L4 headers */
2217 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2218 (nic->pdev, skb->data, l3l4hdr_size + 4,
2219 PCI_DMA_FROMDEVICE);
2220
2221 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2222 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2223 if (skb_shinfo(skb)->frag_list == NULL) {
2224 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2225 return -ENOMEM ;
2226 }
2227 frag_list = skb_shinfo(skb)->frag_list;
2228 frag_list->next = NULL;
2229 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2230 frag_list->data = tmp;
2231 frag_list->tail = tmp;
2232
2233 /* Buffer-2 receives L4 data payload */
2234 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2235 frag_list->data, dev->mtu,
2236 PCI_DMA_FROMDEVICE);
2237 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2238 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2239
2240 return SUCCESS;
2241 }
2242
2243 /**
2244 * fill_rx_buffers - Allocates the Rx side skbs
2245 * @nic: device private variable
2246 * @ring_no: ring number
2247 * Description:
2248 * The function allocates Rx side skbs and puts the physical
2249 * address of these buffers into the RxD buffer pointers, so that the NIC
2250 * can DMA the received frame into these locations.
2251 * The NIC supports 3 receive modes, viz
2252 * 1. single buffer,
2253 * 2. three buffer and
2254 * 3. five buffer modes.
2255 * Each mode defines how many fragments the received frame will be split
2256 * up into by the NIC. The frame is split into L3 header, L4 header and
2257 * L4 payload in three buffer mode, and in 5 buffer mode the L4 payload
2258 * itself is split into 3 fragments. As of now single, two and three
2259 * buffer modes are supported.
2260 * Return Value:
2261 * SUCCESS on success or an appropriate -ve value on failure.
2262 */
2263
2264 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2265 {
2266 struct net_device *dev = nic->dev;
2267 struct sk_buff *skb;
2268 RxD_t *rxdp;
2269 int off, off1, size, block_no, block_no1;
2270 u32 alloc_tab = 0;
2271 u32 alloc_cnt;
2272 mac_info_t *mac_control;
2273 struct config_param *config;
2274 u64 tmp;
2275 buffAdd_t *ba;
2276 #ifndef CONFIG_S2IO_NAPI
2277 unsigned long flags;
2278 #endif
2279 RxD_t *first_rxdp = NULL;
2280
2281 mac_control = &nic->mac_control;
2282 config = &nic->config;
2283 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2284 atomic_read(&nic->rx_bufs_left[ring_no]);
2285
2286 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2287 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2288 while (alloc_tab < alloc_cnt) {
2289 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2290 block_index;
2291 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2292
2293 rxdp = mac_control->rings[ring_no].
2294 rx_blocks[block_no].rxds[off].virt_addr;
2295
2296 if ((block_no == block_no1) && (off == off1) &&
2297 (rxdp->Host_Control)) {
2298 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2299 dev->name);
2300 DBG_PRINT(INTR_DBG, " info equated\n");
2301 goto end;
2302 }
2303 if (off && (off == rxd_count[nic->rxd_mode])) {
2304 mac_control->rings[ring_no].rx_curr_put_info.
2305 block_index++;
2306 if (mac_control->rings[ring_no].rx_curr_put_info.
2307 block_index == mac_control->rings[ring_no].
2308 block_count)
2309 mac_control->rings[ring_no].rx_curr_put_info.
2310 block_index = 0;
2311 block_no = mac_control->rings[ring_no].
2312 rx_curr_put_info.block_index;
2313 if (off == rxd_count[nic->rxd_mode])
2314 off = 0;
2315 mac_control->rings[ring_no].rx_curr_put_info.
2316 offset = off;
2317 rxdp = mac_control->rings[ring_no].
2318 rx_blocks[block_no].block_virt_addr;
2319 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2320 dev->name, rxdp);
2321 }
2322 #ifndef CONFIG_S2IO_NAPI
2323 spin_lock_irqsave(&nic->put_lock, flags);
2324 mac_control->rings[ring_no].put_pos =
2325 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2326 spin_unlock_irqrestore(&nic->put_lock, flags);
2327 #endif
2328 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2329 ((nic->rxd_mode >= RXD_MODE_3A) &&
2330 (rxdp->Control_2 & BIT(0)))) {
2331 mac_control->rings[ring_no].rx_curr_put_info.
2332 offset = off;
2333 goto end;
2334 }
2335 /* calculate size of skb based on ring mode */
2336 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2337 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2338 if (nic->rxd_mode == RXD_MODE_1)
2339 size += NET_IP_ALIGN;
2340 else if (nic->rxd_mode == RXD_MODE_3B)
2341 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2342 else
2343 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2344
2345 /* allocate skb */
2346 skb = dev_alloc_skb(size);
2347 if(!skb) {
2348 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2349 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2350 if (first_rxdp) {
2351 wmb();
2352 first_rxdp->Control_1 |= RXD_OWN_XENA;
2353 }
2354 return -ENOMEM ;
2355 }
2356 if (nic->rxd_mode == RXD_MODE_1) {
2357 /* 1 buffer mode - normal operation mode */
2358 memset(rxdp, 0, sizeof(RxD1_t));
2359 skb_reserve(skb, NET_IP_ALIGN);
2360 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
2361 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2362 PCI_DMA_FROMDEVICE);
2363 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2364
2365 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2366 /*
2367 * 2 or 3 buffer mode -
2368 * Both 2 buffer mode and 3 buffer mode provides 128
2369 * byte aligned receive buffers.
2370 *
2371 * 3 buffer mode provides header separation where in
2372 * skb->data will have L3/L4 headers where as
2373 * skb_shinfo(skb)->frag_list will have the L4 data
2374 * payload
2375 */
2376
2377 memset(rxdp, 0, sizeof(RxD3_t));
2378 ba = &mac_control->rings[ring_no].ba[block_no][off];
2379 skb_reserve(skb, BUF0_LEN);
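/*
 * Round skb->data up to the next (ALIGN_SIZE + 1)-byte boundary.
 * This relies on ALIGN_SIZE being a power of two minus one, so
 * adding it and then masking it off yields an aligned address.
 */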
2380 tmp = (u64)(unsigned long) skb->data;
2381 tmp += ALIGN_SIZE;
2382 tmp &= ~ALIGN_SIZE;
2383 skb->data = (void *) (unsigned long)tmp;
2384 skb->tail = (void *) (unsigned long)tmp;
2385
2386 ((RxD3_t*)rxdp)->Buffer0_ptr =
2387 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2388 PCI_DMA_FROMDEVICE);
2389 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2390 if (nic->rxd_mode == RXD_MODE_3B) {
2391 /* Two buffer mode */
2392
2393 /*
2394 * Buffer2 will have L3/L4 header plus
2395 * L4 payload
2396 */
2397 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2398 (nic->pdev, skb->data, dev->mtu + 4,
2399 PCI_DMA_FROMDEVICE);
2400
2401 /* Buffer-1 will be dummy buffer not used */
2402 ((RxD3_t*)rxdp)->Buffer1_ptr =
2403 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2404 PCI_DMA_FROMDEVICE);
2405 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2406 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2407 (dev->mtu + 4);
2408 } else {
2409 /* 3 buffer mode */
2410 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2411 dev_kfree_skb_irq(skb);
2412 if (first_rxdp) {
2413 wmb();
2414 first_rxdp->Control_1 |=
2415 RXD_OWN_XENA;
2416 }
2417 return -ENOMEM ;
2418 }
2419 }
2420 rxdp->Control_2 |= BIT(0);
2421 }
2422 rxdp->Host_Control = (unsigned long) (skb);
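/*
 * Ownership handover to the NIC is batched: every descriptor inside a
 * (1 << rxsync_frequency) batch gets RXD_OWN_XENA immediately, while
 * the descriptor at each batch boundary is remembered in first_rxdp
 * and only flipped (after a wmb()) once the next batch is fully
 * built, so the adapter never sees a half-initialized batch.
 */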
2423 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2424 rxdp->Control_1 |= RXD_OWN_XENA;
2425 off++;
2426 if (off == (rxd_count[nic->rxd_mode] + 1))
2427 off = 0;
2428 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2429
2430 rxdp->Control_2 |= SET_RXD_MARKER;
2431 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2432 if (first_rxdp) {
2433 wmb();
2434 first_rxdp->Control_1 |= RXD_OWN_XENA;
2435 }
2436 first_rxdp = rxdp;
2437 }
2438 atomic_inc(&nic->rx_bufs_left[ring_no]);
2439 alloc_tab++;
2440 }
2441
2442 end:
2443 /* Transfer ownership of first descriptor to adapter just before
2444 * exiting. Before that, use memory barrier so that ownership
2445 * and other fields are seen by adapter correctly.
2446 */
2447 if (first_rxdp) {
2448 wmb();
2449 first_rxdp->Control_1 |= RXD_OWN_XENA;
2450 }
2451
2452 return SUCCESS;
2453 }
2454
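/*
 * Buffer sizing per ring mode, as computed in fill_rx_buffers() above
 * (summary for reference):
 *   RXD_MODE_1 : one buffer of mtu + Ethernet/802.2/SNAP headers
 *   RXD_MODE_3B: Buffer0 = BUF0_LEN, Buffer1 = dummy,
 *                Buffer2 = mtu + 4
 *   RXD_MODE_3A: Buffer0 = BUF0_LEN, Buffer1 = l3l4hdr_size + 4,
 *                Buffer2 = mtu (L4 payload on the skb frag_list)
 */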
2455 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2456 {
2457 struct net_device *dev = sp->dev;
2458 int j;
2459 struct sk_buff *skb;
2460 RxD_t *rxdp;
2461 mac_info_t *mac_control;
2462 buffAdd_t *ba;
2463
2464 mac_control = &sp->mac_control;
2465 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2466 rxdp = mac_control->rings[ring_no].
2467 rx_blocks[blk].rxds[j].virt_addr;
2468 skb = (struct sk_buff *)
2469 ((unsigned long) rxdp->Host_Control);
2470 if (!skb) {
2471 continue;
2472 }
2473 if (sp->rxd_mode == RXD_MODE_1) {
2474 pci_unmap_single(sp->pdev, (dma_addr_t)
2475 ((RxD1_t*)rxdp)->Buffer0_ptr,
2476 dev->mtu +
2477 HEADER_ETHERNET_II_802_3_SIZE
2478 + HEADER_802_2_SIZE +
2479 HEADER_SNAP_SIZE,
2480 PCI_DMA_FROMDEVICE);
2481 memset(rxdp, 0, sizeof(RxD1_t));
2482 } else if(sp->rxd_mode == RXD_MODE_3B) {
2483 ba = &mac_control->rings[ring_no].
2484 ba[blk][j];
2485 pci_unmap_single(sp->pdev, (dma_addr_t)
2486 ((RxD3_t*)rxdp)->Buffer0_ptr,
2487 BUF0_LEN,
2488 PCI_DMA_FROMDEVICE);
2489 pci_unmap_single(sp->pdev, (dma_addr_t)
2490 ((RxD3_t*)rxdp)->Buffer1_ptr,
2491 BUF1_LEN,
2492 PCI_DMA_FROMDEVICE);
2493 pci_unmap_single(sp->pdev, (dma_addr_t)
2494 ((RxD3_t*)rxdp)->Buffer2_ptr,
2495 dev->mtu + 4,
2496 PCI_DMA_FROMDEVICE);
2497 memset(rxdp, 0, sizeof(RxD3_t));
2498 } else {
2499 pci_unmap_single(sp->pdev, (dma_addr_t)
2500 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2501 PCI_DMA_FROMDEVICE);
2502 pci_unmap_single(sp->pdev, (dma_addr_t)
2503 ((RxD3_t*)rxdp)->Buffer1_ptr,
2504 l3l4hdr_size + 4,
2505 PCI_DMA_FROMDEVICE);
2506 pci_unmap_single(sp->pdev, (dma_addr_t)
2507 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2508 PCI_DMA_FROMDEVICE);
2509 memset(rxdp, 0, sizeof(RxD3_t));
2510 }
2511 dev_kfree_skb(skb);
2512 atomic_dec(&sp->rx_bufs_left[ring_no]);
2513 }
2514 }
2515
2516 /**
2517 * free_rx_buffers - Frees all Rx buffers
2518 * @sp: device private variable.
2519 * Description:
2520 * This function will free all Rx buffers allocated by host.
2521 * Return Value:
2522 * NONE.
2523 */
2524
2525 static void free_rx_buffers(struct s2io_nic *sp)
2526 {
2527 struct net_device *dev = sp->dev;
2528 int i, blk = 0, buf_cnt = 0;
2529 mac_info_t *mac_control;
2530 struct config_param *config;
2531
2532 mac_control = &sp->mac_control;
2533 config = &sp->config;
2534
2535 for (i = 0; i < config->rx_ring_num; i++) {
2536 /* Snapshot the in-use count before the blocks are torn down,
2537 * since free_rxd_blk() decrements rx_bufs_left as it frees. */
2538 buf_cnt = atomic_read(&sp->rx_bufs_left[i]);
2539 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2540 free_rxd_blk(sp, i, blk);
2538
2539 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2540 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2541 mac_control->rings[i].rx_curr_put_info.offset = 0;
2542 mac_control->rings[i].rx_curr_get_info.offset = 0;
2543 atomic_set(&sp->rx_bufs_left[i], 0);
2544 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2545 dev->name, buf_cnt, i);
2546 }
2547 }
2548
2549 /**
2550 * s2io_poll - Rx interrupt handler for NAPI support
2551 * @dev : pointer to the device structure.
2552 * @budget : The number of packets that were budgeted to be processed
2553 * during one pass through the 'Poll' function.
2554 * Description:
2555 * Comes into the picture only if NAPI support has been incorporated. It
2556 * does the same thing that rx_intr_handler does, but not in an interrupt
2557 * context. Also, it will process only a given number of packets.
2558 * Return value:
2559 * 0 on success and 1 if there are no Rx packets to be processed.
2560 */
2561
2562 #if defined(CONFIG_S2IO_NAPI)
2563 static int s2io_poll(struct net_device *dev, int *budget)
2564 {
2565 nic_t *nic = dev->priv;
2566 int pkt_cnt = 0, org_pkts_to_process;
2567 mac_info_t *mac_control;
2568 struct config_param *config;
2569 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2570 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2571 int i;
2572
2573 atomic_inc(&nic->isr_cnt);
2574 mac_control = &nic->mac_control;
2575 config = &nic->config;
2576
2577 nic->pkts_to_process = *budget;
2578 if (nic->pkts_to_process > dev->quota)
2579 nic->pkts_to_process = dev->quota;
2580 org_pkts_to_process = nic->pkts_to_process;
2581
2582 writeq(val64, &bar0->rx_traffic_int);
2583 val64 = readl(&bar0->rx_traffic_int);
2584
2585 for (i = 0; i < config->rx_ring_num; i++) {
2586 rx_intr_handler(&mac_control->rings[i]);
2587 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2588 if (!nic->pkts_to_process) {
2589 /* Quota for the current iteration has been met */
2590 goto no_rx;
2591 }
2592 }
2593 if (!pkt_cnt)
2594 pkt_cnt = 1;
2595
2596 dev->quota -= pkt_cnt;
2597 *budget -= pkt_cnt;
2598 netif_rx_complete(dev);
2599
2600 for (i = 0; i < config->rx_ring_num; i++) {
2601 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2602 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2603 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2604 break;
2605 }
2606 }
2607 /* Re-enable the Rx interrupts. */
2608 writeq(0x0, &bar0->rx_traffic_mask);
2609 val64 = readl(&bar0->rx_traffic_mask);
2610 atomic_dec(&nic->isr_cnt);
2611 return 0;
2612
2613 no_rx:
2614 dev->quota -= pkt_cnt;
2615 *budget -= pkt_cnt;
2616
2617 for (i = 0; i < config->rx_ring_num; i++) {
2618 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2619 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2620 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2621 break;
2622 }
2623 }
2624 atomic_dec(&nic->isr_cnt);
2625 return 1;
2626 }
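/*
 * Budget accounting sketch for s2io_poll() above: rx_intr_handler()
 * decrements nic->pkts_to_process per frame, so the difference from
 * org_pkts_to_process is what this pass consumed; that count is
 * charged to both dev->quota and *budget, as the NAPI contract of
 * this kernel generation requires.
 */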
2627 #endif
2628
2629 /**
2630 * s2io_netpoll - Rx interrupt service handler for netpoll support
2631 * @dev : pointer to the device structure.
2632 * Description:
2633 * Polling 'interrupt' - used by things like netconsole to send skbs
2634 * without having to re-enable interrupts. It's not called while
2635 * the interrupt routine is executing.
2636 */
2637
2638 #ifdef CONFIG_NET_POLL_CONTROLLER
2639 static void s2io_netpoll(struct net_device *dev)
2640 {
2641 nic_t *nic = dev->priv;
2642 mac_info_t *mac_control;
2643 struct config_param *config;
2644 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2645 u64 val64;
2646 int i;
2647
2648 disable_irq(dev->irq);
2649
2650 atomic_inc(&nic->isr_cnt);
2651 mac_control = &nic->mac_control;
2652 config = &nic->config;
2653
2654 val64 = readq(&bar0->rx_traffic_int);
2655 writeq(val64, &bar0->rx_traffic_int);
2656
2657 for (i = 0; i < config->rx_ring_num; i++)
2658 rx_intr_handler(&mac_control->rings[i]);
2659
2660 for (i = 0; i < config->rx_ring_num; i++) {
2661 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2662 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2663 DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
2664 break;
2665 }
2666 }
2667 atomic_dec(&nic->isr_cnt);
2668 enable_irq(dev->irq);
2669 return;
2670 }
2671 #endif
2672
2673 /**
2674 * rx_intr_handler - Rx interrupt handler
2675 * @ring_data: per-ring data structure of the ring to be serviced.
2676 * Description:
2677 * If the interrupt is because of a received frame or if the
2678 * receive ring contains fresh, as yet unprocessed frames, this function
2679 * is called. It picks out the RxD at which the last Rx processing had
2680 * stopped and sends the skb to the OSM's Rx handler and then increments
2681 * the offset.
2682 * Return Value:
2683 * NONE.
2684 */
2685 static void rx_intr_handler(ring_info_t *ring_data)
2686 {
2687 nic_t *nic = ring_data->nic;
2688 struct net_device *dev = (struct net_device *) nic->dev;
2689 int get_block, put_block, put_offset;
2690 rx_curr_get_info_t get_info, put_info;
2691 RxD_t *rxdp;
2692 struct sk_buff *skb;
2693 #ifndef CONFIG_S2IO_NAPI
2694 int pkt_cnt = 0;
2695 #endif
2696 int i;
2697
2698 spin_lock(&nic->rx_lock);
2699 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2700 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2701 __FUNCTION__, dev->name);
2702 spin_unlock(&nic->rx_lock);
2703 return;
2704 }
2705
2706 get_info = ring_data->rx_curr_get_info;
2707 get_block = get_info.block_index;
2708 put_info = ring_data->rx_curr_put_info;
2709 put_block = put_info.block_index;
2710 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2711 #ifndef CONFIG_S2IO_NAPI
2712 spin_lock(&nic->put_lock);
2713 put_offset = ring_data->put_pos;
2714 spin_unlock(&nic->put_lock);
2715 #else
2716 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
2717 put_info.offset;
2718 #endif
2719 while (RXD_IS_UP2DT(rxdp)) {
2720 /* If we are next to the put index, the ring is full */
2721 if ((get_block == put_block) &&
2722 (get_info.offset + 1) == put_info.offset) {
2723 DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
2724 break;
2725 }
2726 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2727 if (skb == NULL) {
2728 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2729 dev->name);
2730 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2731 spin_unlock(&nic->rx_lock);
2732 return;
2733 }
2734 if (nic->rxd_mode == RXD_MODE_1) {
2735 pci_unmap_single(nic->pdev, (dma_addr_t)
2736 ((RxD1_t*)rxdp)->Buffer0_ptr,
2737 dev->mtu +
2738 HEADER_ETHERNET_II_802_3_SIZE +
2739 HEADER_802_2_SIZE +
2740 HEADER_SNAP_SIZE,
2741 PCI_DMA_FROMDEVICE);
2742 } else if (nic->rxd_mode == RXD_MODE_3B) {
2743 pci_unmap_single(nic->pdev, (dma_addr_t)
2744 ((RxD3_t*)rxdp)->Buffer0_ptr,
2745 BUF0_LEN, PCI_DMA_FROMDEVICE);
2746 pci_unmap_single(nic->pdev, (dma_addr_t)
2747 ((RxD3_t*)rxdp)->Buffer1_ptr,
2748 BUF1_LEN, PCI_DMA_FROMDEVICE);
2749 pci_unmap_single(nic->pdev, (dma_addr_t)
2750 ((RxD3_t*)rxdp)->Buffer2_ptr,
2751 dev->mtu + 4,
2752 PCI_DMA_FROMDEVICE);
2753 } else {
2754 pci_unmap_single(nic->pdev, (dma_addr_t)
2755 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2756 PCI_DMA_FROMDEVICE);
2757 pci_unmap_single(nic->pdev, (dma_addr_t)
2758 ((RxD3_t*)rxdp)->Buffer1_ptr,
2759 l3l4hdr_size + 4,
2760 PCI_DMA_FROMDEVICE);
2761 pci_unmap_single(nic->pdev, (dma_addr_t)
2762 ((RxD3_t*)rxdp)->Buffer2_ptr,
2763 dev->mtu, PCI_DMA_FROMDEVICE);
2764 }
2765 prefetch(skb->data);
2766 rx_osm_handler(ring_data, rxdp);
2767 get_info.offset++;
2768 ring_data->rx_curr_get_info.offset = get_info.offset;
2769 rxdp = ring_data->rx_blocks[get_block].
2770 rxds[get_info.offset].virt_addr;
2771 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2772 get_info.offset = 0;
2773 ring_data->rx_curr_get_info.offset = get_info.offset;
2774 get_block++;
2775 if (get_block == ring_data->block_count)
2776 get_block = 0;
2777 ring_data->rx_curr_get_info.block_index = get_block;
2778 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2779 }
2780
2781 #ifdef CONFIG_S2IO_NAPI
2782 nic->pkts_to_process -= 1;
2783 if (!nic->pkts_to_process)
2784 break;
2785 #else
2786 pkt_cnt++;
2787 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2788 break;
2789 #endif
2790 }
2791 if (nic->lro) {
2792 /* Clear all LRO sessions before exiting */
2793 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2794 lro_t *lro = &nic->lro0_n[i];
2795 if (lro->in_use) {
2796 update_L3L4_header(nic, lro);
2797 queue_rx_frame(lro->parent);
2798 clear_lro_session(lro);
2799 }
2800 }
2801 }
2802
2803 spin_unlock(&nic->rx_lock);
2804 }
2805
2806 /**
2807 * tx_intr_handler - Transmit interrupt handler
2808 * @fifo_data : fifo data pointer to the transmit fifo being serviced
2809 * Description:
2810 * If an interrupt was raised to indicate DMA complete of the
2811 * Tx packet, this function is called. It identifies the last TxD
2812 * whose buffer was freed and frees all skbs whose data have already
2813 * been DMA'ed into the NIC's internal memory.
2814 * Return Value:
2815 * NONE
2816 */
2817
2818 static void tx_intr_handler(fifo_info_t *fifo_data)
2819 {
2820 nic_t *nic = fifo_data->nic;
2821 struct net_device *dev = (struct net_device *) nic->dev;
2822 tx_curr_get_info_t get_info, put_info;
2823 struct sk_buff *skb;
2824 TxD_t *txdlp;
2825
2826 get_info = fifo_data->tx_curr_get_info;
2827 put_info = fifo_data->tx_curr_put_info;
2828 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2829 list_virt_addr;
2830 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2831 (get_info.offset != put_info.offset) &&
2832 (txdlp->Host_Control)) {
2833 /* Check for TxD errors */
2834 if (txdlp->Control_1 & TXD_T_CODE) {
2835 unsigned long long err;
2836 err = txdlp->Control_1 & TXD_T_CODE;
2837 if (err & 0x1) {
2838 nic->mac_control.stats_info->sw_stat.
2839 parity_err_cnt++;
2840 }
2841 if ((err >> 48) == 0xA) {
2842 DBG_PRINT(TX_DBG, "TxD returned due "
2843 "to loss of link\n");
2844 } else {
2845 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2846 }
2849 }
2850
2851 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2852 if (skb == NULL) {
2853 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2854 __FUNCTION__);
2855 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2856 return;
2857 }
2858
2859 /* Updating the statistics block */
2860 nic->stats.tx_bytes += skb->len;
2861 dev_kfree_skb_irq(skb);
2862
2863 get_info.offset++;
2864 if (get_info.offset == get_info.fifo_len + 1)
2865 get_info.offset = 0;
2866 txdlp = (TxD_t *) fifo_data->list_info
2867 [get_info.offset].list_virt_addr;
2868 fifo_data->tx_curr_get_info.offset =
2869 get_info.offset;
2870 }
2871
2872 spin_lock(&nic->tx_lock);
2873 if (netif_queue_stopped(dev))
2874 netif_wake_queue(dev);
2875 spin_unlock(&nic->tx_lock);
2876 }
2877
2878 /**
2879 * s2io_mdio_write - Function to write in to MDIO registers
2880 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2881 * @addr : address value
2882 * @value : data value
2883 * @dev : pointer to net_device structure
2884 * Description:
2885 * This function is used to write values into the MDIO registers.
2886 * Return value: NONE
2887 */
2888 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2889 {
2890 u64 val64 = 0x0;
2891 nic_t *sp = dev->priv;
2892 XENA_dev_config_t __iomem *bar0 = (XENA_dev_config_t __iomem *)sp->bar0;
2893
2894 /* Address transaction */
2895 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2896 | MDIO_MMD_DEV_ADDR(mmd_type)
2897 | MDIO_MMS_PRT_ADDR(0x0);
2898 writeq(val64, &bar0->mdio_control);
2899 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2900 writeq(val64, &bar0->mdio_control);
2901 udelay(100);
2902
2903 /* Data transaction */
2904 val64 = 0x0;
2905 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2906 | MDIO_MMD_DEV_ADDR(mmd_type)
2907 | MDIO_MMS_PRT_ADDR(0x0)
2908 | MDIO_MDIO_DATA(value)
2909 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2910 writeq(val64, &bar0->mdio_control);
2911 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2912 writeq(val64, &bar0->mdio_control);
2913 udelay(100);
2914
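/* Read-back transaction: re-issue the address with a READ op,
 * apparently to let the write above complete on the bus; the result
 * is not inspected here (see s2io_mdio_read() below for the full
 * read sequence). */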
2915 val64 = 0x0;
2916 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2917 | MDIO_MMD_DEV_ADDR(mmd_type)
2918 | MDIO_MMS_PRT_ADDR(0x0)
2919 | MDIO_OP(MDIO_OP_READ_TRANS);
2920 writeq(val64, &bar0->mdio_control);
2921 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2922 writeq(val64, &bar0->mdio_control);
2923 udelay(100);
2924
2925 }
2926
2927 /**
2928 * s2io_mdio_read - Function to read from the MDIO registers
2929 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2930 * @addr : address value
2931 * @dev : pointer to net_device structure
2932 * Description:
2933 * This function is used to read values from the MDIO registers.
2934 * Return value: the 16-bit value read from the addressed register.
2935 */
2936 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2937 {
2938 u64 val64 = 0x0;
2939 u64 rval64 = 0x0;
2940 nic_t *sp = dev->priv;
2941 XENA_dev_config_t __iomem *bar0 = (XENA_dev_config_t __iomem *)sp->bar0;
2942
2943 /* address transaction */
2944 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2945 | MDIO_MMD_DEV_ADDR(mmd_type)
2946 | MDIO_MMS_PRT_ADDR(0x0);
2947 writeq(val64, &bar0->mdio_control);
2948 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2949 writeq(val64, &bar0->mdio_control);
2950 udelay(100);
2951
2952 /* Data transaction */
2953 val64 = 0x0;
2954 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2955 | MDIO_MMD_DEV_ADDR(mmd_type)
2956 | MDIO_MMS_PRT_ADDR(0x0)
2957 | MDIO_OP(MDIO_OP_READ_TRANS);
2958 writeq(val64, &bar0->mdio_control);
2959 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2960 writeq(val64, &bar0->mdio_control);
2961 udelay(100);
2962
2963 /* Read the value from regs */
2964 rval64 = readq(&bar0->mdio_control);
2965 rval64 = rval64 & 0xFFFF0000;
2966 rval64 = rval64 >> 16;
2967 return rval64;
2968 }
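/*
 * Usage sketch (illustrative only): probing the PMA/PMD device, as
 * s2io_updt_xpak_counter() below does before touching the DOM space:
 *
 *	u64 id = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, 0x0000, dev);
 *	if (id == 0xFFFF || id == 0x0000)
 *		(the MDIO slave did not respond)
 */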
2969 /**
2970 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
2971 * @counter : counter value to be updated
2972 * @regs_stat : bitmap of 2-bit consecutive-alarm fields, one per counter
2973 * @index : index of this counter's 2-bit field in @regs_stat
2974 * @flag : flag to indicate the status
2975 * @type : counter type
2976 * Description: checks the xpak counter status. Return value: NONE
2977 */
2978
2979 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2980 {
2981 u64 mask = 0x3;
2982 u64 val64;
2983 int i;
2984 for(i = 0; i <index; i++)
2985 mask = mask << 0x2;
2986
2987 if(flag > 0)
2988 {
2989 *counter = *counter + 1;
2990 val64 = *regs_stat & mask;
2991 val64 = val64 >> (index * 0x2);
2992 val64 = val64 + 1;
2993 if(val64 == 3)
2994 {
2995 switch(type)
2996 {
2997 case 1:
2998 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2999 "service. Excessive temperatures may "
3000 "result in premature transceiver "
3001 "failure \n");
3002 break;
3003 case 2:
3004 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3005 "service Excessive bias currents may "
3006 "indicate imminent laser diode "
3007 "failure \n");
3008 break;
3009 case 3:
3010 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3011 "service Excessive laser output "
3012 "power may saturate far-end "
3013 "receiver\n");
3014 break;
3015 default:
3016 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3017 "type \n");
3018 }
3019 val64 = 0x0;
3020 }
3021 val64 = val64 << (index * 0x2);
3022 *regs_stat = (*regs_stat & (~mask)) | (val64);
3023
3024 } else {
3025 *regs_stat = *regs_stat & (~mask);
3026 }
3027 }
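/*
 * Worked example for s2io_chk_xpak_counter(): each counter owns a
 * 2-bit field in *regs_stat; for index 2 the mask is 0x3 << 4 = 0x30.
 * Consecutive flagged calls count the field up 1, 2, 3; on reaching 3
 * the alarm message above is printed and the field resets to 0. A call
 * with the flag clear resets the field immediately.
 */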
3028
3029 /**
3030 * s2io_updt_xpak_counter - Function to update the xpak counters
3031 * @dev : pointer to net_device struct
3032 * Description:
3033 * This function updates the xpak counter values.
3034 * Return value: NONE
3035 */
3036 static void s2io_updt_xpak_counter(struct net_device *dev)
3037 {
3038 u16 flag = 0x0;
3039 u16 type = 0x0;
3040 u16 val16 = 0x0;
3041 u64 val64 = 0x0;
3042 u64 addr = 0x0;
3043
3044 nic_t *sp = dev->priv;
3045 StatInfo_t *stat_info = sp->mac_control.stats_info;
3046
3047 /* Check the communication with the MDIO slave */
3048 addr = 0x0000;
3049 val64 = 0x0;
3050 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3051 if((val64 == 0xFFFF) || (val64 == 0x0000))
3052 {
3053 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3054 "Returned %llx\n", (unsigned long long)val64);
3055 return;
3056 }
3057
3058 /* Check for the expected value of 0x2040 at PMA address 0x0000 */
3059 if(val64 != 0x2040)
3060 {
3061 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3062 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3063 (unsigned long long)val64);
3064 return;
3065 }
3066
3067 /* Loading the DOM register to MDIO register */
3068 addr = 0xA100;
3069 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3070 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3071
3072 /* Reading the Alarm flags */
3073 addr = 0xA070;
3074 val64 = 0x0;
3075 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3076
3077 flag = CHECKBIT(val64, 0x7);
3078 type = 1;
3079 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3080 &stat_info->xpak_stat.xpak_regs_stat,
3081 0x0, flag, type);
3082
3083 if(CHECKBIT(val64, 0x6))
3084 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3085
3086 flag = CHECKBIT(val64, 0x3);
3087 type = 2;
3088 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3089 &stat_info->xpak_stat.xpak_regs_stat,
3090 0x2, flag, type);
3091
3092 if(CHECKBIT(val64, 0x2))
3093 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3094
3095 flag = CHECKBIT(val64, 0x1);
3096 type = 3;
3097 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3098 &stat_info->xpak_stat.xpak_regs_stat,
3099 0x4, flag, type);
3100
3101 if(CHECKBIT(val64, 0x0))
3102 stat_info->xpak_stat.alarm_laser_output_power_low++;
3103
3104 /* Reading the Warning flags */
3105 addr = 0xA074;
3106 val64 = 0x0;
3107 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3108
3109 if(CHECKBIT(val64, 0x7))
3110 stat_info->xpak_stat.warn_transceiver_temp_high++;
3111
3112 if(CHECKBIT(val64, 0x6))
3113 stat_info->xpak_stat.warn_transceiver_temp_low++;
3114
3115 if(CHECKBIT(val64, 0x3))
3116 stat_info->xpak_stat.warn_laser_bias_current_high++;
3117
3118 if(CHECKBIT(val64, 0x2))
3119 stat_info->xpak_stat.warn_laser_bias_current_low++;
3120
3121 if(CHECKBIT(val64, 0x1))
3122 stat_info->xpak_stat.warn_laser_output_power_high++;
3123
3124 if(CHECKBIT(val64, 0x0))
3125 stat_info->xpak_stat.warn_laser_output_power_low++;
3126 }
3127
3128 /**
3129 * alarm_intr_handler - Alarm Interrupt handler
3130 * @nic: device private variable
3131 * Description: If the interrupt was neither because of a Rx packet nor
3132 * a Tx complete, this function is called. If the interrupt was to
3133 * indicate a loss of link, the OSM link status handler is invoked. For
3134 * any other alarm interrupt, the block that raised the interrupt is
3135 * displayed and a H/W reset is issued.
3136 * Return Value:
3137 * NONE
3138 */
3139
3140 static void alarm_intr_handler(struct s2io_nic *nic)
3141 {
3142 struct net_device *dev = (struct net_device *) nic->dev;
3143 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3144 register u64 val64 = 0, err_reg = 0;
3145 u64 cnt;
3146 int i;
3147 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3148 /* Handling the XPAK counters update */
3149 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3150 /* waiting for an hour */
3151 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3152 } else {
3153 s2io_updt_xpak_counter(dev);
3154 /* reset the count to zero */
3155 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3156 }
3157
3158 /* Handling link status change error Intr */
3159 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3160 err_reg = readq(&bar0->mac_rmac_err_reg);
3161 writeq(err_reg, &bar0->mac_rmac_err_reg);
3162 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3163 schedule_work(&nic->set_link_task);
3164 }
3165 }
3166
3167 /* Handling Ecc errors */
3168 val64 = readq(&bar0->mc_err_reg);
3169 writeq(val64, &bar0->mc_err_reg);
3170 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3171 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3172 nic->mac_control.stats_info->sw_stat.
3173 double_ecc_errs++;
3174 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3175 dev->name);
3176 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3177 if (nic->device_type != XFRAME_II_DEVICE) {
3178 /* Reset XframeI only if critical error */
3179 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3180 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3181 netif_stop_queue(dev);
3182 schedule_work(&nic->rst_timer_task);
3183 nic->mac_control.stats_info->sw_stat.
3184 soft_reset_cnt++;
3185 }
3186 }
3187 } else {
3188 nic->mac_control.stats_info->sw_stat.
3189 single_ecc_errs++;
3190 }
3191 }
3192
3193 /* In case of a serious error, the device will be Reset. */
3194 val64 = readq(&bar0->serr_source);
3195 if (val64 & SERR_SOURCE_ANY) {
3196 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3197 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3198 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3199 (unsigned long long)val64);
3200 netif_stop_queue(dev);
3201 schedule_work(&nic->rst_timer_task);
3202 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3203 }
3204
3205 /*
3206 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3207 * Error occurs, the adapter will be recycled by disabling the
3208 * adapter enable bit and enabling it again after the device
3209 * becomes Quiescent.
3210 */
3211 val64 = readq(&bar0->pcc_err_reg);
3212 writeq(val64, &bar0->pcc_err_reg);
3213 if (val64 & PCC_FB_ECC_DB_ERR) {
3214 u64 ac = readq(&bar0->adapter_control);
3215 ac &= ~(ADAPTER_CNTL_EN);
3216 writeq(ac, &bar0->adapter_control);
3217 ac = readq(&bar0->adapter_control);
3218 schedule_work(&nic->set_link_task);
3219 }
3220 /* Check for data parity error */
3221 val64 = readq(&bar0->pic_int_status);
3222 if (val64 & PIC_INT_GPIO) {
3223 val64 = readq(&bar0->gpio_int_reg);
3224 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3225 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3226 schedule_work(&nic->rst_timer_task);
3227 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3228 }
3229 }
3230
3231 /* Check for ring full counter */
3232 if (nic->device_type & XFRAME_II_DEVICE) {
3233 val64 = readq(&bar0->ring_bump_counter1);
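/*
 * Each 64-bit bump counter packs four 16-bit per-ring counts, most
 * significant field first: for i = 0, vBIT(0xFFFF, 0, 16) keeps bits
 * 63:48 and the shift by 64 - 16 = 48 moves them down to bits 15:0.
 */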
3234 for (i=0; i<4; i++) {
3235 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3236 cnt >>= 64 - ((i+1)*16);
3237 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3238 += cnt;
3239 }
3240
3241 val64 = readq(&bar0->ring_bump_counter2);
3242 for (i=0; i<4; i++) {
3243 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3244 cnt >>= 64 - ((i+1)*16);
3245 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3246 += cnt;
3247 }
3248 }
3249
3250 /* Other type of interrupts are not being handled now, TODO */
3251 }
3252
3253 /**
3254 * wait_for_cmd_complete - waits for a command to complete.
3255 * @addr : address of the register to be polled.
3256 * @busy_bit : bit in that register indicating the command is still busy.
3257 * Description: Function that waits for a command written to a command
3258 * register (such as the RMAC ADDR DATA registers) to complete and
3259 * returns either success or error depending on whether it finished.
3260 * Return value:
3261 * SUCCESS on success and FAILURE on failure.
3262 */
3263
3264 static int wait_for_cmd_complete(void *addr, u64 busy_bit)
3265 {
3266 int ret = FAILURE, cnt = 0;
3267 u64 val64;
3268
3269 while (TRUE) {
3270 val64 = readq(addr);
3271 if (!(val64 & busy_bit)) {
3272 ret = SUCCESS;
3273 break;
3274 }
3275
3276 if(in_interrupt())
3277 mdelay(50);
3278 else
3279 msleep(50);
3280
3281 if (cnt++ > 10)
3282 break;
3283 }
3284 return ret;
3285 }
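/*
 * Hypothetical call site, for illustration only (the register and
 * busy-bit names below are the usual RMAC command pair, shown as an
 * assumption rather than an actual call in this section):
 *
 *	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
 *			RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING) == FAILURE)
 *		return FAILURE;
 */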
3286
3287 /**
3288 * s2io_reset - Resets the card.
3289 * @sp : private member of the device structure.
3290 * Description: Function to Reset the card. This function then also
3291 * restores the previously saved PCI configuration space registers as
3292 * the card reset also resets the configuration space.
3293 * Return value:
3294 * void.
3295 */
3296
3297 static void s2io_reset(nic_t * sp)
3298 {
3299 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3300 u64 val64;
3301 u16 subid, pci_cmd;
3302
3303 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3304 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3305
3306 val64 = SW_RESET_ALL;
3307 writeq(val64, &bar0->sw_reset);
3308
3309 /*
3310 * At this stage, if the PCI write is indeed completed, the
3311 * card is reset and so is the PCI Config space of the device.
3312 * So a read cannot be issued at this stage on any of the
3313 * registers to ensure the write into "sw_reset" register
3314 * has gone through.
3315 * Question: Is there any system call that will explicitly force
3316 * all the write commands still pending on the bus to be pushed
3317 * through?
3318 * As of now I am just giving a 250ms delay and hoping that the
3319 * PCI write to sw_reset register is done by this time.
3320 */
3321 msleep(250);
3322 if (strstr(sp->product_name, "CX4")) {
3323 msleep(750);
3324 }
3325
3326 /* Restore the PCI state saved during initialization. */
3327 pci_restore_state(sp->pdev);
3328 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
3329 pci_cmd);
3330 s2io_init_pci(sp);
3331
3332 msleep(250);
3333
3334 /* Set swapper to enable I/O register access */
3335 s2io_set_swapper(sp);
3336
3337 /* Restore the MSIX table entries from local variables */
3338 restore_xmsi_data(sp);
3339
3340 /* Clear certain PCI/PCI-X fields after reset */
3341 if (sp->device_type == XFRAME_II_DEVICE) {
3342 /* Clear parity err detect bit */
3343 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3344
3345 /* Clearing PCIX Ecc status register */
3346 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3347
3348 /* Clearing PCI_STATUS error reflected here */
3349 writeq(BIT(62), &bar0->txpic_int_reg);
3350 }
3351
3352 /* Reset device statistics maintained by OS */
3353 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3354
3355 /* SXE-002: Configure link and activity LED to turn it off */
3356 subid = sp->pdev->subsystem_device;
3357 if (((subid & 0xFF) >= 0x07) &&
3358 (sp->device_type == XFRAME_I_DEVICE)) {
3359 val64 = readq(&bar0->gpio_control);
3360 val64 |= 0x0000800000000000ULL;
3361 writeq(val64, &bar0->gpio_control);
3362 val64 = 0x0411040400000000ULL;
3363 writeq(val64, (void __iomem *)bar0 + 0x2700);
3364 }
3365
3366 /*
3367 * Clear spurious ECC interrupts that would have occured on
3368 * XFRAME II cards after reset.
3369 */
3370 if (sp->device_type == XFRAME_II_DEVICE) {
3371 val64 = readq(&bar0->pcc_err_reg);
3372 writeq(val64, &bar0->pcc_err_reg);
3373 }
3374
3375 sp->device_enabled_once = FALSE;
3376 }
3377
3378 /**
3379 * s2io_set_swapper - to set the swapper control on the card
3380 * @sp : private member of the device structure,
3381 * pointer to the s2io_nic structure.
3382 * Description: Function to set the swapper control on the card
3383 * correctly depending on the 'endianness' of the system.
3384 * Return value:
3385 * SUCCESS on success and FAILURE on failure.
3386 */
3387
3388 static int s2io_set_swapper(nic_t * sp)
3389 {
3390 struct net_device *dev = sp->dev;
3391 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3392 u64 val64, valt, valr;
3393
3394 /*
3395 * Set proper endian settings and verify the same by reading
3396 * the PIF Feed-back register.
3397 */
3398
3399 val64 = readq(&bar0->pif_rd_swapper_fb);
3400 if (val64 != 0x0123456789ABCDEFULL) {
3401 int i = 0;
3402 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3403 0x8100008181000081ULL, /* FE=1, SE=0 */
3404 0x4200004242000042ULL, /* FE=0, SE=1 */
3405 0}; /* FE=0, SE=0 */
3406
3407 while(i<4) {
3408 writeq(value[i], &bar0->swapper_ctrl);
3409 val64 = readq(&bar0->pif_rd_swapper_fb);
3410 if (val64 == 0x0123456789ABCDEFULL)
3411 break;
3412 i++;
3413 }
3414 if (i == 4) {
3415 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3416 dev->name);
3417 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3418 (unsigned long long) val64);
3419 return FAILURE;
3420 }
3421 valr = value[i];
3422 } else {
3423 valr = readq(&bar0->swapper_ctrl);
3424 }
3425
3426 valt = 0x0123456789ABCDEFULL;
3427 writeq(valt, &bar0->xmsi_address);
3428 val64 = readq(&bar0->xmsi_address);
3429
3430 if(val64 != valt) {
3431 int i = 0;
3432 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3433 0x0081810000818100ULL, /* FE=1, SE=0 */
3434 0x0042420000424200ULL, /* FE=0, SE=1 */
3435 0}; /* FE=0, SE=0 */
3436
3437 while(i<4) {
3438 writeq((value[i] | valr), &bar0->swapper_ctrl);
3439 writeq(valt, &bar0->xmsi_address);
3440 val64 = readq(&bar0->xmsi_address);
3441 if(val64 == valt)
3442 break;
3443 i++;
3444 }
3445 if(i == 4) {
3446 unsigned long long x = val64;
3447 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3448 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3449 return FAILURE;
3450 }
3451 }
3452 val64 = readq(&bar0->swapper_ctrl);
3453 val64 &= 0xFFFF000000000000ULL;
3454
3455 #ifdef __BIG_ENDIAN
3456 /*
3457 * The device is set to a big endian format by default, so a
3458 * big endian driver need not set anything.
3459 */
3460 val64 |= (SWAPPER_CTRL_TXP_FE |
3461 SWAPPER_CTRL_TXP_SE |
3462 SWAPPER_CTRL_TXD_R_FE |
3463 SWAPPER_CTRL_TXD_W_FE |
3464 SWAPPER_CTRL_TXF_R_FE |
3465 SWAPPER_CTRL_RXD_R_FE |
3466 SWAPPER_CTRL_RXD_W_FE |
3467 SWAPPER_CTRL_RXF_W_FE |
3468 SWAPPER_CTRL_XMSI_FE |
3469 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3470 if (sp->intr_type == INTA)
3471 val64 |= SWAPPER_CTRL_XMSI_SE;
3472 writeq(val64, &bar0->swapper_ctrl);
3473 #else
3474 /*
3475 * Initially we enable all bits to make it accessible by the
3476 * driver, then we selectively enable only those bits that
3477 * we want to set.
3478 */
3479 val64 |= (SWAPPER_CTRL_TXP_FE |
3480 SWAPPER_CTRL_TXP_SE |
3481 SWAPPER_CTRL_TXD_R_FE |
3482 SWAPPER_CTRL_TXD_R_SE |
3483 SWAPPER_CTRL_TXD_W_FE |
3484 SWAPPER_CTRL_TXD_W_SE |
3485 SWAPPER_CTRL_TXF_R_FE |
3486 SWAPPER_CTRL_RXD_R_FE |
3487 SWAPPER_CTRL_RXD_R_SE |
3488 SWAPPER_CTRL_RXD_W_FE |
3489 SWAPPER_CTRL_RXD_W_SE |
3490 SWAPPER_CTRL_RXF_W_FE |
3491 SWAPPER_CTRL_XMSI_FE |
3492 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3493 if (sp->intr_type == INTA)
3494 val64 |= SWAPPER_CTRL_XMSI_SE;
3495 writeq(val64, &bar0->swapper_ctrl);
3496 #endif
3497 val64 = readq(&bar0->swapper_ctrl);
3498
3499 /*
3500 * Verifying if endian settings are accurate by reading a
3501 * feedback register.
3502 */
3503 val64 = readq(&bar0->pif_rd_swapper_fb);
3504 if (val64 != 0x0123456789ABCDEFULL) {
3505 /* Endian settings are incorrect, calls for another dekko. */
3506 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3507 dev->name);
3508 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3509 (unsigned long long) val64);
3510 return FAILURE;
3511 }
3512
3513 return SUCCESS;
3514 }
3515
3516 static int wait_for_msix_trans(nic_t *nic, int i)
3517 {
3518 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3519 u64 val64;
3520 int ret = 0, cnt = 0;
3521
3522 do {
3523 val64 = readq(&bar0->xmsi_access);
3524 if (!(val64 & BIT(15)))
3525 break;
3526 mdelay(1);
3527 cnt++;
3528 } while(cnt < 5);
3529 if (cnt == 5) {
3530 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3531 ret = 1;
3532 }
3533
3534 return ret;
3535 }
3536
3537 static void restore_xmsi_data(nic_t *nic)
3538 {
3539 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3540 u64 val64;
3541 int i;
3542
3543 for (i=0; i< nic->avail_msix_vectors; i++) {
3544 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3545 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3546 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3547 writeq(val64, &bar0->xmsi_access);
3548 if (wait_for_msix_trans(nic, i)) {
3549 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3550 continue;
3551 }
3552 }
3553 }
3554
3555 static void store_xmsi_data(nic_t *nic)
3556 {
3557 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3558 u64 val64, addr, data;
3559 int i;
3560
3561 /* Store and display */
3562 for (i=0; i< nic->avail_msix_vectors; i++) {
3563 val64 = (BIT(15) | vBIT(i, 26, 6));
3564 writeq(val64, &bar0->xmsi_access);
3565 if (wait_for_msix_trans(nic, i)) {
3566 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3567 continue;
3568 }
3569 addr = readq(&bar0->xmsi_address);
3570 data = readq(&bar0->xmsi_data);
3571 if (addr && data) {
3572 nic->msix_info[i].addr = addr;
3573 nic->msix_info[i].data = data;
3574 }
3575 }
3576 }
3577
3578 int s2io_enable_msi(nic_t *nic)
3579 {
3580 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3581 u16 msi_ctrl, msg_val;
3582 struct config_param *config = &nic->config;
3583 struct net_device *dev = nic->dev;
3584 u64 val64, tx_mat, rx_mat;
3585 int i, err;
3586
3587 val64 = readq(&bar0->pic_control);
3588 val64 &= ~BIT(1);
3589 writeq(val64, &bar0->pic_control);
3590
3591 err = pci_enable_msi(nic->pdev);
3592 if (err) {
3593 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3594 nic->dev->name);
3595 return err;
3596 }
3597
3598 /*
3599 * Enable MSI and use MSI-1 instead of the standard MSI-0
3600 * for interrupt handling.
3601 */
3602 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3603 msg_val ^= 0x1;
3604 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3605 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3606
3607 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3608 msi_ctrl |= 0x10;
3609 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3610
3611 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3612 tx_mat = readq(&bar0->tx_mat0_n[0]);
3613 for (i=0; i<config->tx_fifo_num; i++) {
3614 tx_mat |= TX_MAT_SET(i, 1);
3615 }
3616 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3617
3618 rx_mat = readq(&bar0->rx_mat);
3619 for (i=0; i<config->rx_ring_num; i++) {
3620 rx_mat |= RX_MAT_SET(i, 1);
3621 }
3622 writeq(rx_mat, &bar0->rx_mat);
3623
3624 dev->irq = nic->pdev->irq;
3625 return 0;
3626 }
3627
3628 static int s2io_enable_msi_x(nic_t *nic)
3629 {
3630 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3631 u64 tx_mat, rx_mat;
3632 u16 msi_control; /* Temp variable */
3633 int ret, i, j, msix_indx = 1;
3634
3635 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3636 GFP_KERNEL);
3637 if (nic->entries == NULL) {
3638 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3639 return -ENOMEM;
3640 }
3641 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3642
	nic->s2io_entries =
		kzalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
			GFP_KERNEL);
	if (nic->s2io_entries == NULL) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  __FUNCTION__);
		kfree(nic->entries);
		return -ENOMEM;
	}
3653
	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3655 nic->entries[i].entry = i;
3656 nic->s2io_entries[i].entry = i;
3657 nic->s2io_entries[i].arg = NULL;
3658 nic->s2io_entries[i].in_use = 0;
3659 }
3660
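	/*
	 * Steer every Tx FIFO and Rx ring interrupt to its own MSI-X
	 * vector through the tx_mat/rx_mat registers.  msix_indx starts
	 * at 1, so vector 0 is deliberately left unassigned by this loop.
	 */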
3661 tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i = 0; i < nic->config.tx_fifo_num; i++, msix_indx++) {
3663 tx_mat |= TX_MAT_SET(i, msix_indx);
3664 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3665 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3666 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3667 }
3668 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3669
3670 if (!nic->config.bimodal) {
3671 rx_mat = readq(&bar0->rx_mat);
		for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
3673 rx_mat |= RX_MAT_SET(j, msix_indx);
3674 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3675 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3676 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3677 }
3678 writeq(rx_mat, &bar0->rx_mat);
3679 } else {
3680 tx_mat = readq(&bar0->tx_mat0_n[7]);
		for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
			tx_mat |= TX_MAT_SET(j, msix_indx);
3683 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3684 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3685 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3686 }
3687 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3688 }
3689
3690 nic->avail_msix_vectors = 0;
3691 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
	/* We fail init if error or we get fewer vectors than min required */
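	/*
	 * pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, or the number of vectors actually available.  In the
	 * last case we retry with the smaller count, provided it still
	 * covers one vector per FIFO and per ring, plus vector 0.
	 */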
3693 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3694 nic->avail_msix_vectors = ret;
3695 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3696 }
3697 if (ret) {
3698 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3699 kfree(nic->entries);
3700 kfree(nic->s2io_entries);
3701 nic->entries = NULL;
3702 nic->s2io_entries = NULL;
3703 nic->avail_msix_vectors = 0;
3704 return -ENOMEM;
3705 }
3706 if (!nic->avail_msix_vectors)
3707 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3708
3709 /*
3710 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3711 * in the herc NIC. (Temp change, needs to be removed later)
3712 */
3713 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3714 msi_control |= 0x1; /* Enable MSI */
3715 pci_write_config_word(nic->pdev, 0x42, msi_control);
3716
3717 return 0;
3718 }
3719
3720 /* ********************************************************* *
3721 * Functions defined below concern the OS part of the driver *
3722 * ********************************************************* */
3723
3724 /**
3725 * s2io_open - open entry point of the driver
3726 * @dev : pointer to the device structure.
3727 * Description:
3728 * This function is the open entry point of the driver. It mainly calls a
3729 * function to allocate Rx buffers and inserts them into the buffer
3730 * descriptors and then enables the Rx part of the NIC.
3731 * Return value:
 * 0 on success and an appropriate negative error code (as defined in
 * errno.h) on failure.
3734 */
3735
3736 static int s2io_open(struct net_device *dev)
3737 {
3738 nic_t *sp = dev->priv;
3739 int err = 0;
3740
3741 /*
	 * Make sure the link is reported down by default every time
	 * the NIC is initialized.
3744 */
3745 netif_carrier_off(dev);
3746 sp->last_link_state = 0;
3747
3748 /* Initialize H/W and enable interrupts */
3749 err = s2io_card_up(sp);
3750 if (err) {
3751 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3752 dev->name);
3753 if (err == -ENODEV)
3754 goto hw_init_failed;
3755 else
3756 goto hw_enable_failed;
3757 }
3758
3759 /* Store the values of the MSIX table in the nic_t structure */
3760 store_xmsi_data(sp);
3761
3762 /* After proper initialization of H/W, register ISR */
3763 if (sp->intr_type == MSI) {
3764 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
3765 SA_SHIRQ, sp->name, dev);
3766 if (err) {
			DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
				  dev->name);
3769 goto isr_registration_failed;
3770 }
3771 }
3772 if (sp->intr_type == MSI_X) {
3773 int i;
3774
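		/*
		 * Walk the vector table built in s2io_enable_msi_x():
		 * entries remain flagged MSIX_FLG until registered here,
		 * and the first entry without that flag ends the scan.
		 */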
		for (i = 1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
3776 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
3777 sprintf(sp->desc1, "%s:MSI-X-%d-TX",
3778 dev->name, i);
3779 err = request_irq(sp->entries[i].vector,
3780 s2io_msix_fifo_handle, 0, sp->desc1,
3781 sp->s2io_entries[i].arg);
3782 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
3783 (unsigned long long)sp->msix_info[i].addr);
3784 } else {
3785 sprintf(sp->desc2, "%s:MSI-X-%d-RX",
3786 dev->name, i);
3787 err = request_irq(sp->entries[i].vector,
3788 s2io_msix_ring_handle, 0, sp->desc2,
3789 sp->s2io_entries[i].arg);
3790 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
3791 (unsigned long long)sp->msix_info[i].addr);
3792 }
3793 if (err) {
				DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration "
					  "failed\n", dev->name, i);
3796 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
3797 goto isr_registration_failed;
3798 }
3799 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
3800 }
3801 }
3802 if (sp->intr_type == INTA) {
3803 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
3804 sp->name, dev);
3805 if (err) {
3806 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3807 dev->name);
3808 goto isr_registration_failed;
3809 }
3810 }
3811
3812 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3813 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3814 err = -ENODEV;
3815 goto setting_mac_address_failed;
3816 }
3817
3818 netif_start_queue(dev);
3819 return 0;
3820
3821 setting_mac_address_failed:
3822 if (sp->intr_type != MSI_X)
3823 free_irq(sp->pdev->irq, dev);
3824 isr_registration_failed:
3825 del_timer_sync(&sp->alarm_timer);
3826 if (sp->intr_type == MSI_X) {
3827 int i;
3828 u16 msi_control; /* Temp variable */
3829
		for (i = 1; (sp->s2io_entries[i].in_use ==
			     MSIX_REGISTERED_SUCCESS); i++) {
3832 int vector = sp->entries[i].vector;
3833 void *arg = sp->s2io_entries[i].arg;
3834
3835 free_irq(vector, arg);
3836 }
3837 pci_disable_msix(sp->pdev);
3838
3839 /* Temp */
3840 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3841 msi_control &= 0xFFFE; /* Disable MSI */
3842 pci_write_config_word(sp->pdev, 0x42, msi_control);
3843 }
3844 else if (sp->intr_type == MSI)
3845 pci_disable_msi(sp->pdev);
3846 hw_enable_failed:
3847 s2io_reset(sp);
3848 hw_init_failed:
	if (sp->intr_type == MSI_X) {
		kfree(sp->entries);
		kfree(sp->s2io_entries);
	}
3855 return err;
3856 }
3857
3858 /**
3859 * s2io_close -close entry point of the driver
3860 * @dev : device pointer.
3861 * Description:
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it is usually referred
 * to as the close function. Among other things, this function mainly
 * stops the Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3866 * Return value:
 * 0 on success and an appropriate negative error code (as defined in
 * errno.h) on failure.
3869 */
3870
3871 static int s2io_close(struct net_device *dev)
3872 {
3873 nic_t *sp = dev->priv;
3874
3875 flush_scheduled_work();
3876 netif_stop_queue(dev);
3877 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3878 s2io_card_down(sp, 1);
3879
3880 sp->device_close_flag = TRUE; /* Device is shut down. */
3881 return 0;
3882 }
3883
3884 /**
 * s2io_xmit - Tx entry point of the driver
3886 * @skb : the socket buffer containing the Tx data.
3887 * @dev : device pointer.
3888 * Description :
3889 * This function is the Tx entry point of the driver. S2IO NIC supports
3890 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when the device cannot queue the packet, just the trans_start
 * variable will not be updated.
3893 * Return value:
3894 * 0 on success & 1 on failure.
3895 */
3896
3897 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3898 {
3899 nic_t *sp = dev->priv;
3900 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3901 register u64 val64;
3902 TxD_t *txdp;
3903 TxFIFO_element_t __iomem *tx_fifo;
3904 unsigned long flags;
3905 #ifdef NETIF_F_TSO
3906 int mss;
3907 #endif
3908 u16 vlan_tag = 0;
3909 int vlan_priority = 0;
3910 mac_info_t *mac_control;
3911 struct config_param *config;
3912
3913 mac_control = &sp->mac_control;
3914 config = &sp->config;
3915
3916 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3917 spin_lock_irqsave(&sp->tx_lock, flags);
3918 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3919 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3920 dev->name);
3921 spin_unlock_irqrestore(&sp->tx_lock, flags);
3922 dev_kfree_skb(skb);
3923 return 0;
3924 }
3925
3926 queue = 0;
3927
3928 /* Get Fifo number to Transmit based on vlan priority */
3929 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3930 vlan_tag = vlan_tx_tag_get(skb);
3931 vlan_priority = vlan_tag >> 13;
3932 queue = config->fifo_mapping[vlan_priority];
3933 }
3934
3935 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3936 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3937 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3938 list_virt_addr;
3939
3940 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3941 /* Avoid "put" pointer going beyond "get" pointer */
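	/*
	 * The TxD list is treated as a ring: it is full when advancing
	 * "put" by one (with wrap-around) would land on "get".  E.g. with
	 * fifo_len 8 (queue_len 9), put_off 8 and get_off 0 means no free
	 * TxDs remain.
	 */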
3942 if (txdp->Host_Control ||
3943 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3944 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3945 netif_stop_queue(dev);
3946 dev_kfree_skb(skb);
3947 spin_unlock_irqrestore(&sp->tx_lock, flags);
3948 return 0;
3949 }
3950
3951 /* A buffer with no data will be dropped */
3952 if (!skb->len) {
3953 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3954 dev_kfree_skb(skb);
3955 spin_unlock_irqrestore(&sp->tx_lock, flags);
3956 return 0;
3957 }
3958
3959 txdp->Control_1 = 0;
3960 txdp->Control_2 = 0;
3961 #ifdef NETIF_F_TSO
3962 mss = skb_shinfo(skb)->gso_size;
3963 if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
3964 txdp->Control_1 |= TXD_TCP_LSO_EN;
3965 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3966 }
3967 #endif
3968 if (skb->ip_summed == CHECKSUM_HW) {
3969 txdp->Control_2 |=
3970 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3971 TXD_TX_CKO_UDP_EN);
3972 }
3973 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3974 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3975 txdp->Control_2 |= config->tx_intr_type;
3976
3977 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3978 txdp->Control_2 |= TXD_VLAN_ENABLE;
3979 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3980 }
3981
3982 frg_len = skb->len - skb->data_len;
3983 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
3984 int ufo_size;
3985
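		/*
		 * UDP fragmentation offload: the MSS is rounded down to a
		 * multiple of 8, and descriptor 0 is spent on an 8-byte
		 * in-band header carrying the IPv6 fragment ID (shifted up
		 * on little-endian hosts, presumably so it occupies the
		 * byte lanes the hardware expects).
		 */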
3986 ufo_size = skb_shinfo(skb)->gso_size;
3987 ufo_size &= ~7;
3988 txdp->Control_1 |= TXD_UFO_EN;
3989 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3990 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3991 #ifdef __BIG_ENDIAN
3992 sp->ufo_in_band_v[put_off] =
3993 (u64)skb_shinfo(skb)->ip6_frag_id;
3994 #else
3995 sp->ufo_in_band_v[put_off] =
3996 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3997 #endif
3998 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3999 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4000 sp->ufo_in_band_v,
4001 sizeof(u64), PCI_DMA_TODEVICE);
4002 txdp++;
4003 txdp->Control_1 = 0;
4004 txdp->Control_2 = 0;
4005 }
4006
4007 txdp->Buffer_Pointer = pci_map_single
4008 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4009 txdp->Host_Control = (unsigned long) skb;
4010 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4011
4012 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
4013 txdp->Control_1 |= TXD_UFO_EN;
4014
4015 frg_cnt = skb_shinfo(skb)->nr_frags;
4016 /* For fragmented SKB. */
4017 for (i = 0; i < frg_cnt; i++) {
4018 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4019 /* A '0' length fragment will be ignored */
4020 if (!frag->size)
4021 continue;
4022 txdp++;
4023 txdp->Buffer_Pointer = (u64) pci_map_page
4024 (sp->pdev, frag->page, frag->page_offset,
4025 frag->size, PCI_DMA_TODEVICE);
4026 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4027 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
4028 txdp->Control_1 |= TXD_UFO_EN;
4029 }
4030 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4031
4032 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
4033 frg_cnt++; /* as Txd0 was used for inband header */
4034
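	/*
	 * Hand the completed descriptor list to the hardware: write its
	 * physical address to the FIFO's TxDL pointer, then a control
	 * word naming the last TxD and flagging special functions
	 * (LSO/UFO) where used.
	 */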
4035 tx_fifo = mac_control->tx_FIFO_start[queue];
4036 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
4037 writeq(val64, &tx_fifo->TxDL_Pointer);
4038
4039 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4040 TX_FIFO_LAST_LIST);
4041
4042 #ifdef NETIF_F_TSO
4043 if (mss)
4044 val64 |= TX_FIFO_SPECIAL_FUNC;
4045 #endif
4046 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
4047 val64 |= TX_FIFO_SPECIAL_FUNC;
4048 writeq(val64, &tx_fifo->List_Control);
4049
4050 mmiowb();
4051
4052 put_off++;
4053 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4054 put_off = 0;
4055 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
4056
4057 /* Avoid "put" pointer going beyond "get" pointer */
4058 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4059 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4060 DBG_PRINT(TX_DBG,
4061 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4062 put_off, get_off);
4063 netif_stop_queue(dev);
4064 }
4065
4066 dev->trans_start = jiffies;
4067 spin_unlock_irqrestore(&sp->tx_lock, flags);
4068
4069 return 0;
4070 }
4071
4072 static void
4073 s2io_alarm_handle(unsigned long data)
4074 {
4075 nic_t *sp = (nic_t *)data;
4076
4077 alarm_intr_handler(sp);
4078 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4079 }
4080
4081 static irqreturn_t
4082 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
4083 {
4084 struct net_device *dev = (struct net_device *) dev_id;
4085 nic_t *sp = dev->priv;
4086 int i;
4087 int ret;
4088 mac_info_t *mac_control;
4089 struct config_param *config;
4090
4091 atomic_inc(&sp->isr_cnt);
4092 mac_control = &sp->mac_control;
4093 config = &sp->config;
4094 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4095
4096 /* If Intr is because of Rx Traffic */
4097 for (i = 0; i < config->rx_ring_num; i++)
4098 rx_intr_handler(&mac_control->rings[i]);
4099
4100 /* If Intr is because of Tx Traffic */
4101 for (i = 0; i < config->tx_fifo_num; i++)
4102 tx_intr_handler(&mac_control->fifos[i]);
4103
4104 /*
4105 * If the Rx buffer count is below the panic threshold then
4106 * reallocate the buffers from the interrupt handler itself,
4107 * else schedule a tasklet to reallocate the buffers.
4108 */
4109 for (i = 0; i < config->rx_ring_num; i++) {
4110 if (!sp->lro) {
4111 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
4112 int level = rx_buffer_level(sp, rxb_size, i);
4113
4114 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4115 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
4116 dev->name);
4117 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4118 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
4119 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4120 dev->name);
4121 DBG_PRINT(ERR_DBG, " in ISR!!\n");
4122 clear_bit(0, (&sp->tasklet_status));
4123 atomic_dec(&sp->isr_cnt);
4124 return IRQ_HANDLED;
4125 }
4126 clear_bit(0, (&sp->tasklet_status));
4127 } else if (level == LOW) {
4128 tasklet_schedule(&sp->task);
4129 }
		} else if (fill_rx_buffers(sp, i) == -ENOMEM) {
4132 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4133 dev->name);
4134 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4135 break;
4136 }
4137 }
4138
4139 atomic_dec(&sp->isr_cnt);
4140 return IRQ_HANDLED;
4141 }
4142
4143 static irqreturn_t
4144 s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
4145 {
	/* dev_id is the ring passed to request_irq(), not the net_device */
	ring_info_t *ring = (ring_info_t *)dev_id;
	nic_t *sp = ring->nic;
	struct net_device *dev = sp->dev;
	int rxb_size, level, rng_n;
4150
4151 atomic_inc(&sp->isr_cnt);
4152 rx_intr_handler(ring);
4153
4154 rng_n = ring->ring_no;
4155 if (!sp->lro) {
4156 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4157 level = rx_buffer_level(sp, rxb_size, rng_n);
4158
4159 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4160 int ret;
4161 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4162 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4163 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4164 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4165 __FUNCTION__);
4166 clear_bit(0, (&sp->tasklet_status));
4167 return IRQ_HANDLED;
4168 }
4169 clear_bit(0, (&sp->tasklet_status));
4170 } else if (level == LOW) {
4171 tasklet_schedule(&sp->task);
4172 }
	} else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4175 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
4176 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4177 }
4178
4179 atomic_dec(&sp->isr_cnt);
4180
4181 return IRQ_HANDLED;
4182 }
4183
4184 static irqreturn_t
4185 s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
4186 {
4187 fifo_info_t *fifo = (fifo_info_t *)dev_id;
4188 nic_t *sp = fifo->nic;
4189
4190 atomic_inc(&sp->isr_cnt);
4191 tx_intr_handler(fifo);
4192 atomic_dec(&sp->isr_cnt);
4193 return IRQ_HANDLED;
}

4195 static void s2io_txpic_intr_handle(nic_t *sp)
4196 {
4197 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4198 u64 val64;
4199
4200 val64 = readq(&bar0->pic_int_status);
4201 if (val64 & PIC_INT_GPIO) {
4202 val64 = readq(&bar0->gpio_int_reg);
4203 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4204 (val64 & GPIO_INT_REG_LINK_UP)) {
4205 /*
			 * This is an unstable state, so clear both up/down
			 * interrupts and let the adapter re-evaluate the
			 * link state.
4208 */
4209 val64 |= GPIO_INT_REG_LINK_DOWN;
4210 val64 |= GPIO_INT_REG_LINK_UP;
4211 writeq(val64, &bar0->gpio_int_reg);
4212 val64 = readq(&bar0->gpio_int_mask);
4213 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4214 GPIO_INT_MASK_LINK_DOWN);
4215 writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
4218 val64 = readq(&bar0->adapter_status);
4219 if (verify_xena_quiescence(sp, val64,
4220 sp->device_enabled_once)) {
4221 /* Enable Adapter */
4222 val64 = readq(&bar0->adapter_control);
4223 val64 |= ADAPTER_CNTL_EN;
4224 writeq(val64, &bar0->adapter_control);
4225 val64 |= ADAPTER_LED_ON;
4226 writeq(val64, &bar0->adapter_control);
4227 if (!sp->device_enabled_once)
4228 sp->device_enabled_once = 1;
4229
4230 s2io_link(sp, LINK_UP);
4231 /*
4232 * unmask link down interrupt and mask link-up
4233 * intr
4234 */
4235 val64 = readq(&bar0->gpio_int_mask);
4236 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4237 val64 |= GPIO_INT_MASK_LINK_UP;
4238 writeq(val64, &bar0->gpio_int_mask);
4239
4240 }
		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4242 val64 = readq(&bar0->adapter_status);
4243 if (verify_xena_quiescence(sp, val64,
4244 sp->device_enabled_once)) {
4245 s2io_link(sp, LINK_DOWN);
				/* Link is down so unmask link up interrupt */
4247 val64 = readq(&bar0->gpio_int_mask);
4248 val64 &= ~GPIO_INT_MASK_LINK_UP;
4249 val64 |= GPIO_INT_MASK_LINK_DOWN;
4250 writeq(val64, &bar0->gpio_int_mask);
4251 }
4252 }
4253 }
4254 val64 = readq(&bar0->gpio_int_mask);
4255 }
4256
4257 /**
 * s2io_isr - ISR handler of the device.
4259 * @irq: the irq of the device.
4260 * @dev_id: a void pointer to the dev structure of the NIC.
4261 * @pt_regs: pointer to the registers pushed on the stack.
4262 * Description: This function is the ISR handler of the device. It
4263 * identifies the reason for the interrupt and calls the relevant
 * service routines. As a contingency measure, this ISR allocates the
 * recv buffers, if their numbers are below the panic value which is
4266 * presently set to 25% of the original number of rcv buffers allocated.
4267 * Return value:
4268 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4269 * IRQ_NONE: will be returned if interrupt is not from our device
4270 */
4271 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
4272 {
4273 struct net_device *dev = (struct net_device *) dev_id;
4274 nic_t *sp = dev->priv;
4275 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4276 int i;
4277 u64 reason = 0, val64, org_mask;
4278 mac_info_t *mac_control;
4279 struct config_param *config;
4280
4281 atomic_inc(&sp->isr_cnt);
4282 mac_control = &sp->mac_control;
4283 config = &sp->config;
4284
4285 /*
4286 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be:
4288 * 1. Rx of packet.
4289 * 2. Tx complete.
4290 * 3. Link down.
4291 * 4. Error in any functional blocks of the NIC.
4292 */
4293 reason = readq(&bar0->general_int_status);
4294
4295 if (!reason) {
4296 /* The interrupt was not raised by Xena. */
4297 atomic_dec(&sp->isr_cnt);
4298 return IRQ_NONE;
4299 }
4300
4301 val64 = 0xFFFFFFFFFFFFFFFFULL;
4302 /* Store current mask before masking all interrupts */
4303 org_mask = readq(&bar0->general_int_mask);
4304 writeq(val64, &bar0->general_int_mask);
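	/*
	 * All interrupt sources are masked for the duration of this ISR;
	 * the mask saved in org_mask is restored on every exit path so
	 * that no cause is left permanently disabled.
	 */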
4305
4306 #ifdef CONFIG_S2IO_NAPI
4307 if (reason & GEN_INTR_RXTRAFFIC) {
4308 if (netif_rx_schedule_prep(dev)) {
4309 writeq(val64, &bar0->rx_traffic_mask);
4310 __netif_rx_schedule(dev);
4311 }
4312 }
4313 #else
4314 /*
4315 * Rx handler is called by default, without checking for the
4316 * cause of interrupt.
4317 * rx_traffic_int reg is an R1 register, writing all 1's
	 * will ensure that the actual interrupt causing bit gets
4319 * cleared and hence a read can be avoided.
4320 */
4321 writeq(val64, &bar0->rx_traffic_int);
4322 for (i = 0; i < config->rx_ring_num; i++) {
4323 rx_intr_handler(&mac_control->rings[i]);
4324 }
4325 #endif
4326
4327 /*
4328 * tx_traffic_int reg is an R1 register, writing all 1's
	 * will ensure that the actual interrupt causing bit gets
4330 * cleared and hence a read can be avoided.
4331 */
4332 writeq(val64, &bar0->tx_traffic_int);
4333
4334 for (i = 0; i < config->tx_fifo_num; i++)
4335 tx_intr_handler(&mac_control->fifos[i]);
4336
4337 if (reason & GEN_INTR_TXPIC)
4338 s2io_txpic_intr_handle(sp);
4339 /*
4340 * If the Rx buffer count is below the panic threshold then
4341 * reallocate the buffers from the interrupt handler itself,
4342 * else schedule a tasklet to reallocate the buffers.
4343 */
4344 #ifndef CONFIG_S2IO_NAPI
4345 for (i = 0; i < config->rx_ring_num; i++) {
4346 if (!sp->lro) {
4347 int ret;
4348 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
4349 int level = rx_buffer_level(sp, rxb_size, i);
4350
4351 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4352 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
4353 dev->name);
4354 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4355 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
4356 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4357 dev->name);
4358 DBG_PRINT(ERR_DBG, " in ISR!!\n");
4359 clear_bit(0, (&sp->tasklet_status));
4360 atomic_dec(&sp->isr_cnt);
4361 writeq(org_mask, &bar0->general_int_mask);
4362 return IRQ_HANDLED;
4363 }
4364 clear_bit(0, (&sp->tasklet_status));
4365 } else if (level == LOW) {
4366 tasklet_schedule(&sp->task);
4367 }
		} else if (fill_rx_buffers(sp, i) == -ENOMEM) {
4370 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4371 dev->name);
4372 DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
4373 break;
4374 }
4375 }
4376 #endif
4377 writeq(org_mask, &bar0->general_int_mask);
4378 atomic_dec(&sp->isr_cnt);
4379 return IRQ_HANDLED;
4380 }
4381
4382 /**
 * s2io_updt_stats - Triggers a one-shot update of the H/W statistics block.
4384 */
4385 static void s2io_updt_stats(nic_t *sp)
4386 {
4387 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4388 u64 val64;
4389 int cnt = 0;
4390
4391 if (atomic_read(&sp->card_state) == CARD_UP) {
4392 /* Apprx 30us on a 133 MHz bus */
4393 val64 = SET_UPDT_CLICKS(10) |
4394 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4395 writeq(val64, &bar0->stat_cfg);
4396 do {
4397 udelay(100);
4398 val64 = readq(&bar0->stat_cfg);
4399 if (!(val64 & BIT(0)))
4400 break;
4401 cnt++;
4402 if (cnt == 5)
4403 break; /* Updt failed */
		} while (1);
4405 }
4406 }
4407
4408 /**
4409 * s2io_get_stats - Updates the device statistics structure.
4410 * @dev : pointer to the device structure.
4411 * Description:
4412 * This function updates the device statistics structure in the s2io_nic
4413 * structure and returns a pointer to the same.
4414 * Return value:
4415 * pointer to the updated net_device_stats structure.
4416 */
4417
4418 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4419 {
4420 nic_t *sp = dev->priv;
4421 mac_info_t *mac_control;
4422 struct config_param *config;
4423
4424
4425 mac_control = &sp->mac_control;
4426 config = &sp->config;
4427
4428 /* Configure Stats for immediate updt */
4429 s2io_updt_stats(sp);
4430
4431 sp->stats.tx_packets =
4432 le32_to_cpu(mac_control->stats_info->tmac_frms);
4433 sp->stats.tx_errors =
4434 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4435 sp->stats.rx_errors =
4436 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
4437 sp->stats.multicast =
4438 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4439 sp->stats.rx_length_errors =
4440 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
4441
	return &sp->stats;
4443 }
4444
4445 /**
4446 * s2io_set_multicast - entry point for multicast address enable/disable.
4447 * @dev : pointer to the device structure
4448 * Description:
4449 * This function is a driver entry point which gets called by the kernel
4450 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine whether multicast addresses are to be enabled or promiscuous
 * mode is to be disabled, etc.
4454 * Return value:
4455 * void.
4456 */
4457
4458 static void s2io_set_multicast(struct net_device *dev)
4459 {
4460 int i, j, prev_cnt;
4461 struct dev_mc_list *mclist;
4462 nic_t *sp = dev->priv;
4463 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4464 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4465 0xfeffffffffffULL;
4466 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4467 void __iomem *add;
4468
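	/*
	 * Every RMAC filter update below follows the same handshake: load
	 * rmac_addr_data0/1_mem, issue a write-strobe command selecting
	 * the filter offset through rmac_addr_cmd_mem, then poll until
	 * the EXECUTING bit clears.
	 */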
4469 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4470 /* Enable all Multicast addresses */
4471 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4472 &bar0->rmac_addr_data0_mem);
4473 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4474 &bar0->rmac_addr_data1_mem);
4475 val64 = RMAC_ADDR_CMD_MEM_WE |
4476 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4477 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4478 writeq(val64, &bar0->rmac_addr_cmd_mem);
4479 /* Wait till command completes */
4480 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4481 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
4482
4483 sp->m_cast_flg = 1;
4484 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4485 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4486 /* Disable all Multicast addresses */
4487 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4488 &bar0->rmac_addr_data0_mem);
4489 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4490 &bar0->rmac_addr_data1_mem);
4491 val64 = RMAC_ADDR_CMD_MEM_WE |
4492 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4493 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4494 writeq(val64, &bar0->rmac_addr_cmd_mem);
4495 /* Wait till command completes */
4496 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4497 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
4498
4499 sp->m_cast_flg = 0;
4500 sp->all_multi_pos = 0;
4501 }
4502
4503 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4504 /* Put the NIC into promiscuous mode */
4505 add = &bar0->mac_cfg;
4506 val64 = readq(&bar0->mac_cfg);
4507 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4508
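		/*
		 * mac_cfg is evidently key-protected: each 32-bit half of
		 * the 64-bit write must be preceded by writing the 0x4C0D
		 * key to rmac_cfg_key.
		 */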
4509 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4510 writel((u32) val64, add);
4511 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4512 writel((u32) (val64 >> 32), (add + 4));
4513
4514 val64 = readq(&bar0->mac_cfg);
4515 sp->promisc_flg = 1;
4516 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4517 dev->name);
4518 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4519 /* Remove the NIC from promiscuous mode */
4520 add = &bar0->mac_cfg;
4521 val64 = readq(&bar0->mac_cfg);
4522 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4523
4524 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4525 writel((u32) val64, add);
4526 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4527 writel((u32) (val64 >> 32), (add + 4));
4528
4529 val64 = readq(&bar0->mac_cfg);
4530 sp->promisc_flg = 0;
4531 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4532 dev->name);
4533 }
4534
4535 /* Update individual M_CAST address list */
4536 if ((!sp->m_cast_flg) && dev->mc_count) {
4537 if (dev->mc_count >
4538 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4539 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4540 dev->name);
4541 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4542 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4543 return;
4544 }
4545
4546 prev_cnt = sp->mc_addr_count;
4547 sp->mc_addr_count = dev->mc_count;
4548
4549 /* Clear out the previous list of Mc in the H/W. */
4550 for (i = 0; i < prev_cnt; i++) {
4551 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4552 &bar0->rmac_addr_data0_mem);
4553 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4554 &bar0->rmac_addr_data1_mem);
4555 val64 = RMAC_ADDR_CMD_MEM_WE |
4556 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4557 RMAC_ADDR_CMD_MEM_OFFSET
4558 (MAC_MC_ADDR_START_OFFSET + i);
4559 writeq(val64, &bar0->rmac_addr_cmd_mem);
4560
			/* Wait till command completes */
4562 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4563 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4564 DBG_PRINT(ERR_DBG, "%s: Adding ",
4565 dev->name);
4566 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4567 return;
4568 }
4569 }
4570
4571 /* Create the new Rx filter list and update the same in H/W. */
4572 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4573 i++, mclist = mclist->next) {
4574 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4575 ETH_ALEN);
4576 mac_addr = 0;
4577 for (j = 0; j < ETH_ALEN; j++) {
4578 mac_addr |= mclist->dmi_addr[j];
4579 mac_addr <<= 8;
4580 }
4581 mac_addr >>= 8;
4582 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4583 &bar0->rmac_addr_data0_mem);
4584 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4585 &bar0->rmac_addr_data1_mem);
4586 val64 = RMAC_ADDR_CMD_MEM_WE |
4587 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4588 RMAC_ADDR_CMD_MEM_OFFSET
4589 (i + MAC_MC_ADDR_START_OFFSET);
4590 writeq(val64, &bar0->rmac_addr_cmd_mem);
4591
			/* Wait till command completes */
4593 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4594 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4595 DBG_PRINT(ERR_DBG, "%s: Adding ",
4596 dev->name);
4597 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4598 return;
4599 }
4600 }
4601 }
4602 }
4603
4604 /**
4605 * s2io_set_mac_addr - Programs the Xframe mac address
4606 * @dev : pointer to the device structure.
4607 * @addr: a uchar pointer to the new mac address which is to be set.
4608 * Description : This procedure will program the Xframe to receive
4609 * frames with new Mac Address
 * Return value: SUCCESS on success and FAILURE on failure.
4612 */
4613
4614 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4615 {
4616 nic_t *sp = dev->priv;
4617 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4618 register u64 val64, mac_addr = 0;
4619 int i;
4620
4621 /*
4622 * Set the new MAC address as the new unicast filter and reflect this
4623 * change on the device address registered with the OS. It will be
4624 * at offset 0.
4625 */
4626 for (i = 0; i < ETH_ALEN; i++) {
4627 mac_addr <<= 8;
4628 mac_addr |= addr[i];
4629 }
4630
4631 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4632 &bar0->rmac_addr_data0_mem);
4633
4634 val64 =
4635 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4636 RMAC_ADDR_CMD_MEM_OFFSET(0);
4637 writeq(val64, &bar0->rmac_addr_cmd_mem);
4638 /* Wait till command completes */
4639 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4640 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4641 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4642 return FAILURE;
4643 }
4644
4645 return SUCCESS;
4646 }
4647
4648 /**
4649 * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
4651 * @info: pointer to the structure with parameters given by ethtool to set
4652 * link information.
4653 * Description:
4654 * The function sets different link parameters provided by the user onto
4655 * the NIC.
4656 * Return value:
4657 * 0 on success.
4658 */
4659
4660 static int s2io_ethtool_sset(struct net_device *dev,
4661 struct ethtool_cmd *info)
4662 {
4663 nic_t *sp = dev->priv;
4664 if ((info->autoneg == AUTONEG_ENABLE) ||
4665 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4666 return -EINVAL;
4667 else {
4668 s2io_close(sp->dev);
4669 s2io_open(sp->dev);
4670 }
4671
4672 return 0;
4673 }
4674
4675 /**
 * s2io_ethtool_gset - Return link specific information.
4677 * @sp : private member of the device structure, pointer to the
4678 * s2io_nic structure.
4679 * @info : pointer to the structure with parameters given by ethtool
4680 * to return link information.
4681 * Description:
4682 * Returns link specific information like speed, duplex etc.. to ethtool.
4683 * Return value :
4684 * return 0 on success.
4685 */
4686
4687 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4688 {
4689 nic_t *sp = dev->priv;
4690 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
4692 info->port = PORT_FIBRE;
4693 /* info->transceiver?? TODO */
4694
4695 if (netif_carrier_ok(sp->dev)) {
4696 info->speed = 10000;
4697 info->duplex = DUPLEX_FULL;
4698 } else {
4699 info->speed = -1;
4700 info->duplex = -1;
4701 }
4702
4703 info->autoneg = AUTONEG_DISABLE;
4704 return 0;
4705 }
4706
4707 /**
4708 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4709 * @sp : private member of the device structure, which is a pointer to the
4710 * s2io_nic structure.
4711 * @info : pointer to the structure with parameters given by ethtool to
4712 * return driver information.
4713 * Description:
 * Returns driver specific information like name, version etc. to ethtool.
4715 * Return value:
4716 * void
4717 */
4718
4719 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4720 struct ethtool_drvinfo *info)
4721 {
4722 nic_t *sp = dev->priv;
4723
4724 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4725 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4726 strncpy(info->fw_version, "", sizeof(info->fw_version));
4727 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4728 info->regdump_len = XENA_REG_SPACE;
4729 info->eedump_len = XENA_EEPROM_SPACE;
4730 info->testinfo_len = S2IO_TEST_LEN;
4731 info->n_stats = S2IO_STAT_LEN;
4732 }
4733
4734 /**
 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
4736 * @sp: private member of the device structure, which is a pointer to the
4737 * s2io_nic structure.
4738 * @regs : pointer to the structure with parameters given by ethtool for
4739 * dumping the registers.
 * @reg_space: The buffer into which all the registers are dumped.
4741 * Description:
4742 * Dumps the entire register space of xFrame NIC into the user given
4743 * buffer area.
4744 * Return value :
4745 * void .
4746 */
4747
4748 static void s2io_ethtool_gregs(struct net_device *dev,
4749 struct ethtool_regs *regs, void *space)
4750 {
4751 int i;
4752 u64 reg;
4753 u8 *reg_space = (u8 *) space;
4754 nic_t *sp = dev->priv;
4755
4756 regs->len = XENA_REG_SPACE;
4757 regs->version = sp->pdev->subsystem_device;
4758
4759 for (i = 0; i < regs->len; i += 8) {
4760 reg = readq(sp->bar0 + i);
4761 memcpy((reg_space + i), &reg, 8);
4762 }
4763 }
4764
4765 /**
4766 * s2io_phy_id - timer function that alternates adapter LED.
4767 * @data : address of the private member of the device structure, which
 * is a pointer to the s2io_nic structure, provided as an unsigned long.
4769 * Description: This is actually the timer function that alternates the
4770 * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
4772 * once every second.
4773 */
4774 static void s2io_phy_id(unsigned long data)
4775 {
4776 nic_t *sp = (nic_t *) data;
4777 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4778 u64 val64 = 0;
4779 u16 subid;
4780
4781 subid = sp->pdev->subsystem_device;
4782 if ((sp->device_type == XFRAME_II_DEVICE) ||
4783 ((subid & 0xFF) >= 0x07)) {
4784 val64 = readq(&bar0->gpio_control);
4785 val64 ^= GPIO_CTRL_GPIO_0;
4786 writeq(val64, &bar0->gpio_control);
4787 } else {
4788 val64 = readq(&bar0->adapter_control);
4789 val64 ^= ADAPTER_LED_ON;
4790 writeq(val64, &bar0->adapter_control);
4791 }
4792
4793 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4794 }
4795
4796 /**
4797 * s2io_ethtool_idnic - To physically identify the nic on the system.
4798 * @sp : private member of the device structure, which is a pointer to the
4799 * s2io_nic structure.
4800 * @id : pointer to the structure with identification parameters given by
4801 * ethtool.
4802 * Description: Used to physically identify the NIC on the system.
4803 * The Link LED will blink for a time specified by the user for
4804 * identification.
4805 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
4807 * Return value:
4808 * int , returns 0 on success
4809 */
4810
4811 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4812 {
4813 u64 val64 = 0, last_gpio_ctrl_val;
4814 nic_t *sp = dev->priv;
4815 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4816 u16 subid;
4817
4818 subid = sp->pdev->subsystem_device;
4819 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4820 if ((sp->device_type == XFRAME_I_DEVICE) &&
4821 ((subid & 0xFF) < 0x07)) {
4822 val64 = readq(&bar0->adapter_control);
4823 if (!(val64 & ADAPTER_CNTL_EN)) {
4824 printk(KERN_ERR
4825 "Adapter Link down, cannot blink LED\n");
4826 return -EFAULT;
4827 }
4828 }
4829 if (sp->id_timer.function == NULL) {
4830 init_timer(&sp->id_timer);
4831 sp->id_timer.function = s2io_phy_id;
4832 sp->id_timer.data = (unsigned long) sp;
4833 }
4834 mod_timer(&sp->id_timer, jiffies);
4835 if (data)
4836 msleep_interruptible(data * HZ);
4837 else
4838 msleep_interruptible(MAX_FLICKER_TIME);
4839 del_timer_sync(&sp->id_timer);
4840
4841 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4842 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4843 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4844 }
4845
4846 return 0;
4847 }
4848
4849 /**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
4851 * @sp : private member of the device structure, which is a pointer to the
4852 * s2io_nic structure.
4853 * @ep : pointer to the structure with pause parameters given by ethtool.
4854 * Description:
4855 * Returns the Pause frame generation and reception capability of the NIC.
4856 * Return value:
4857 * void
4858 */
4859 static void s2io_ethtool_getpause_data(struct net_device *dev,
4860 struct ethtool_pauseparam *ep)
4861 {
4862 u64 val64;
4863 nic_t *sp = dev->priv;
4864 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4865
4866 val64 = readq(&bar0->rmac_pause_cfg);
4867 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4868 ep->tx_pause = TRUE;
4869 if (val64 & RMAC_PAUSE_RX_ENABLE)
4870 ep->rx_pause = TRUE;
4871 ep->autoneg = FALSE;
4872 }
4873
4874 /**
4875 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4876 * @sp : private member of the device structure, which is a pointer to the
4877 * s2io_nic structure.
4878 * @ep : pointer to the structure with pause parameters given by ethtool.
4879 * Description:
4880 * It can be used to set or reset Pause frame generation or reception
4881 * support of the NIC.
4882 * Return value:
4883 * int, returns 0 on Success
4884 */
4885
4886 static int s2io_ethtool_setpause_data(struct net_device *dev,
4887 struct ethtool_pauseparam *ep)
4888 {
4889 u64 val64;
4890 nic_t *sp = dev->priv;
4891 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4892
4893 val64 = readq(&bar0->rmac_pause_cfg);
4894 if (ep->tx_pause)
4895 val64 |= RMAC_PAUSE_GEN_ENABLE;
4896 else
4897 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4898 if (ep->rx_pause)
4899 val64 |= RMAC_PAUSE_RX_ENABLE;
4900 else
4901 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4902 writeq(val64, &bar0->rmac_pause_cfg);
4903 return 0;
4904 }
4905
4906 /**
4907 * read_eeprom - reads 4 bytes of data from user given offset.
4908 * @sp : private member of the device structure, which is a pointer to the
4909 * s2io_nic structure.
 * @off : offset from which the data is to be read
 * @data : It's an output parameter where the data read at the given
4912 * offset is stored.
4913 * Description:
4914 * Will read 4 bytes of data from the user given offset and return the
4915 * read data.
 * NOTE: Will allow reading only the part of the EEPROM visible through the
4917 * I2C bus.
4918 * Return value:
 * non-zero on failure and 0 on success.
4920 */
4921
4922 #define S2IO_DEV_ID 5
4923 static int read_eeprom(nic_t * sp, int off, u64 * data)
4924 {
4925 int ret = -1;
4926 u32 exit_cnt = 0;
4927 u64 val64;
4928 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4929
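	/*
	 * Xframe I exposes the serial EEPROM through its I2C controller,
	 * while Xframe II reaches it through the SPI controller, so the
	 * two device types take entirely separate paths below.
	 */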
4930 if (sp->device_type == XFRAME_I_DEVICE) {
4931 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4932 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4933 I2C_CONTROL_CNTL_START;
4934 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4935
4936 while (exit_cnt < 5) {
4937 val64 = readq(&bar0->i2c_control);
4938 if (I2C_CONTROL_CNTL_END(val64)) {
4939 *data = I2C_CONTROL_GET_DATA(val64);
4940 ret = 0;
4941 break;
4942 }
4943 msleep(50);
4944 exit_cnt++;
4945 }
4946 }
4947
4948 if (sp->device_type == XFRAME_II_DEVICE) {
4949 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4950 SPI_CONTROL_BYTECNT(0x3) |
4951 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4952 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4953 val64 |= SPI_CONTROL_REQ;
4954 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4955 while (exit_cnt < 5) {
4956 val64 = readq(&bar0->spi_control);
4957 if (val64 & SPI_CONTROL_NACK) {
4958 ret = 1;
4959 break;
4960 } else if (val64 & SPI_CONTROL_DONE) {
4961 *data = readq(&bar0->spi_data);
4962 *data &= 0xffffff;
4963 ret = 0;
4964 break;
4965 }
4966 msleep(50);
4967 exit_cnt++;
4968 }
4969 }
4970 return ret;
4971 }
4972
4973 /**
4974 * write_eeprom - actually writes the relevant part of the data value.
4975 * @sp : private member of the device structure, which is a pointer to the
4976 * s2io_nic structure.
4977 * @off : offset at which the data must be written
4978 * @data : The data that is to be written
4979 * @cnt : Number of bytes of the data that are actually to be written into
4980 * the Eeprom. (max of 3)
4981 * Description:
4982 * Actually writes the relevant part of the data value into the Eeprom
4983 * through the I2C bus.
4984 * Return value:
 * 0 on success, non-zero on failure.
4986 */
4987
4988 static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4989 {
4990 int exit_cnt = 0, ret = -1;
4991 u64 val64;
4992 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4993
4994 if (sp->device_type == XFRAME_I_DEVICE) {
4995 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4996 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4997 I2C_CONTROL_CNTL_START;
4998 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4999
5000 while (exit_cnt < 5) {
5001 val64 = readq(&bar0->i2c_control);
5002 if (I2C_CONTROL_CNTL_END(val64)) {
5003 if (!(val64 & I2C_CONTROL_NACK))
5004 ret = 0;
5005 break;
5006 }
5007 msleep(50);
5008 exit_cnt++;
5009 }
5010 }
5011
5012 if (sp->device_type == XFRAME_II_DEVICE) {
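		/* A byte count of 8 is evidently encoded as 0 in
		 * SPI_CONTROL_BYTECNT, hence the remapping below. */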
5013 int write_cnt = (cnt == 8) ? 0 : cnt;
5014 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5015
5016 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5017 SPI_CONTROL_BYTECNT(write_cnt) |
5018 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5019 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5020 val64 |= SPI_CONTROL_REQ;
5021 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5022 while (exit_cnt < 5) {
5023 val64 = readq(&bar0->spi_control);
5024 if (val64 & SPI_CONTROL_NACK) {
5025 ret = 1;
5026 break;
5027 } else if (val64 & SPI_CONTROL_DONE) {
5028 ret = 0;
5029 break;
5030 }
5031 msleep(50);
5032 exit_cnt++;
5033 }
5034 }
5035 return ret;
}

5037 static void s2io_vpd_read(nic_t *nic)
5038 {
	u8 vpd_data[256], data;
5040 int i=0, cnt, fail = 0;
5041 int vpd_addr = 0x80;
5042
5043 if (nic->device_type == XFRAME_II_DEVICE) {
5044 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5045 vpd_addr = 0x80;
	} else {
5048 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5049 vpd_addr = 0x50;
5050 }
5051
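	/*
	 * Read the VPD area one dword at a time through the PCI VPD
	 * capability: write the address with the flag byte cleared to
	 * request a read, then poll the flag until the hardware sets 0x80
	 * to signal that the data register is ready.  The capability
	 * offset is hard-coded per device type rather than discovered via
	 * pci_find_capability().
	 */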
	for (i = 0; i < 256; i += 4) {
5053 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5054 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5055 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
5057 msleep(2);
5058 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5059 if (data == 0x80)
5060 break;
5061 }
5062 if (cnt >= 5) {
5063 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5064 fail = 1;
5065 break;
5066 }
5067 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5068 (u32 *)&vpd_data[i]);
5069 }
5070 if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) {
5071 memset(nic->product_name, 0, vpd_data[1]);
5072 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5073 }
5074 }
5075
5076 /**
5077 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
5079 * @eeprom : pointer to the user level structure provided by ethtool,
5080 * containing all relevant information.
5081 * @data_buf : user defined value to be written into Eeprom.
5082 * Description: Reads the values stored in the Eeprom at given offset
 * for a given length. Stores these values in the input argument data
 * buffer 'data_buf' and returns these to the caller (ethtool).
5085 * Return value:
5086 * int 0 on success
5087 */
5088
5089 static int s2io_ethtool_geeprom(struct net_device *dev,
5090 struct ethtool_eeprom *eeprom, u8 * data_buf)
5091 {
5092 u32 i, valid;
5093 u64 data;
5094 nic_t *sp = dev->priv;
5095
5096 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5097
5098 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5099 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5100
5101 for (i = 0; i < eeprom->len; i += 4) {
5102 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5103 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5104 return -EFAULT;
5105 }
5106 valid = INV(data);
5107 memcpy((data_buf + i), &valid, 4);
5108 }
5109 return 0;
5110 }
5111
5112 /**
5113 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5114 * @sp : private member of the device structure, which is a pointer to the
5115 * s2io_nic structure.
5116 * @eeprom : pointer to the user level structure provided by ethtool,
5117 * containing all relevant information.
 * @data_buf : user defined value to be written into Eeprom.
5119 * Description:
5120 * Tries to write the user provided value in the Eeprom, at the offset
5121 * given by the user.
5122 * Return value:
5123 * 0 on success, -EFAULT on failure.
5124 */
5125
5126 static int s2io_ethtool_seeprom(struct net_device *dev,
5127 struct ethtool_eeprom *eeprom,
5128 u8 * data_buf)
5129 {
5130 int len = eeprom->len, cnt = 0;
5131 u64 valid = 0, data;
5132 nic_t *sp = dev->priv;
5133
5134 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5135 DBG_PRINT(ERR_DBG,
5136 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
		DBG_PRINT(ERR_DBG, "is wrong, it's not 0x%x\n",
5138 eeprom->magic);
5139 return -EFAULT;
5140 }
5141
5142 while (len) {
5143 data = (u32) data_buf[cnt] & 0x000000FF;
5144 if (data) {
5145 valid = (u32) (data << 24);
5146 } else
5147 valid = data;
5148
5149 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5150 DBG_PRINT(ERR_DBG,
5151 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5152 DBG_PRINT(ERR_DBG,
5153 "write into the specified offset\n");
5154 return -EFAULT;
5155 }
5156 cnt++;
5157 len--;
5158 }
5159
5160 return 0;
5161 }
5162
5163 /**
5164 * s2io_register_test - reads and writes into all clock domains.
5165 * @sp : private member of the device structure, which is a pointer to the
5166 * s2io_nic structure.
 * @data : variable that returns the result of each of the tests
 * conducted by the driver.
5169 * Description:
 * Read and write into all clock domains. The NIC has 3 clock domains;
 * verify that registers in all the three regions are accessible.
5172 * Return value:
5173 * 0 on success.
5174 */
5175
5176 static int s2io_register_test(nic_t * sp, uint64_t * data)
5177 {
5178 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5179 u64 val64 = 0, exp_val;
5180 int fail = 0;
5181
5182 val64 = readq(&bar0->pif_rd_swapper_fb);
5183 if (val64 != 0x123456789abcdefULL) {
5184 fail = 1;
5185 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5186 }
5187
5188 val64 = readq(&bar0->rmac_pause_cfg);
5189 if (val64 != 0xc000ffff00000000ULL) {
5190 fail = 1;
5191 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5192 }
5193
5194 val64 = readq(&bar0->rx_queue_cfg);
5195 if (sp->device_type == XFRAME_II_DEVICE)
5196 exp_val = 0x0404040404040404ULL;
5197 else
5198 exp_val = 0x0808080808080808ULL;
5199 if (val64 != exp_val) {
5200 fail = 1;
5201 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5202 }
5203
5204 val64 = readq(&bar0->xgxs_efifo_cfg);
5205 if (val64 != 0x000000001923141EULL) {
5206 fail = 1;
5207 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5208 }
5209
5210 val64 = 0x5A5A5A5A5A5A5A5AULL;
5211 writeq(val64, &bar0->xmsi_data);
5212 val64 = readq(&bar0->xmsi_data);
5213 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5214 fail = 1;
5215 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5216 }
5217
5218 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5219 writeq(val64, &bar0->xmsi_data);
5220 val64 = readq(&bar0->xmsi_data);
5221 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5222 fail = 1;
5223 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5224 }
5225
5226 *data = fail;
5227 return fail;
5228 }
5229
5230 /**
 * s2io_eeprom_test - to verify that EEPROM in the Xena can be programmed.
5232 * @sp : private member of the device structure, which is a pointer to the
5233 * s2io_nic structure.
5234 * @data:variable that returns the result of each of the test conducted by
5235 * the driver.
5236 * Description:
5237 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5238 * register.
5239 * Return value:
5240 * 0 on success.
5241 */
5242
5243 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
5244 {
5245 int fail = 0;
5246 u64 ret_data, org_4F0, org_7F0;
5247 u8 saved_4F0 = 0, saved_7F0 = 0;
5248 struct net_device *dev = sp->dev;
5249
5250 /* Test Write Error at offset 0 */
5251 /* Note that SPI interface allows write access to all areas
5252 * of EEPROM. Hence doing all negative testing only for Xframe I.
5253 */
5254 if (sp->device_type == XFRAME_I_DEVICE)
5255 if (!write_eeprom(sp, 0, 0, 3))
5256 fail = 1;
5257
5258 /* Save current values at offsets 0x4F0 and 0x7F0 */
5259 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5260 saved_4F0 = 1;
5261 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5262 saved_7F0 = 1;
5263
5264 /* Test Write at offset 4f0 */
5265 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5266 fail = 1;
5267 if (read_eeprom(sp, 0x4F0, &ret_data))
5268 fail = 1;
5269
5270 if (ret_data != 0x012345) {
5271 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5272 "Data written %llx Data read %llx\n",
5273 dev->name, (unsigned long long)0x12345,
5274 (unsigned long long)ret_data);
5275 fail = 1;
5276 }
5277
	/* Reset the EEPROM data to 0xFFFFFF */
5279 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5280
5281 /* Test Write Request Error at offset 0x7c */
5282 if (sp->device_type == XFRAME_I_DEVICE)
5283 if (!write_eeprom(sp, 0x07C, 0, 3))
5284 fail = 1;
5285
5286 /* Test Write Request at offset 0x7f0 */
5287 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5288 fail = 1;
5289 if (read_eeprom(sp, 0x7F0, &ret_data))
5290 fail = 1;
5291
5292 if (ret_data != 0x012345) {
5293 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5294 "Data written %llx Data read %llx\n",
5295 dev->name, (unsigned long long)0x12345,
5296 (unsigned long long)ret_data);
5297 fail = 1;
5298 }
5299
	/* Reset the EEPROM data to 0xFFFFFF */
5301 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5302
5303 if (sp->device_type == XFRAME_I_DEVICE) {
5304 /* Test Write Error at offset 0x80 */
5305 if (!write_eeprom(sp, 0x080, 0, 3))
5306 fail = 1;
5307
5308 /* Test Write Error at offset 0xfc */
5309 if (!write_eeprom(sp, 0x0FC, 0, 3))
5310 fail = 1;
5311
5312 /* Test Write Error at offset 0x100 */
5313 if (!write_eeprom(sp, 0x100, 0, 3))
5314 fail = 1;
5315
5316 /* Test Write Error at offset 4ec */
5317 if (!write_eeprom(sp, 0x4EC, 0, 3))
5318 fail = 1;
5319 }
5320
5321 /* Restore values at offsets 0x4F0 and 0x7F0 */
5322 if (saved_4F0)
5323 write_eeprom(sp, 0x4F0, org_4F0, 3);
5324 if (saved_7F0)
5325 write_eeprom(sp, 0x7F0, org_7F0, 3);
5326
5327 *data = fail;
5328 return fail;
5329 }
5330
5331 /**
5332 * s2io_bist_test - invokes the MemBist test of the card .
5333 * @sp : private member of the device structure, which is a pointer to the
5334 * s2io_nic structure.
5335 * @data:variable that returns the result of each of the test conducted by
5336 * the driver.
5337 * Description:
5338 * This invokes the MemBist test of the card. We give around
5339 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
5341 * Return value:
5342 * 0 on success and -1 on failure.
5343 */
5344
5345 static int s2io_bist_test(nic_t * sp, uint64_t * data)
5346 {
5347 u8 bist = 0;
5348 int cnt = 0, ret = -1;
5349
5350 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5351 bist |= PCI_BIST_START;
	/* PCI_BIST is a one-byte register at an odd offset, so a word
	 * write would be misaligned; use a byte write instead */
	pci_write_config_byte(sp->pdev, PCI_BIST, bist);
5353
5354 while (cnt < 20) {
5355 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5356 if (!(bist & PCI_BIST_START)) {
5357 *data = (bist & PCI_BIST_CODE_MASK);
5358 ret = 0;
5359 break;
5360 }
5361 msleep(100);
5362 cnt++;
5363 }
5364
5365 return ret;
5366 }
5367
5368 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
5371 * s2io_nic structure.
5372 * @data: variable that returns the result of each of the test conducted by
5373 * the driver.
5374 * Description:
5375 * The function verifies the link state of the NIC and updates the input
5376 * argument 'data' appropriately.
5377 * Return value:
5378 * 0 on success.
5379 */
5380
5381 static int s2io_link_test(nic_t * sp, uint64_t * data)
5382 {
5383 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5384 u64 val64;
5385
5386 val64 = readq(&bar0->adapter_status);
	if (!(LINK_IS_UP(val64)))
5388 *data = 1;
5389 else
5390 *data = 0;
5391
5392 return 0;
5393 }
5394
5395 /**
5396 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test
 * conducted by the driver.
5401 * Description:
 * This is one of the offline tests that checks the read and write
 * access to the RldRam chip on the NIC.
5404 * Return value:
5405 * 0 on success.
5406 */
5407
5408 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
5409 {
5410 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5411 u64 val64;
5412 int cnt, iteration = 0, test_fail = 0;
5413
5414 val64 = readq(&bar0->adapter_control);
5415 val64 &= ~ADAPTER_ECC_EN;
5416 writeq(val64, &bar0->adapter_control);
5417
5418 val64 = readq(&bar0->mc_rldram_test_ctrl);
5419 val64 |= MC_RLDRAM_TEST_MODE;
5420 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5421
5422 val64 = readq(&bar0->mc_rldram_mrs);
5423 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5424 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5425
5426 val64 |= MC_RLDRAM_MRS_ENABLE;
5427 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5428
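	/*
	 * Two passes over the test engine: the first writes three 64-bit
	 * patterns, the second repeats them with the upper 48 bits
	 * inverted.  Each pass triggers a hardware write then a read-back
	 * compare and checks MC_RLDRAM_TEST_PASS.
	 */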
5429 while (iteration < 2) {
5430 val64 = 0x55555555aaaa0000ULL;
5431 if (iteration == 1) {
5432 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5433 }
5434 writeq(val64, &bar0->mc_rldram_test_d0);
5435
5436 val64 = 0xaaaa5a5555550000ULL;
5437 if (iteration == 1) {
5438 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5439 }
5440 writeq(val64, &bar0->mc_rldram_test_d1);
5441
5442 val64 = 0x55aaaaaaaa5a0000ULL;
5443 if (iteration == 1) {
5444 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5445 }
5446 writeq(val64, &bar0->mc_rldram_test_d2);
5447
5448 val64 = (u64) (0x0000003ffffe0100ULL);
5449 writeq(val64, &bar0->mc_rldram_test_add);
5450
5451 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5452 MC_RLDRAM_TEST_GO;
5453 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5454
5455 for (cnt = 0; cnt < 5; cnt++) {
5456 val64 = readq(&bar0->mc_rldram_test_ctrl);
5457 if (val64 & MC_RLDRAM_TEST_DONE)
5458 break;
5459 msleep(200);
5460 }
5461
5462 if (cnt == 5)
5463 break;
5464
5465 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5466 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5467
5468 for (cnt = 0; cnt < 5; cnt++) {
5469 val64 = readq(&bar0->mc_rldram_test_ctrl);
5470 if (val64 & MC_RLDRAM_TEST_DONE)
5471 break;
5472 msleep(500);
5473 }
5474
5475 if (cnt == 5)
5476 break;
5477
5478 val64 = readq(&bar0->mc_rldram_test_ctrl);
5479 if (!(val64 & MC_RLDRAM_TEST_PASS))
5480 test_fail = 1;
5481
5482 iteration++;
5483 }
5484
5485 *data = test_fail;
5486
5487 /* Bring the adapter out of test mode */
5488 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5489
5490 return test_fail;
5491 }
5492
5493 /**
5494 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
5495 * @dev : pointer to the net_device structure of the card under
5496 * test.
5497 * @ethtest : pointer to a ethtool command specific structure that will be
5498 * returned to the user.
5499 * @data : variable that returns the result of each of the test
5500 * conducted by the driver.
5501 * Description:
5502 * This function conducts 5 tests (4 offline and 1 online) to determine
5503 * the health of the card.
5504 * Return value:
5505 * void
5506 */
5507
5508 static void s2io_ethtool_test(struct net_device *dev,
5509 struct ethtool_test *ethtest,
5510 uint64_t * data)
5511 {
5512 nic_t *sp = dev->priv;
5513 int orig_state = netif_running(sp->dev);
5514
5515 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5516 /* Offline Tests. */
5517 if (orig_state)
5518 s2io_close(sp->dev);
5519
5520 if (s2io_register_test(sp, &data[0]))
5521 ethtest->flags |= ETH_TEST_FL_FAILED;
5522
5523 s2io_reset(sp);
5524
5525 if (s2io_rldram_test(sp, &data[3]))
5526 ethtest->flags |= ETH_TEST_FL_FAILED;
5527
5528 s2io_reset(sp);
5529
5530 if (s2io_eeprom_test(sp, &data[1]))
5531 ethtest->flags |= ETH_TEST_FL_FAILED;
5532
5533 if (s2io_bist_test(sp, &data[4]))
5534 ethtest->flags |= ETH_TEST_FL_FAILED;
5535
5536 if (orig_state)
5537 s2io_open(sp->dev);
5538
5539 data[2] = 0;
5540 } else {
5541 /* Online Tests. */
5542 if (!orig_state) {
5543 DBG_PRINT(ERR_DBG,
5544 "%s: is not up, cannot run test\n",
5545 dev->name);
5546 data[0] = -1;
5547 data[1] = -1;
5548 data[2] = -1;
5549 data[3] = -1;
5550 data[4] = -1;
return;
5551 }
5552
5553 if (s2io_link_test(sp, &data[2]))
5554 ethtest->flags |= ETH_TEST_FL_FAILED;
5555
5556 data[0] = 0;
5557 data[1] = 0;
5558 data[3] = 0;
5559 data[4] = 0;
5560 }
5561 }
5562
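/*
 * The self-tests above are driven from user space through the standard
 * ethtool interface, e.g. (assuming the interface is named eth0):
 *
 *	ethtool -t eth0 offline	- run the offline and online tests
 *	ethtool -t eth0 online	- run only the online (link) test
 */
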
5563 static void s2io_get_ethtool_stats(struct net_device *dev,
5564 struct ethtool_stats *estats,
5565 u64 * tmp_stats)
5566 {
5567 int i = 0;
5568 nic_t *sp = dev->priv;
5569 StatInfo_t *stat_info = sp->mac_control.stats_info;
5570
5571 s2io_updt_stats(sp);
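/*
 * Most 32-bit hardware counters below have a companion 32-bit overflow
 * register; the two halves are merged into one 64-bit statistic as:
 *	(u64)le32_to_cpu(oflow) << 32 | le32_to_cpu(cnt)
 */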
5572 tmp_stats[i++] =
5573 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5574 le32_to_cpu(stat_info->tmac_frms);
5575 tmp_stats[i++] =
5576 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5577 le32_to_cpu(stat_info->tmac_data_octets);
5578 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5579 tmp_stats[i++] =
5580 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5581 le32_to_cpu(stat_info->tmac_mcst_frms);
5582 tmp_stats[i++] =
5583 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5584 le32_to_cpu(stat_info->tmac_bcst_frms);
5585 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5586 tmp_stats[i++] =
5587 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5588 le32_to_cpu(stat_info->tmac_ttl_octets);
5589 tmp_stats[i++] =
5590 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5591 le32_to_cpu(stat_info->tmac_ucst_frms);
5592 tmp_stats[i++] =
5593 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5594 le32_to_cpu(stat_info->tmac_nucst_frms);
5595 tmp_stats[i++] =
5596 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5597 le32_to_cpu(stat_info->tmac_any_err_frms);
5598 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5599 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5600 tmp_stats[i++] =
5601 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5602 le32_to_cpu(stat_info->tmac_vld_ip);
5603 tmp_stats[i++] =
5604 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5605 le32_to_cpu(stat_info->tmac_drop_ip);
5606 tmp_stats[i++] =
5607 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5608 le32_to_cpu(stat_info->tmac_icmp);
5609 tmp_stats[i++] =
5610 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5611 le32_to_cpu(stat_info->tmac_rst_tcp);
5612 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5613 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5614 le32_to_cpu(stat_info->tmac_udp);
5615 tmp_stats[i++] =
5616 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5617 le32_to_cpu(stat_info->rmac_vld_frms);
5618 tmp_stats[i++] =
5619 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5620 le32_to_cpu(stat_info->rmac_data_octets);
5621 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5622 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5623 tmp_stats[i++] =
5624 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5625 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5626 tmp_stats[i++] =
5627 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5628 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5629 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5630 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5631 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5632 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5633 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5634 tmp_stats[i++] =
5635 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5636 le32_to_cpu(stat_info->rmac_ttl_octets);
5637 tmp_stats[i++] =
5638 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5639 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5640 tmp_stats[i++] =
5641 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5642 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5643 tmp_stats[i++] =
5644 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5645 le32_to_cpu(stat_info->rmac_discarded_frms);
5646 tmp_stats[i++] =
5647 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5648 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5649 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5650 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5651 tmp_stats[i++] =
5652 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5653 le32_to_cpu(stat_info->rmac_usized_frms);
5654 tmp_stats[i++] =
5655 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5656 le32_to_cpu(stat_info->rmac_osized_frms);
5657 tmp_stats[i++] =
5658 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5659 le32_to_cpu(stat_info->rmac_frag_frms);
5660 tmp_stats[i++] =
5661 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5662 le32_to_cpu(stat_info->rmac_jabber_frms);
5663 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5664 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5665 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5666 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5667 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5668 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5669 tmp_stats[i++] =
5670 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5671 le32_to_cpu(stat_info->rmac_ip);
5672 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5673 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5674 tmp_stats[i++] =
5675 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5676 le32_to_cpu(stat_info->rmac_drop_ip);
5677 tmp_stats[i++] =
5678 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5679 le32_to_cpu(stat_info->rmac_icmp);
5680 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5681 tmp_stats[i++] =
5682 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5683 le32_to_cpu(stat_info->rmac_udp);
5684 tmp_stats[i++] =
5685 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5686 le32_to_cpu(stat_info->rmac_err_drp_udp);
5687 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5688 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5689 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5690 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5691 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5692 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5693 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5694 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5695 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5696 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5697 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5698 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5699 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5700 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5701 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5702 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5703 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5704 tmp_stats[i++] =
5705 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5706 le32_to_cpu(stat_info->rmac_pause_cnt);
5707 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5708 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5709 tmp_stats[i++] =
5710 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5711 le32_to_cpu(stat_info->rmac_accepted_ip);
5712 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5713 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5714 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5715 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5716 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5717 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5718 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5719 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5720 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5721 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5722 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5723 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5724 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5725 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5726 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5727 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5728 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5729 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5730 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5731 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5732 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5733 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5734 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5735 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5736 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5737 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5738 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5739 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5740 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5741 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5742 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5743 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5744 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5745 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5746 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5747 tmp_stats[i++] = 0;
5748 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5749 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5750 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5751 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5752 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5753 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5754 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5755 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5756 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5757 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5758 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5759 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5760 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5761 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5762 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5763 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5764 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5765 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5766 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5767 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5768 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5769 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5770 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5771 if (stat_info->sw_stat.num_aggregations) {
5772 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5773 int count = 0;
5774 /*
5775 * Since 64-bit divide does not work on all platforms,
5776 * do repeated subtraction.
5777 */
5778 while (tmp >= stat_info->sw_stat.num_aggregations) {
5779 tmp -= stat_info->sw_stat.num_aggregations;
5780 count++;
5781 }
5782 tmp_stats[i++] = count;
5783 }
5784 else
5785 tmp_stats[i++] = 0;
5786 }
5787
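/*
 * Note on the repeated-subtraction loop above: the kernel's portable
 * 64-bit division helper do_div() from <asm/div64.h> (already included
 * by this driver) could compute the same average; a sketch only, not
 * part of the driver:
 *
 *	u64 avg = stat_info->sw_stat.sum_avg_pkts_aggregated;
 *	do_div(avg, stat_info->sw_stat.num_aggregations);
 *	tmp_stats[i++] = avg;
 *
 * do_div() divides the u64 in place by a 32-bit divisor and returns
 * the remainder.
 */
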
5788 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5789 {
5790 return (XENA_REG_SPACE);
5791 }
5792
5793
5794 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5795 {
5796 nic_t *sp = dev->priv;
5797
5798 return (sp->rx_csum);
5799 }
5800
5801 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5802 {
5803 nic_t *sp = dev->priv;
5804
5805 if (data)
5806 sp->rx_csum = 1;
5807 else
5808 sp->rx_csum = 0;
5809
5810 return 0;
5811 }
5812
5813 static int s2io_get_eeprom_len(struct net_device *dev)
5814 {
5815 return (XENA_EEPROM_SPACE);
5816 }
5817
5818 static int s2io_ethtool_self_test_count(struct net_device *dev)
5819 {
5820 return (S2IO_TEST_LEN);
5821 }
5822
5823 static void s2io_ethtool_get_strings(struct net_device *dev,
5824 u32 stringset, u8 * data)
5825 {
5826 switch (stringset) {
5827 case ETH_SS_TEST:
5828 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5829 break;
5830 case ETH_SS_STATS:
5831 memcpy(data, &ethtool_stats_keys,
5832 sizeof(ethtool_stats_keys));
5833 }
5834 }
5835 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5836 {
5837 return (S2IO_STAT_LEN);
5838 }
5839
5840 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5841 {
5842 if (data)
5843 dev->features |= NETIF_F_IP_CSUM;
5844 else
5845 dev->features &= ~NETIF_F_IP_CSUM;
5846
5847 return 0;
5848 }
5849
5850
5851 static struct ethtool_ops netdev_ethtool_ops = {
5852 .get_settings = s2io_ethtool_gset,
5853 .set_settings = s2io_ethtool_sset,
5854 .get_drvinfo = s2io_ethtool_gdrvinfo,
5855 .get_regs_len = s2io_ethtool_get_regs_len,
5856 .get_regs = s2io_ethtool_gregs,
5857 .get_link = ethtool_op_get_link,
5858 .get_eeprom_len = s2io_get_eeprom_len,
5859 .get_eeprom = s2io_ethtool_geeprom,
5860 .set_eeprom = s2io_ethtool_seeprom,
5861 .get_pauseparam = s2io_ethtool_getpause_data,
5862 .set_pauseparam = s2io_ethtool_setpause_data,
5863 .get_rx_csum = s2io_ethtool_get_rx_csum,
5864 .set_rx_csum = s2io_ethtool_set_rx_csum,
5865 .get_tx_csum = ethtool_op_get_tx_csum,
5866 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5867 .get_sg = ethtool_op_get_sg,
5868 .set_sg = ethtool_op_set_sg,
5869 #ifdef NETIF_F_TSO
5870 .get_tso = ethtool_op_get_tso,
5871 .set_tso = ethtool_op_set_tso,
5872 #endif
5873 .get_ufo = ethtool_op_get_ufo,
5874 .set_ufo = ethtool_op_set_ufo,
5875 .self_test_count = s2io_ethtool_self_test_count,
5876 .self_test = s2io_ethtool_test,
5877 .get_strings = s2io_ethtool_get_strings,
5878 .phys_id = s2io_ethtool_idnic,
5879 .get_stats_count = s2io_ethtool_get_stats_count,
5880 .get_ethtool_stats = s2io_get_ethtool_stats
5881 };
5882
5883 /**
5884 * s2io_ioctl - Entry point for the Ioctl
5885 * @dev : Device pointer.
5886 * @rq : An IOCTL specific structure, that can contain a pointer to
5887 * a proprietary structure used to pass information to the driver.
5888 * @cmd : This is used to distinguish between the different commands that
5889 * can be passed to the IOCTL functions.
5890 * Description:
5891 * Currently no special functionality is supported in IOCTL, hence the
5892 * function always returns -EOPNOTSUPP.
5893 */
5894
5895 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5896 {
5897 return -EOPNOTSUPP;
5898 }
5899
5900 /**
5901 * s2io_change_mtu - entry point to change MTU size for the device.
5902 * @dev : device pointer.
5903 * @new_mtu : the new MTU size for the device.
5904 * Description: A driver entry point to change MTU size for the device.
5905 * If the device is running, it is brought down and back up with the new MTU.
5906 * Return value:
5907 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5908 * file on failure.
5909 */
5910
5911 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5912 {
5913 nic_t *sp = dev->priv;
5914
5915 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5916 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5917 dev->name);
5918 return -EPERM;
5919 }
5920
5921 dev->mtu = new_mtu;
5922 if (netif_running(dev)) {
5923 s2io_card_down(sp, 0);
5924 netif_stop_queue(dev);
5925 if (s2io_card_up(sp)) {
5926 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5927 __FUNCTION__);
5928 }
5929 if (netif_queue_stopped(dev))
5930 netif_wake_queue(dev);
5931 } else { /* Device is down */
5932 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5933 u64 val64 = new_mtu;
5934
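/* vBIT(val64, 2, 14) positions the new MTU in the 14-bit maximum
 * payload length field of the register (field placement per the
 * vBIT macro in s2io.h). */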
5935 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5936 }
5937
5938 return 0;
5939 }
5940
5941 /**
5942 * s2io_tasklet - Bottom half of the ISR.
5943 * @dev_addr : address of the device structure in dma_addr_t format.
5944 * Description:
5945 * This is the tasklet or the bottom half of the ISR. This is
5946 * an extension of the ISR which is scheduled by the scheduler to be run
5947 * when the load on the CPU is low. All low priority tasks of the ISR can
5948 * be pushed into the tasklet. For now the tasklet is used only to
5949 * replenish the Rx buffers in the Rx buffer descriptors.
5950 * Return value:
5951 * void.
5952 */
5953
5954 static void s2io_tasklet(unsigned long dev_addr)
5955 {
5956 struct net_device *dev = (struct net_device *) dev_addr;
5957 nic_t *sp = dev->priv;
5958 int i, ret;
5959 mac_info_t *mac_control;
5960 struct config_param *config;
5961
5962 mac_control = &sp->mac_control;
5963 config = &sp->config;
5964
5965 if (!TASKLET_IN_USE) {
5966 for (i = 0; i < config->rx_ring_num; i++) {
5967 ret = fill_rx_buffers(sp, i);
5968 if (ret == -ENOMEM) {
5969 DBG_PRINT(ERR_DBG, "%s: Out of ",
5970 dev->name);
5971 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5972 break;
5973 } else if (ret == -EFILL) {
5974 DBG_PRINT(ERR_DBG,
5975 "%s: Rx Ring %d is full\n",
5976 dev->name, i);
5977 break;
5978 }
5979 }
5980 clear_bit(0, (&sp->tasklet_status));
5981 }
5982 }
5983
5984 /**
5985 * s2io_set_link - Set the Link status
5986 * @data: long pointer to device private structure
5987 * Description: Sets the link status for the adapter
5988 */
5989
5990 static void s2io_set_link(unsigned long data)
5991 {
5992 nic_t *nic = (nic_t *) data;
5993 struct net_device *dev = nic->dev;
5994 XENA_dev_config_t __iomem *bar0 = nic->bar0;
5995 register u64 val64;
5996 u16 subid;
5997
5998 if (test_and_set_bit(0, &(nic->link_state))) {
5999 /* The card is being reset, no point doing anything */
6000 return;
6001 }
6002
6003 subid = nic->pdev->subsystem_device;
6004 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6005 /*
6006 * Allow a small delay for the NIC's self-initiated
6007 * cleanup to complete.
6008 */
6009 msleep(100);
6010 }
6011
6012 val64 = readq(&bar0->adapter_status);
6013 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
6014 if (LINK_IS_UP(val64)) {
6015 val64 = readq(&bar0->adapter_control);
6016 val64 |= ADAPTER_CNTL_EN;
6017 writeq(val64, &bar0->adapter_control);
6018 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6019 subid)) {
6020 val64 = readq(&bar0->gpio_control);
6021 val64 |= GPIO_CTRL_GPIO_0;
6022 writeq(val64, &bar0->gpio_control);
6023 val64 = readq(&bar0->gpio_control);
6024 } else {
6025 val64 |= ADAPTER_LED_ON;
6026 writeq(val64, &bar0->adapter_control);
6027 }
6028 if (s2io_link_fault_indication(nic) ==
6029 MAC_RMAC_ERR_TIMER) {
6030 val64 = readq(&bar0->adapter_status);
6031 if (!LINK_IS_UP(val64)) {
6032 DBG_PRINT(ERR_DBG, "%s:", dev->name);
6033 DBG_PRINT(ERR_DBG, " Link down ");
6034 DBG_PRINT(ERR_DBG, "after ");
6035 DBG_PRINT(ERR_DBG, "enabling ");
6036 DBG_PRINT(ERR_DBG, "device\n");
6037 }
6038 }
6039 if (nic->device_enabled_once == FALSE) {
6040 nic->device_enabled_once = TRUE;
6041 }
6042 s2io_link(nic, LINK_UP);
6043 } else {
6044 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6045 subid)) {
6046 val64 = readq(&bar0->gpio_control);
6047 val64 &= ~GPIO_CTRL_GPIO_0;
6048 writeq(val64, &bar0->gpio_control);
6049 val64 = readq(&bar0->gpio_control);
6050 }
6051 s2io_link(nic, LINK_DOWN);
6052 }
6053 } else { /* NIC is not Quiescent. */
6054 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6055 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6056 netif_stop_queue(dev);
6057 }
6058 clear_bit(0, &(nic->link_state));
6059 }
6060
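/*
 * set_rxd_buffer_pointer - re-attach buffers to a receive descriptor.
 * Helper used while bringing the card down: depending on the configured
 * ring mode (1, 2 or 3 buffer) it either re-uses the previously mapped
 * DMA addresses or allocates and maps fresh buffers, so that each RxD
 * can be handed back to the hardware with valid buffer pointers.
 */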
6061 static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
6062 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6063 u64 *temp2, int size)
6064 {
6065 struct net_device *dev = sp->dev;
6066 struct sk_buff *frag_list;
6067
6068 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6069 /* allocate skb */
6070 if (*skb) {
6071 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6072 /*
6073 * As Rx frames are not going to be processed,
6074 * use the same mapped address for the Rxd
6075 * buffer pointer.
6076 */
6077 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0;
6078 } else {
6079 *skb = dev_alloc_skb(size);
6080 if (!(*skb)) {
6081 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
6082 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
6083 return -ENOMEM ;
6084 }
6085 /* Store the mapped address in a temp variable
6086 * so that it can be used for the next rxd whose
6087 * Host_Control is NULL.
6088 */
6089 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 =
6090 pci_map_single( sp->pdev, (*skb)->data,
6091 size - NET_IP_ALIGN,
6092 PCI_DMA_FROMDEVICE);
6093 rxdp->Host_Control = (unsigned long) (*skb);
6094 }
6095 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6096 /* Two buffer Mode */
6097 if (*skb) {
6098 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
6099 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
6100 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
6101 } else {
6102 *skb = dev_alloc_skb(size);
if (!(*skb)) {
DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
return -ENOMEM;
}
6103 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
6104 pci_map_single(sp->pdev, (*skb)->data,
6105 dev->mtu + 4,
6106 PCI_DMA_FROMDEVICE);
6107 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
6108 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6109 PCI_DMA_FROMDEVICE);
6110 rxdp->Host_Control = (unsigned long) (*skb);
6111
6112 /* Buffer-1 will be a dummy buffer, not used */
6113 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
6114 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6115 PCI_DMA_FROMDEVICE);
6116 }
6117 } else if ((rxdp->Host_Control == 0)) {
6118 /* Three buffer mode */
6119 if (*skb) {
6120 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
6121 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
6122 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
6123 } else {
6124 *skb = dev_alloc_skb(size);
if (!(*skb)) {
DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
return -ENOMEM;
}
6125
6126 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
6127 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6128 PCI_DMA_FROMDEVICE);
6129 /* Buffer-1 receives L3/L4 headers */
6130 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
6131 pci_map_single( sp->pdev, (*skb)->data,
6132 l3l4hdr_size + 4,
6133 PCI_DMA_FROMDEVICE);
6134 /*
6135 * skb_shinfo(skb)->frag_list will have L4
6136 * data payload
6137 */
6138 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6139 ALIGN_SIZE);
6140 if (skb_shinfo(*skb)->frag_list == NULL) {
6141 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6142 dev->name);
6143 return -ENOMEM ;
6144 }
6145 frag_list = skb_shinfo(*skb)->frag_list;
6146 frag_list->next = NULL;
6147 /*
6148 * Buffer-2 receives L4 data payload
6149 */
6150 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
6151 pci_map_single( sp->pdev, frag_list->data,
6152 dev->mtu, PCI_DMA_FROMDEVICE);
6153 }
6154 }
6155 return 0;
6156 }
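
/* Program the per-buffer sizes in the RxD control word according to the
 * configured ring mode (1, 2 or 3 buffer). */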
6157 static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
6158 {
6159 struct net_device *dev = sp->dev;
6160 if (sp->rxd_mode == RXD_MODE_1) {
6161 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6162 } else if (sp->rxd_mode == RXD_MODE_3B) {
6163 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6164 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6165 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6166 } else {
6167 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6168 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6169 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6170 }
6171 }
6172
6173 static int rxd_owner_bit_reset(nic_t *sp)
6174 {
6175 int i, j, k, blk_cnt = 0, size;
6176 mac_info_t * mac_control = &sp->mac_control;
6177 struct config_param *config = &sp->config;
6178 struct net_device *dev = sp->dev;
6179 RxD_t *rxdp = NULL;
6180 struct sk_buff *skb = NULL;
6181 buffAdd_t *ba = NULL;
6182 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6183
6184 /* Calculate the size based on ring mode */
6185 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6186 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6187 if (sp->rxd_mode == RXD_MODE_1)
6188 size += NET_IP_ALIGN;
6189 else if (sp->rxd_mode == RXD_MODE_3B)
6190 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6191 else
6192 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6193
6194 for (i = 0; i < config->rx_ring_num; i++) {
6195 blk_cnt = config->rx_cfg[i].num_rxd /
6196 (rxd_count[sp->rxd_mode] +1);
6197
6198 for (j = 0; j < blk_cnt; j++) {
6199 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6200 rxdp = mac_control->rings[i].
6201 rx_blocks[j].rxds[k].virt_addr;
6202 if(sp->rxd_mode >= RXD_MODE_3A)
6203 ba = &mac_control->rings[i].ba[j][k];
6204 set_rxd_buffer_pointer(sp, rxdp, ba,
6205 &skb,(u64 *)&temp0_64,
6206 (u64 *)&temp1_64,
6207 (u64 *)&temp2_64, size);
6208
6209 set_rxd_buffer_size(sp, rxdp, size);
6210 wmb();
6211 /* flip the Ownership bit to Hardware */
6212 rxdp->Control_1 |= RXD_OWN_XENA;
6213 }
6214 }
6215 }
6216 return 0;
6217
6218 }
6219
6220 static void s2io_card_down(nic_t * sp, int flag)
6221 {
6222 int cnt = 0;
6223 XENA_dev_config_t __iomem *bar0 = sp->bar0;
6224 unsigned long flags;
6225 register u64 val64 = 0;
6226 struct net_device *dev = sp->dev;
6227
6228 del_timer_sync(&sp->alarm_timer);
6229 /* If s2io_set_link task is executing, wait till it completes. */
6230 while (test_and_set_bit(0, &(sp->link_state))) {
6231 msleep(50);
6232 }
6233 atomic_set(&sp->card_state, CARD_DOWN);
6234
6235 /* disable Tx and Rx traffic on the NIC */
6236 stop_nic(sp);
6237 if (flag) {
6238 if (sp->intr_type == MSI_X) {
6239 int i;
6240 u16 msi_control;
6241
6242 for (i=1; (sp->s2io_entries[i].in_use ==
6243 MSIX_REGISTERED_SUCCESS); i++) {
6244 int vector = sp->entries[i].vector;
6245 void *arg = sp->s2io_entries[i].arg;
6246
6247 free_irq(vector, arg);
6248 }
6249 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6250 msi_control &= 0xFFFE; /* Disable MSI */
6251 pci_write_config_word(sp->pdev, 0x42, msi_control);
6252 pci_disable_msix(sp->pdev);
6253 } else {
6254 free_irq(sp->pdev->irq, dev);
6255 if (sp->intr_type == MSI)
6256 pci_disable_msi(sp->pdev);
6257 }
6258 }
6259 /* Waiting till all Interrupt handlers are complete */
6260 cnt = 0;
6261 do {
6262 msleep(10);
6263 if (!atomic_read(&sp->isr_cnt))
6264 break;
6265 cnt++;
6266 } while(cnt < 5);
6267
6268 /* Kill tasklet. */
6269 tasklet_kill(&sp->task);
6270
6271 /* Check if the device is Quiescent and then Reset the NIC */
6272 do {
6273 /* As per the HW requirement we need to replenish the
6274 * receive buffers to avoid the ring bump. Since there is
6275 * no intention of processing the Rx frames at this point, we
6276 * just set the ownership bit of the rxds in each Rx
6277 * ring to HW and set the appropriate buffer size
6278 * based on the ring mode.
6279 */
6280 rxd_owner_bit_reset(sp);
6281
6282 val64 = readq(&bar0->adapter_status);
6283 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
6284 break;
6285 }
6286
6287 msleep(50);
6288 cnt++;
6289 if (cnt == 10) {
6290 DBG_PRINT(ERR_DBG,
6291 "s2io_close:Device not Quiescent ");
6292 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6293 (unsigned long long) val64);
6294 break;
6295 }
6296 } while (1);
6297 s2io_reset(sp);
6298
6299 spin_lock_irqsave(&sp->tx_lock, flags);
6300 /* Free all Tx buffers */
6301 free_tx_buffers(sp);
6302 spin_unlock_irqrestore(&sp->tx_lock, flags);
6303
6304 /* Free all Rx buffers */
6305 spin_lock_irqsave(&sp->rx_lock, flags);
6306 free_rx_buffers(sp);
6307 spin_unlock_irqrestore(&sp->rx_lock, flags);
6308
6309 clear_bit(0, &(sp->link_state));
6310 }
6311
6312 static int s2io_card_up(nic_t * sp)
6313 {
6314 int i, ret = 0;
6315 mac_info_t *mac_control;
6316 struct config_param *config;
6317 struct net_device *dev = (struct net_device *) sp->dev;
6318
6319 /* Initialize the H/W I/O registers */
6320 if (init_nic(sp) != 0) {
6321 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6322 dev->name);
6323 return -ENODEV;
6324 }
6325
6326 if (sp->intr_type == MSI)
6327 ret = s2io_enable_msi(sp);
6328 else if (sp->intr_type == MSI_X)
6329 ret = s2io_enable_msi_x(sp);
6330 if (ret) {
6331 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6332 sp->intr_type = INTA;
6333 }
6334
6335 /*
6336 * Initializing the Rx buffers. Buffers are replenished for
6337 * every configured Rx ring before the NIC is started.
6338 */
6339 mac_control = &sp->mac_control;
6340 config = &sp->config;
6341
6342 for (i = 0; i < config->rx_ring_num; i++) {
6343 if ((ret = fill_rx_buffers(sp, i))) {
6344 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6345 dev->name);
6346 s2io_reset(sp);
6347 free_rx_buffers(sp);
6348 return -ENOMEM;
6349 }
6350 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6351 atomic_read(&sp->rx_bufs_left[i]));
6352 }
6353
6354 /* Setting its receive mode */
6355 s2io_set_multicast(dev);
6356
6357 if (sp->lro) {
6358 /* Initialize max aggregatable pkts based on MTU */
6359 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6360 /* Check if we can use (if specified) the user-provided value */
6361 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6362 sp->lro_max_aggr_per_sess = lro_max_pkts;
6363 }
6364
6365 /* Enable tasklet for the device */
6366 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6367
6368 /* Enable Rx Traffic and interrupts on the NIC */
6369 if (start_nic(sp)) {
6370 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6371 tasklet_kill(&sp->task);
6372 s2io_reset(sp);
6373 free_irq(dev->irq, dev);
6374 free_rx_buffers(sp);
6375 return -ENODEV;
6376 }
6377
6378 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6379
6380 atomic_set(&sp->card_state, CARD_UP);
6381 return 0;
6382 }
6383
6384 /**
6385 * s2io_restart_nic - Resets the NIC.
6386 * @data : long pointer to the device private structure
6387 * Description:
6388 * This function is scheduled to be run by the s2io_tx_watchdog
6389 * function after 0.5 secs to reset the NIC. The idea is to reduce
6390 * the run time of the watch dog routine which is run holding a
6391 * spin lock.
6392 */
6393
6394 static void s2io_restart_nic(unsigned long data)
6395 {
6396 struct net_device *dev = (struct net_device *) data;
6397 nic_t *sp = dev->priv;
6398
6399 s2io_card_down(sp, 0);
6400 if (s2io_card_up(sp)) {
6401 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6402 dev->name);
6403 }
6404 netif_wake_queue(dev);
6405 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6406 dev->name);
6407
6408 }
6409
6410 /**
6411 * s2io_tx_watchdog - Watchdog for transmit side.
6412 * @dev : Pointer to net device structure
6413 * Description:
6414 * This function is triggered if the Tx Queue is stopped
6415 * for a pre-defined amount of time when the Interface is still up.
6416 * If the Interface is jammed in such a situation, the hardware is
6417 * reset (by s2io_close) and restarted again (by s2io_open) to
6418 * overcome any problem that might have been caused in the hardware.
6419 * Return value:
6420 * void
6421 */
6422
6423 static void s2io_tx_watchdog(struct net_device *dev)
6424 {
6425 nic_t *sp = dev->priv;
6426
6427 if (netif_carrier_ok(dev)) {
6428 schedule_work(&sp->rst_timer_task);
6429 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6430 }
6431 }
6432
6433 /**
6434 * rx_osm_handler - To perform some OS related operations on SKB.
6435 * @sp: private member of the device structure,pointer to s2io_nic structure.
6436 * @skb : the socket buffer pointer.
6437 * @len : length of the packet
6438 * @cksum : FCS checksum of the frame.
6439 * @ring_no : the ring from which this RxD was extracted.
6440 * Description:
6441 * This function is called by the Rx interrupt service routine to perform
6442 * some OS related operations on the SKB before passing it to the upper
6443 * layers. It mainly checks if the checksum is OK; if so, it sets the
6444 * SKB's ip_summed field, increments the Rx packet count and passes the SKB
6445 * to the upper layer. If the checksum is wrong, it increments the Rx
6446 * packet error count, frees the SKB and returns an error.
6447 * Return value:
6448 * SUCCESS on success and -1 on failure.
6449 */
6450 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
6451 {
6452 nic_t *sp = ring_data->nic;
6453 struct net_device *dev = (struct net_device *) sp->dev;
6454 struct sk_buff *skb = (struct sk_buff *)
6455 ((unsigned long) rxdp->Host_Control);
6456 int ring_no = ring_data->ring_no;
6457 u16 l3_csum, l4_csum;
6458 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6459 lro_t *lro;
6460
6461 skb->dev = dev;
6462
6463 if (err) {
6464 /* Check for parity error */
6465 if (err & 0x1) {
6466 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6467 }
6468
6469 /*
6470 * Drop the packet if bad transfer code. Exception being
6471 * 0x5, which could be due to unsupported IPv6 extension header.
6472 * In this case, we let stack handle the packet.
6473 * Note that in this case, since checksum will be incorrect,
6474 * stack will validate the same.
6475 */
6476 if (err && ((err >> 48) != 0x5)) {
6477 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6478 dev->name, err);
6479 sp->stats.rx_crc_errors++;
6480 dev_kfree_skb(skb);
6481 atomic_dec(&sp->rx_bufs_left[ring_no]);
6482 rxdp->Host_Control = 0;
6483 return 0;
6484 }
6485 }
6486
6487 /* Updating statistics */
6488 rxdp->Host_Control = 0;
6489 sp->rx_pkt_count++;
6490 sp->stats.rx_packets++;
6491 if (sp->rxd_mode == RXD_MODE_1) {
6492 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6493
6494 sp->stats.rx_bytes += len;
6495 skb_put(skb, len);
6496
6497 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6498 int get_block = ring_data->rx_curr_get_info.block_index;
6499 int get_off = ring_data->rx_curr_get_info.offset;
6500 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6501 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6502 unsigned char *buff = skb_push(skb, buf0_len);
6503
6504 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
6505 sp->stats.rx_bytes += buf0_len + buf2_len;
6506 memcpy(buff, ba->ba_0, buf0_len);
6507
6508 if (sp->rxd_mode == RXD_MODE_3A) {
6509 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6510
6511 skb_put(skb, buf1_len);
6512 skb->len += buf2_len;
6513 skb->data_len += buf2_len;
6514 skb->truesize += buf2_len;
6515 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6516 sp->stats.rx_bytes += buf1_len;
6517
6518 } else
6519 skb_put(skb, buf2_len);
6520 }
6521
6522 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6523 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6524 (sp->rx_csum)) {
6525 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6526 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6527 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6528 /*
6529 * NIC verifies if the Checksum of the received
6530 * frame is Ok or not and accordingly returns
6531 * a flag in the RxD.
6532 */
6533 skb->ip_summed = CHECKSUM_UNNECESSARY;
6534 if (sp->lro) {
6535 u32 tcp_len;
6536 u8 *tcp;
6537 int ret = 0;
6538
6539 ret = s2io_club_tcp_session(skb->data, &tcp,
6540 &tcp_len, &lro, rxdp, sp);
6541 switch (ret) {
6542 case 3: /* Begin anew */
6543 lro->parent = skb;
6544 goto aggregate;
6545 case 1: /* Aggregate */
6546 {
6547 lro_append_pkt(sp, lro,
6548 skb, tcp_len);
6549 goto aggregate;
6550 }
6551 case 4: /* Flush session */
6552 {
6553 lro_append_pkt(sp, lro,
6554 skb, tcp_len);
6555 queue_rx_frame(lro->parent);
6556 clear_lro_session(lro);
6557 sp->mac_control.stats_info->
6558 sw_stat.flush_max_pkts++;
6559 goto aggregate;
6560 }
6561 case 2: /* Flush both */
6562 lro->parent->data_len =
6563 lro->frags_len;
6564 sp->mac_control.stats_info->
6565 sw_stat.sending_both++;
6566 queue_rx_frame(lro->parent);
6567 clear_lro_session(lro);
6568 goto send_up;
6569 case 0: /* sessions exceeded */
6570 case -1: /* non-TCP or not
6571 * L2 aggregatable
6572 */
6573 case 5: /*
6574 * First pkt in session not
6575 * L3/L4 aggregatable
6576 */
6577 break;
6578 default:
6579 DBG_PRINT(ERR_DBG,
6580 "%s: Samadhana!!\n",
6581 __FUNCTION__);
6582 BUG();
6583 }
6584 }
6585 } else {
6586 /*
6587 * Packet with erroneous checksum, let the
6588 * upper layers deal with it.
6589 */
6590 skb->ip_summed = CHECKSUM_NONE;
6591 }
6592 } else {
6593 skb->ip_summed = CHECKSUM_NONE;
6594 }
6595
6596 if (!sp->lro) {
6597 skb->protocol = eth_type_trans(skb, dev);
6598 #ifdef CONFIG_S2IO_NAPI
6599 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6600 /* Queueing the vlan frame to the upper layer */
6601 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6602 RXD_GET_VLAN_TAG(rxdp->Control_2));
6603 } else {
6604 netif_receive_skb(skb);
6605 }
6606 #else
6607 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6608 /* Queueing the vlan frame to the upper layer */
6609 vlan_hwaccel_rx(skb, sp->vlgrp,
6610 RXD_GET_VLAN_TAG(rxdp->Control_2));
6611 } else {
6612 netif_rx(skb);
6613 }
6614 #endif
6615 } else {
6616 send_up:
6617 queue_rx_frame(skb);
6618 }
6619 dev->last_rx = jiffies;
6620 aggregate:
6621 atomic_dec(&sp->rx_bufs_left[ring_no]);
6622 return SUCCESS;
6623 }
6624
6625 /**
6626 * s2io_link - stops/starts the Tx queue.
6627 * @sp : private member of the device structure, which is a pointer to the
6628 * s2io_nic structure.
6629 * @link : indicates whether link is UP/DOWN.
6630 * Description:
6631 * This function stops/starts the Tx queue depending on whether the link
6632 * status of the NIC is down or up. This is called by the Alarm
6633 * interrupt handler whenever a link change interrupt comes up.
6634 * Return value:
6635 * void.
6636 */
6637
6638 static void s2io_link(nic_t * sp, int link)
6639 {
6640 struct net_device *dev = (struct net_device *) sp->dev;
6641
6642 if (link != sp->last_link_state) {
6643 if (link == LINK_DOWN) {
6644 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6645 netif_carrier_off(dev);
6646 } else {
6647 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6648 netif_carrier_on(dev);
6649 }
6650 }
6651 sp->last_link_state = link;
6652 }
6653
6654 /**
6655 * get_xena_rev_id - to identify revision ID of xena.
6656 * @pdev : PCI Dev structure
6657 * Description:
6658 * Function to identify the Revision ID of xena.
6659 * Return value:
6660 * returns the revision ID of the device.
6661 */
6662
6663 static int get_xena_rev_id(struct pci_dev *pdev)
6664 {
6665 u8 id = 0;
6666
6667 pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
6668 return id;
6669 }
6670
6671 /**
6672 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
6673 * @sp : private member of the device structure, which is a pointer to the
6674 * s2io_nic structure.
6675 * Description:
6676 * This function initializes a few of the PCI and PCI-X configuration registers
6677 * with recommended values.
6678 * Return value:
6679 * void
6680 */
6681
6682 static void s2io_init_pci(nic_t * sp)
6683 {
6684 u16 pci_cmd = 0, pcix_cmd = 0;
6685
6686 /* Enable Data Parity Error Recovery in PCI-X command register. */
6687 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6688 &(pcix_cmd));
6689 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6690 (pcix_cmd | 1));
6691 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6692 &(pcix_cmd));
6693
6694 /* Set the PErr Response bit in PCI command register. */
6695 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6696 pci_write_config_word(sp->pdev, PCI_COMMAND,
6697 (pci_cmd | PCI_COMMAND_PARITY));
6698 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6699 }
6700
6701 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
6702 MODULE_LICENSE("GPL");
6703 MODULE_VERSION(DRV_VERSION);
6704
6705 module_param(tx_fifo_num, int, 0);
6706 module_param(rx_ring_num, int, 0);
6707 module_param(rx_ring_mode, int, 0);
6708 module_param_array(tx_fifo_len, uint, NULL, 0);
6709 module_param_array(rx_ring_sz, uint, NULL, 0);
6710 module_param_array(rts_frm_len, uint, NULL, 0);
6711 module_param(use_continuous_tx_intrs, int, 1);
6712 module_param(rmac_pause_time, int, 0);
6713 module_param(mc_pause_threshold_q0q3, int, 0);
6714 module_param(mc_pause_threshold_q4q7, int, 0);
6715 module_param(shared_splits, int, 0);
6716 module_param(tmac_util_period, int, 0);
6717 module_param(rmac_util_period, int, 0);
6718 module_param(bimodal, bool, 0);
6719 module_param(l3l4hdr_size, int , 0);
6720 #ifndef CONFIG_S2IO_NAPI
6721 module_param(indicate_max_pkts, int, 0);
6722 #endif
6723 module_param(rxsync_frequency, int, 0);
6724 module_param(intr_type, int, 0);
6725 module_param(lro, int, 0);
6726 module_param(lro_max_pkts, int, 0);
6727
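/*
 * Example (hypothetical values) of overriding the above parameters at
 * module load time:
 *
 *	modprobe s2io intr_type=2 lro=1 lro_max_pkts=20 rx_ring_num=2
 *
 * intr_type=2 selects MSI-X and lro=1 enables Large Receive Offload,
 * per the parameter descriptions at the top of this file.
 */
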
6728 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6729 {
6730 if ( tx_fifo_num > 8) {
6731 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6732 "supported\n");
6733 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6734 tx_fifo_num = 8;
6735 }
6736 if ( rx_ring_num > 8) {
6737 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6738 "supported\n");
6739 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6740 rx_ring_num = 8;
6741 }
6742 #ifdef CONFIG_S2IO_NAPI
6743 if (*dev_intr_type != INTA) {
6744 DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when "
6745 "MSI/MSI-X is enabled. Defaulting to INTA\n");
6746 *dev_intr_type = INTA;
6747 }
6748 #endif
6749 #ifndef CONFIG_PCI_MSI
6750 if (*dev_intr_type != INTA) {
6751 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support "
6752 "MSI/MSI-X. Defaulting to INTA\n");
6753 *dev_intr_type = INTA;
6754 }
6755 #else
6756 if (*dev_intr_type > MSI_X) {
6757 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6758 "Defaulting to INTA\n");
6759 *dev_intr_type = INTA;
6760 }
6761 #endif
6762 if ((*dev_intr_type == MSI_X) &&
6763 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6764 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6765 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
6766 "Defaulting to INTA\n");
6767 *dev_intr_type = INTA;
6768 }
6769 if (rx_ring_mode > 3) {
6770 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6771 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6772 rx_ring_mode = 3;
6773 }
6774 return SUCCESS;
6775 }
6776
6777 /**
6778 * s2io_init_nic - Initialization of the adapter.
6779 * @pdev : structure containing the PCI related information of the device.
6780 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
6781 * Description:
6782 * The function initializes an adapter identified by the pci_dev structure.
6783 * All OS related initialization including memory and device structure and
6784 * initialization of the device private variable is done. Also the swapper
6785 * control register is initialized to enable read and write into the I/O
6786 * registers of the device.
6787 * Return value:
6788 * returns 0 on success and negative on failure.
6789 */
6790
6791 static int __devinit
6792 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6793 {
6794 nic_t *sp;
6795 struct net_device *dev;
6796 int i, j, ret;
6797 int dma_flag = FALSE;
6798 u32 mac_up, mac_down;
6799 u64 val64 = 0, tmp64 = 0;
6800 XENA_dev_config_t __iomem *bar0 = NULL;
6801 u16 subid;
6802 mac_info_t *mac_control;
6803 struct config_param *config;
6804 int mode;
6805 u8 dev_intr_type = intr_type;
6806
6807 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
6808 return ret;
6809
6810 if ((ret = pci_enable_device(pdev))) {
6811 DBG_PRINT(ERR_DBG,
6812 "s2io_init_nic: pci_enable_device failed\n");
6813 return ret;
6814 }
6815
6816 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
6817 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6818 dma_flag = TRUE;
6819 if (pci_set_consistent_dma_mask
6820 (pdev, DMA_64BIT_MASK)) {
6821 DBG_PRINT(ERR_DBG,
6822 "Unable to obtain 64bit DMA for "
6823 "consistent allocations\n");
6824 pci_disable_device(pdev);
6825 return -ENOMEM;
6826 }
6827 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
6828 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
6829 } else {
6830 pci_disable_device(pdev);
6831 return -ENOMEM;
6832 }
6833 if (dev_intr_type != MSI_X) {
6834 if (pci_request_regions(pdev, s2io_driver_name)) {
6835 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6836 pci_disable_device(pdev);
6837 return -ENODEV;
6838 }
6839 }
6840 else {
6841 if (!(request_mem_region(pci_resource_start(pdev, 0),
6842 pci_resource_len(pdev, 0), s2io_driver_name))) {
6843 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
6844 pci_disable_device(pdev);
6845 return -ENODEV;
6846 }
6847 if (!(request_mem_region(pci_resource_start(pdev, 2),
6848 pci_resource_len(pdev, 2), s2io_driver_name))) {
6849 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
6850 release_mem_region(pci_resource_start(pdev, 0),
6851 pci_resource_len(pdev, 0));
6852 pci_disable_device(pdev);
6853 return -ENODEV;
6854 }
6855 }
6856
6857 dev = alloc_etherdev(sizeof(nic_t));
6858 if (dev == NULL) {
6859 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6860 pci_disable_device(pdev);
6861 pci_release_regions(pdev);
6862 return -ENODEV;
6863 }
6864
6865 pci_set_master(pdev);
6866 pci_set_drvdata(pdev, dev);
6867 SET_MODULE_OWNER(dev);
6868 SET_NETDEV_DEV(dev, &pdev->dev);
6869
6870 /* Private member variable initialized to s2io NIC structure */
6871 sp = dev->priv;
6872 memset(sp, 0, sizeof(nic_t));
6873 sp->dev = dev;
6874 sp->pdev = pdev;
6875 sp->high_dma_flag = dma_flag;
6876 sp->device_enabled_once = FALSE;
6877 if (rx_ring_mode == 1)
6878 sp->rxd_mode = RXD_MODE_1;
6879 if (rx_ring_mode == 2)
6880 sp->rxd_mode = RXD_MODE_3B;
6881 if (rx_ring_mode == 3)
6882 sp->rxd_mode = RXD_MODE_3A;
6883
6884 sp->intr_type = dev_intr_type;
6885
6886 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
6887 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
6888 sp->device_type = XFRAME_II_DEVICE;
6889 else
6890 sp->device_type = XFRAME_I_DEVICE;
6891
6892 sp->lro = lro;
6893
6894 /* Initialize some PCI/PCI-X fields of the NIC. */
6895 s2io_init_pci(sp);
6896
6897 /*
6898 * Setting the device configuration parameters.
6899 * Most of these parameters can be specified by the user during
6900 * module insertion as they are module loadable parameters. If
6901 * these parameters are not specified during load time, they
6902 * are initialized with default values.
6903 */
6904 mac_control = &sp->mac_control;
6905 config = &sp->config;
6906
6907 /* Tx side parameters. */
6908 config->tx_fifo_num = tx_fifo_num;
6909 for (i = 0; i < MAX_TX_FIFOS; i++) {
6910 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
6911 config->tx_cfg[i].fifo_priority = i;
6912 }
6913
6914 /* mapping the QoS priority to the configured fifos */
6915 for (i = 0; i < MAX_TX_FIFOS; i++)
6916 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
6917
6918 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
6919 for (i = 0; i < config->tx_fifo_num; i++) {
6920 config->tx_cfg[i].f_no_snoop =
6921 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
6922 if (config->tx_cfg[i].fifo_len < 65) {
6923 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
6924 break;
6925 }
6926 }
6927 /* + 2 because one Txd for skb->data and one Txd for UFO */
6928 config->max_txds = MAX_SKB_FRAGS + 2;
6929
6930 /* Rx side parameters. */
6931 config->rx_ring_num = rx_ring_num;
6932 for (i = 0; i < MAX_RX_RINGS; i++) {
6933 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
6934 (rxd_count[sp->rxd_mode] + 1);
6935 config->rx_cfg[i].ring_priority = i;
6936 }
6937
6938 for (i = 0; i < rx_ring_num; i++) {
6939 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
6940 config->rx_cfg[i].f_no_snoop =
6941 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
6942 }
6943
6944 /* Setting Mac Control parameters */
6945 mac_control->rmac_pause_time = rmac_pause_time;
6946 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
6947 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
6948
6949
6950 /* Initialize Ring buffer parameters. */
6951 for (i = 0; i < config->rx_ring_num; i++)
6952 atomic_set(&sp->rx_bufs_left[i], 0);
6953
6954 /* Initialize the number of ISRs currently running */
6955 atomic_set(&sp->isr_cnt, 0);
6956
6957 /* initialize the shared memory used by the NIC and the host */
6958 if (init_shared_mem(sp)) {
6959 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
6960 __FUNCTION__);
6961 ret = -ENOMEM;
6962 goto mem_alloc_failed;
6963 }
6964
6965 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
6966 pci_resource_len(pdev, 0));
6967 if (!sp->bar0) {
6968 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
6969 dev->name);
6970 ret = -ENOMEM;
6971 goto bar0_remap_failed;
6972 }
6973
6974 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
6975 pci_resource_len(pdev, 2));
6976 if (!sp->bar1) {
6977 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
6978 dev->name);
6979 ret = -ENOMEM;
6980 goto bar1_remap_failed;
6981 }
6982
6983 dev->irq = pdev->irq;
6984 dev->base_addr = (unsigned long) sp->bar0;
6985
6986 /* Initializing the BAR1 address as the start of the FIFO pointer. */
6987 for (j = 0; j < MAX_TX_FIFOS; j++) {
6988 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
6989 (sp->bar1 + (j * 0x00020000));
6990 }
6991
6992 /* Driver entry points */
6993 dev->open = &s2io_open;
6994 dev->stop = &s2io_close;
6995 dev->hard_start_xmit = &s2io_xmit;
6996 dev->get_stats = &s2io_get_stats;
6997 dev->set_multicast_list = &s2io_set_multicast;
6998 dev->do_ioctl = &s2io_ioctl;
6999 dev->change_mtu = &s2io_change_mtu;
7000 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7001 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7002 dev->vlan_rx_register = s2io_vlan_rx_register;
7003 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7004
7005 /*
7006 * will use eth_mac_addr() for dev->set_mac_address
7007 * mac address will be set every time dev->open() is called
7008 */
7009 #if defined(CONFIG_S2IO_NAPI)
7010 dev->poll = s2io_poll;
7011 dev->weight = 32;
7012 #endif
7013
7014 #ifdef CONFIG_NET_POLL_CONTROLLER
7015 dev->poll_controller = s2io_netpoll;
7016 #endif
7017
7018 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7019 if (sp->high_dma_flag == TRUE)
7020 dev->features |= NETIF_F_HIGHDMA;
7021 #ifdef NETIF_F_TSO
7022 dev->features |= NETIF_F_TSO;
7023 #endif
7024 if (sp->device_type & XFRAME_II_DEVICE) {
7025 dev->features |= NETIF_F_UFO;
7026 dev->features |= NETIF_F_HW_CSUM;
7027 }
7028
7029 dev->tx_timeout = &s2io_tx_watchdog;
7030 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7031 INIT_WORK(&sp->rst_timer_task,
7032 (void (*)(void *)) s2io_restart_nic, dev);
7033 INIT_WORK(&sp->set_link_task,
7034 (void (*)(void *)) s2io_set_link, sp);
7035
7036 pci_save_state(sp->pdev);
7037
7038 /* Setting swapper control on the NIC, for proper reset operation */
7039 if (s2io_set_swapper(sp)) {
7040 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7041 dev->name);
7042 ret = -EAGAIN;
7043 goto set_swap_failed;
7044 }
7045
7046 /* Verify if the Herc works in the slot it's placed into */
7047 if (sp->device_type & XFRAME_II_DEVICE) {
7048 mode = s2io_verify_pci_mode(sp);
7049 if (mode < 0) {
7050 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7051 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7052 ret = -EBADSLT;
7053 goto set_swap_failed;
7054 }
7055 }
7056
7057 /* Not needed for Herc */
7058 if (sp->device_type & XFRAME_I_DEVICE) {
7059 /*
7060 * Fix for all "FFs" MAC address problems observed on
7061 * Alpha platforms
7062 */
7063 fix_mac_address(sp);
7064 s2io_reset(sp);
7065 }
7066
7067 /*
7068 * MAC address initialization.
7069 * For now only one mac address will be read and used.
7070 */
7071 bar0 = sp->bar0;
7072 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7073 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7074 writeq(val64, &bar0->rmac_addr_cmd_mem);
7075 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7076 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
7077 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7078 mac_down = (u32) tmp64;
7079 mac_up = (u32) (tmp64 >> 32);
7080
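/* The factory MAC address occupies the most significant six bytes of
 * the 64-bit rmac_addr_data0_mem register read above; unpack it byte
 * by byte. */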
7081 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
7082
7083 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7084 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7085 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7086 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7087 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7088 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7089
7090 /* Set the factory defined MAC address initially */
7091 dev->addr_len = ETH_ALEN;
7092 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7093
7094 /*
7095 * Initialize the tasklet status and link state flags
7096 * and the card state parameter
7097 */
7098 atomic_set(&(sp->card_state), 0);
7099 sp->tasklet_status = 0;
7100 sp->link_state = 0;
7101
7102 /* Initialize spinlocks */
7103 spin_lock_init(&sp->tx_lock);
7104 #ifndef CONFIG_S2IO_NAPI
7105 spin_lock_init(&sp->put_lock);
7106 #endif
7107 spin_lock_init(&sp->rx_lock);
7108
7109 /*
7110 * SXE-002: Configure link and activity LED to init state
7111 * on driver load.
7112 */
7113 subid = sp->pdev->subsystem_device;
7114 if ((subid & 0xFF) >= 0x07) {
7115 val64 = readq(&bar0->gpio_control);
7116 val64 |= 0x0000800000000000ULL;
7117 writeq(val64, &bar0->gpio_control);
7118 val64 = 0x0411040400000000ULL;
7119 writeq(val64, (void __iomem *) bar0 + 0x2700);
7120 val64 = readq(&bar0->gpio_control);
7121 }
7122
7123 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7124
7125 if (register_netdev(dev)) {
7126 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7127 ret = -ENODEV;
7128 goto register_failed;
7129 }
7130 s2io_vpd_read(sp);
7131 DBG_PRINT(ERR_DBG, "%s: Neterion %s", dev->name, sp->product_name);
7132 DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
7133 get_xena_rev_id(sp->pdev),
7134 s2io_driver_version);
7135 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
7136 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7137 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
7138 sp->def_mac_addr[0].mac_addr[0],
7139 sp->def_mac_addr[0].mac_addr[1],
7140 sp->def_mac_addr[0].mac_addr[2],
7141 sp->def_mac_addr[0].mac_addr[3],
7142 sp->def_mac_addr[0].mac_addr[4],
7143 sp->def_mac_addr[0].mac_addr[5]);
7144 if (sp->device_type & XFRAME_II_DEVICE) {
7145 mode = s2io_print_pci_mode(sp);
7146 if (mode < 0) {
7147 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7148 ret = -EBADSLT;
7149 unregister_netdev(dev);
7150 goto set_swap_failed;
7151 }
7152 }
        switch (sp->rxd_mode) {
        case RXD_MODE_1:
                DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
                          dev->name);
                break;
        case RXD_MODE_3B:
                DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
                          dev->name);
                break;
        case RXD_MODE_3A:
                DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
                          dev->name);
                break;
        }
#ifdef CONFIG_S2IO_NAPI
        DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
#endif
        switch (sp->intr_type) {
        case INTA:
                DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
                break;
        case MSI:
                DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
                break;
        case MSI_X:
                DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
                break;
        }
        if (sp->lro)
                DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
                          dev->name);

        /* Initialize device name */
        sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

        /* Initialize bimodal interrupts */
        sp->config.bimodal = bimodal;
        if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
                sp->config.bimodal = 0;
                DBG_PRINT(ERR_DBG, "%s: Bimodal intr not supported by Xframe I\n",
                          dev->name);
        }

        /*
         * Report the link state as off at this point; when the link
         * change interrupt arrives, the state will be updated to the
         * right state automatically.
         */
        netif_carrier_off(dev);

        return 0;

register_failed:
set_swap_failed:
        iounmap(sp->bar1);
bar1_remap_failed:
        iounmap(sp->bar0);
bar0_remap_failed:
mem_alloc_failed:
        free_shared_mem(sp);
        pci_disable_device(pdev);
        if (dev_intr_type != MSI_X)
                pci_release_regions(pdev);
        else {
                release_mem_region(pci_resource_start(pdev, 0),
                                   pci_resource_len(pdev, 0));
                release_mem_region(pci_resource_start(pdev, 2),
                                   pci_resource_len(pdev, 2));
        }
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);

        return ret;
}

/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a hotplug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
        struct net_device *dev =
            (struct net_device *) pci_get_drvdata(pdev);
        nic_t *sp;

        if (dev == NULL) {
                DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
                return;
        }

        sp = dev->priv;
        unregister_netdev(dev);

        free_shared_mem(sp);
        iounmap(sp->bar0);
        iounmap(sp->bar1);
        pci_disable_device(pdev);
        if (sp->intr_type != MSI_X)
                pci_release_regions(pdev);
        else {
                release_mem_region(pci_resource_start(pdev, 0),
                                   pci_resource_len(pdev, 0));
                release_mem_region(pci_resource_start(pdev, 2),
                                   pci_resource_len(pdev, 2));
        }
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
}

/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

int __init s2io_starter(void)
{
        return pci_module_init(&s2io_driver);
}

/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

static void s2io_closer(void)
{
        pci_unregister_driver(&s2io_driver);
        DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);

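/*
 * check_L2_lro_capable - Verify that a received frame is LRO-eligible at L2
 * Description: Rejects non-TCP frames and frames whose L2 encapsulation is
 * not plain DIX Ethernet (with or without a VLAN tag), then locates the IP
 * and TCP headers inside the receive buffer. Returns 0 on success, -1 if
 * the frame cannot be aggregated.
 */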
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
                                struct tcphdr **tcp, RxD_t *rxdp)
{
        int ip_off;
        u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;

        if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
                DBG_PRINT(INIT_DBG, "%s: Non-TCP frames not supported for LRO\n",
                          __FUNCTION__);
                return -1;
        }

        /* TODO:
         * By default the VLAN field in the MAC header is stripped by the
         * card; if this feature is turned off in the rx_pa_cfg register,
         * then ip_off has to be shifted by a further 2 bytes.
         */
        switch (l2_type) {
        case 0: /* DIX type */
        case 4: /* DIX type with VLAN */
                ip_off = HEADER_ETHERNET_II_802_3_SIZE;
                break;
        /* LLC, SNAP etc are considered non-mergeable */
        default:
                return -1;
        }

        *ip = (struct iphdr *)((u8 *)buffer + ip_off);
        ip_len = (u8)((*ip)->ihl);
        ip_len <<= 2;
        *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);

        return 0;
}

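/*
 * check_for_socket_match - Match a frame against an existing LRO session
 * Description: Compares the IP source/destination addresses and TCP
 * source/destination ports of the incoming frame with those recorded in
 * the LRO session. Returns 0 on a match, -1 otherwise.
 */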
static int check_for_socket_match(lro_t *lro, struct iphdr *ip,
                                  struct tcphdr *tcp)
{
        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
        if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
                return -1;
        return 0;
}

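/*
 * get_l4_pyld_length - Compute the TCP payload length of a frame
 * Description: Derives the L4 payload size by subtracting the IP and TCP
 * header lengths (ihl and doff are in 32-bit words, hence the shift by 2)
 * from the total IP datagram length.
 */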
static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
{
        return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
}

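/*
 * initiate_new_session - Start a new LRO session for a TCP stream
 * Description: Records the L2/L3/L4 header pointers, the next expected
 * sequence number, the ACK number, and (when a TCP timestamp option is
 * present, i.e. doff == 8) the current tsval/tsecr values in the LRO
 * object, then marks the session in use.
 */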
static void initiate_new_session(lro_t *lro, u8 *l2h,
        struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
{
        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
        lro->l2h = l2h;
        lro->iph = ip;
        lro->tcph = tcp;
        lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
        /* Keep the ACK in network byte order; update_L3L4_header()
         * writes it back to the header as-is.
         */
        lro->tcp_ack = tcp->ack_seq;
        lro->sg_num = 1;
        lro->total_len = ntohs(ip->tot_len);
        lro->frags_len = 0;
        /*
         * Check if we saw a TCP timestamp. Other consistency checks have
         * already been done.
         */
        if (tcp->doff == 8) {
                u32 *ptr;
                ptr = (u32 *)(tcp + 1);
                lro->saw_ts = 1;
                /* Keep tsval in host order for the monotonicity check;
                 * tsecr stays in network order as it is echoed back verbatim.
                 */
                lro->cur_tsval = ntohl(*(ptr + 1));
                lro->cur_tsecr = *(ptr + 2);
        }
        lro->in_use = 1;
}

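/*
 * update_L3L4_header - Patch up the headers of an aggregated frame
 * Description: Called when an LRO session is flushed. Rewrites the IP
 * total length and checksum and the TCP ACK/window/tsecr fields of the
 * first frame so that the single large packet handed to the stack is
 * self-consistent, and updates the aggregation statistics.
 */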
static void update_L3L4_header(nic_t *sp, lro_t *lro)
{
        struct iphdr *ip = lro->iph;
        struct tcphdr *tcp = lro->tcph;
        u16 nchk;
        StatInfo_t *statinfo = sp->mac_control.stats_info;
        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);

        /* Update L3 header */
        ip->tot_len = htons(lro->total_len);
        ip->check = 0;
        nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
        ip->check = nchk;

        /* Update L4 header */
        tcp->ack_seq = lro->tcp_ack;
        tcp->window = lro->window;

        /* Update tsecr field if this session has timestamps enabled */
        if (lro->saw_ts) {
                u32 *ptr = (u32 *)(tcp + 1);
                *(ptr + 2) = lro->cur_tsecr;
        }

        /* Update counters required for calculation of
         * average no. of packets aggregated.
         */
        statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
        statinfo->sw_stat.num_aggregations++;
}

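/*
 * aggregate_new_rx - Merge a new segment into an existing LRO session
 * Description: Extends the running totals (IP length, fragment length,
 * next expected sequence number, segment count) and refreshes the ACK,
 * window and timestamp values from the newest segment.
 */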
static void aggregate_new_rx(lro_t *lro, struct iphdr *ip,
                             struct tcphdr *tcp, u32 l4_pyld)
{
        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
        lro->total_len += l4_pyld;
        lro->frags_len += l4_pyld;
        lro->tcp_next_seq += l4_pyld;
        lro->sg_num++;

        /* Update ack seq no. and advertised window (from this pkt)
         * in the LRO object
         */
        lro->tcp_ack = tcp->ack_seq;
        lro->window = tcp->window;

        if (lro->saw_ts) {
                u32 *ptr;
                /* Update tsecr and tsval from this packet */
                ptr = (u32 *) (tcp + 1);
                lro->cur_tsval = ntohl(*(ptr + 1));
                lro->cur_tsecr = *(ptr + 2);
        }
}

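/*
 * verify_l3_l4_lro_capable - Check whether a TCP segment may be aggregated
 * Description: Refuses runt frames and pure ACKs, IP packets with options,
 * segments with any control flag other than ACK set, and TCP option
 * layouts other than "no options" (doff == 5) or a single timestamp
 * option (doff == 8). For timestamped segments it also requires a
 * non-decreasing tsval and a non-zero timestamp echo reply. Returns 0 if
 * the segment can be aggregated, -1 if the session must be flushed.
 */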
static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
                                    struct tcphdr *tcp, u32 tcp_pyld_len)
{
        u8 *ptr;

        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);

        if (!tcp_pyld_len) {
                /* Runt frame or a pure ack */
                return -1;
        }

        if (ip->ihl != 5) /* IP has options */
                return -1;

        if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
            !tcp->ack) {
                /*
                 * Currently we recognize only the ACK control word; any
                 * other control field being set results in flushing the
                 * LRO session.
                 */
                return -1;
        }

        /*
         * Allow only one TCP timestamp option. Don't aggregate if
         * any other options are detected.
         */
        if (tcp->doff != 5 && tcp->doff != 8)
                return -1;

        if (tcp->doff == 8) {
                ptr = (u8 *)(tcp + 1);
                while (*ptr == TCPOPT_NOP)
                        ptr++;
                if (*ptr != TCPOPT_TIMESTAMP || *(ptr + 1) != TCPOLEN_TIMESTAMP)
                        return -1;

                /* Ensure timestamp value increases monotonically;
                 * cur_tsval is stored in host byte order, so convert
                 * the on-wire value before comparing.
                 */
                if (l_lro)
                        if (l_lro->cur_tsval > ntohl(*((u32 *)(ptr + 2))))
                                return -1;

                /* timestamp echo reply should be non-zero */
                if (*((u32 *)(ptr + 6)) == 0)
                        return -1;
        }

        return 0;
}

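/*
 * s2io_club_tcp_session - Classify a received segment for LRO
 * Description: Locates the IP/TCP headers, then either matches the frame
 * to an existing LRO session or tries to start a new one. Return values:
 *      0 - all LRO sessions already in use, *lro is set to NULL
 *      1 - segment aggregated into an existing session
 *      2 - session should be flushed (out-of-order or non-aggregatable
 *          segment on a live session); its headers have been updated
 *      3 - a new session was begun with this segment
 *      4 - segment aggregated and the per-session limit reached; flush
 *      5 - frame not aggregatable, send it up without LRO
 * A negative value from check_L2_lro_capable() is passed through.
 */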
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
                      RxD_t *rxdp, nic_t *sp)
{
        struct iphdr *ip;
        struct tcphdr *tcph;
        int ret = 0, i;

        if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
                                         rxdp))) {
                DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n",
                          ip->saddr, ip->daddr);
        } else {
                return ret;
        }

        tcph = (struct tcphdr *)*tcp;
        *tcp_len = get_l4_pyld_length(ip, tcph);
        for (i = 0; i < MAX_LRO_SESSIONS; i++) {
                lro_t *l_lro = &sp->lro0_n[i];
                if (l_lro->in_use) {
                        if (check_for_socket_match(l_lro, ip, tcph))
                                continue;
                        /* Sock pair matched */
                        *lro = l_lro;

                        if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
                                DBG_PRINT(INFO_DBG, "%s: Out of order. expected "
                                          "0x%x, actual 0x%x\n", __FUNCTION__,
                                          (*lro)->tcp_next_seq,
                                          ntohl(tcph->seq));

                                sp->mac_control.stats_info->
                                    sw_stat.outof_sequence_pkts++;
                                ret = 2;
                                break;
                        }

                        if (!verify_l3_l4_lro_capable(l_lro, ip, tcph, *tcp_len))
                                ret = 1; /* Aggregate */
                        else
                                ret = 2; /* Flush both */
                        break;
                }
        }

        if (ret == 0) {
                /* Before searching for available LRO objects,
                 * check if the pkt is L3/L4 aggregatable. If not
                 * don't create new LRO session. Just send this
                 * packet up.
                 */
                if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
                        return 5;
                }

                for (i = 0; i < MAX_LRO_SESSIONS; i++) {
                        lro_t *l_lro = &sp->lro0_n[i];
                        if (!(l_lro->in_use)) {
                                *lro = l_lro;
                                ret = 3; /* Begin anew */
                                break;
                        }
                }
        }

        if (ret == 0) { /* sessions exceeded */
                DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
                          __FUNCTION__);
                *lro = NULL;
                return ret;
        }

        switch (ret) {
        case 3:
                initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
                break;
        case 2:
                update_L3L4_header(sp, *lro);
                break;
        case 1:
                aggregate_new_rx(*lro, ip, tcph, *tcp_len);
                if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
                        update_L3L4_header(sp, *lro);
                        ret = 4; /* Flush the LRO */
                }
                break;
        default:
                DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n",
                          __FUNCTION__);
                break;
        }

        return ret;
}

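/* clear_lro_session - Reset an LRO object so it can be reused */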
static void clear_lro_session(lro_t *lro)
{
        memset(lro, 0, sizeof(lro_t));
}

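/*
 * queue_rx_frame - Hand a completed frame to the network stack
 * Description: Sets the protocol field and passes the skb up via the
 * NAPI or non-NAPI receive path, depending on the build configuration.
 */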
static void queue_rx_frame(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;

        skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_S2IO_NAPI
        netif_receive_skb(skb);
#else
        netif_rx(skb);
#endif
}

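/*
 * lro_append_pkt - Chain an aggregated segment onto the first skb
 * Description: Trims the new skb down to its TCP payload and appends it
 * to the frag_list of the session's parent skb, updating the parent's
 * len/data_len so the stack sees one large packet.
 */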
static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
                           u32 tcp_len)
{
        struct sk_buff *tmp, *first = lro->parent;

        first->len += tcp_len;
        first->data_len = lro->frags_len;
        skb_pull(skb, (skb->len - tcp_len));
        if ((tmp = skb_shinfo(first)->frag_list)) {
                while (tmp->next)
                        tmp = tmp->next;
                tmp->next = skb;
        } else
                skb_shinfo(first)->frag_list = skb;
        sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
}